X-Git-Url: https://arthur.barton.de/cgi-bin/gitweb.cgi?p=netatalk.git;a=blobdiff_plain;f=libevent%2Fevthread.c;h=90e195d584a564a7345b3e09730181f68b1c2a54;hp=2cfb76e9103d1a36e406277e5b4ffd98f54033a2;hb=3a84db87064922ad10ac10cc1d6833380e575995;hpb=15c1fc2f2328736dd428ec3be37c893d8ee2e065

diff --git a/libevent/evthread.c b/libevent/evthread.c
index 2cfb76e9..90e195d5 100644
--- a/libevent/evthread.c
+++ b/libevent/evthread.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2010 Niels Provos, Nick Mathewson
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -76,12 +76,30 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
 	    ? &_original_lock_fns : &_evthread_lock_fns;
 
 	if (!cbs) {
+		if (target->alloc)
+			event_warnx("Trying to disable lock functions after "
+			    "they have been set up will probaby not work.");
 		memset(target, 0, sizeof(_evthread_lock_fns));
 		return 0;
 	}
+	if (target->alloc) {
+		/* Uh oh; we already had locking callbacks set up.*/
+		if (target->lock_api_version == cbs->lock_api_version &&
+		    target->supported_locktypes == cbs->supported_locktypes &&
+		    target->alloc == cbs->alloc &&
+		    target->free == cbs->free &&
+		    target->lock == cbs->lock &&
+		    target->unlock == cbs->unlock) {
+			/* no change -- allow this. */
+			return 0;
+		}
+		event_warnx("Can't change lock callbacks once they have been "
+		    "initialized.");
+		return -1;
+	}
 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
 		memcpy(target, cbs, sizeof(_evthread_lock_fns));
-		return 0;
+		return event_global_setup_locks_(1);
 	} else {
 		return -1;
 	}
@@ -95,8 +113,28 @@ evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
 	    ? &_original_cond_fns : &_evthread_cond_fns;
 
 	if (!cbs) {
+		if (target->alloc_condition)
+			event_warnx("Trying to disable condition functions "
+			    "after they have been set up will probaby not "
+			    "work.");
 		memset(target, 0, sizeof(_evthread_cond_fns));
-	} else if (cbs->alloc_condition && cbs->free_condition &&
+		return 0;
+	}
+	if (target->alloc_condition) {
+		/* Uh oh; we already had condition callbacks set up.*/
+		if (target->condition_api_version == cbs->condition_api_version &&
+		    target->alloc_condition == cbs->alloc_condition &&
+		    target->free_condition == cbs->free_condition &&
+		    target->signal_condition == cbs->signal_condition &&
+		    target->wait_condition == cbs->wait_condition) {
+			/* no change -- allow this. */
+			return 0;
+		}
+		event_warnx("Can't change condition callbacks once they "
+		    "have been initialized.");
+		return -1;
+	}
+	if (cbs->alloc_condition && cbs->free_condition &&
 	    cbs->signal_condition && cbs->wait_condition) {
 		memcpy(target, cbs, sizeof(_evthread_cond_fns));
 	}
@@ -247,6 +285,9 @@ evthread_enable_lock_debuging(void)
 	    sizeof(struct evthread_condition_callbacks));
 	_evthread_cond_fns.wait_condition = debug_cond_wait;
 	_evthread_lock_debugging_enabled = 1;
+
+	/* XXX return value should get checked. */
+	event_global_setup_locks_(0);
 }
 
 int
@@ -270,6 +311,62 @@ _evthread_debug_get_real_lock(void *lock_)
 	return lock->lock;
 }
 
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+	/* there are four cases here:
+	   1) we're turning on debugging; locking is not on.
+	   2) we're turning on debugging; locking is on.
+	   3) we're turning on locking; debugging is not on.
+	   4) we're turning on locking; debugging is on. */
+
+	if (!enable_locks && _original_lock_fns.alloc == NULL) {
+		/* Case 1: allocate a debug lock. */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return debug_lock_alloc(locktype);
+	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+		/* Case 2: wrap the lock in a debug lock. */
+		struct debug_lock *lock;
+		EVUTIL_ASSERT(lock_ != NULL);
+
+		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+			/* We can't wrap it: We need a recursive lock */
+			_original_lock_fns.free(lock_, locktype);
+			return debug_lock_alloc(locktype);
+		}
+		lock = mm_malloc(sizeof(struct debug_lock));
+		if (!lock) {
+			_original_lock_fns.free(lock_, locktype);
+			return NULL;
+		}
+		lock->lock = lock_;
+		lock->locktype = locktype;
+		lock->count = 0;
+		lock->held_by = 0;
+		return lock;
+	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+		/* Case 3: allocate a regular lock */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return _evthread_lock_fns.alloc(locktype);
+	} else {
+		/* Case 4: Fill in a debug lock with a real lock */
+		struct debug_lock *lock = lock_;
+		EVUTIL_ASSERT(enable_locks &&
+		    _evthread_lock_debugging_enabled);
+		EVUTIL_ASSERT(lock->locktype == locktype);
+		EVUTIL_ASSERT(lock->lock == NULL);
+		lock->lock = _original_lock_fns.alloc(
+		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+		if (!lock->lock) {
+			lock->count = -200;
+			mm_free(lock);
+			return NULL;
+		}
+		return lock;
+	}
+}
+
+
 #ifndef EVTHREAD_EXPOSE_STRUCTS
 unsigned long
 _evthreadimpl_get_id()
@@ -337,6 +434,12 @@ _evthreadimpl_is_lock_debugging_enabled(void)
 {
 	return _evthread_lock_debugging_enabled;
 }
+
+int
+_evthreadimpl_locking_enabled(void)
+{
+	return _evthread_lock_fns.lock != NULL;
+}
 #endif
 
 #endif
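
The guard added to evthread_set_lock_callbacks() above is visible to applications that install their own locking functions. Below is a minimal sketch (not part of this patch, assuming a pthreads platform; all sketch_* names are hypothetical) of how an application might register pthread-based lock callbacks before creating any event_base. With this change, repeating the call with identical callbacks still succeeds, but calling it again with different callbacks now fails with a warning instead of silently replacing functions that existing locks may depend on. In practice, most programs simply call evthread_use_pthreads() from <event2/thread.h>, which performs equivalent setup (including condition callbacks, which this sketch omits).

#include <stdlib.h>
#include <pthread.h>
#include <event2/thread.h>

static pthread_mutexattr_t sketch_recursive_attr;

/* Allocate a mutex; honor EVTHREAD_LOCKTYPE_RECURSIVE requests. */
static void *sketch_lock_alloc(unsigned locktype)
{
	pthread_mutex_t *m = malloc(sizeof(*m));
	if (!m)
		return NULL;
	if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
		pthread_mutex_init(m, &sketch_recursive_attr);
	else
		pthread_mutex_init(m, NULL);
	return m;
}

static void sketch_lock_free(void *lock_, unsigned locktype)
{
	pthread_mutex_destroy(lock_);
	free(lock_);
}

/* Return 0 on success, as libevent expects; pthreads already does. */
static int sketch_lock(unsigned mode, void *lock_)
{
	return pthread_mutex_lock(lock_);
}

static int sketch_unlock(unsigned mode, void *lock_)
{
	return pthread_mutex_unlock(lock_);
}

/* Hypothetical helper: call once, before any event_base is created. */
int sketch_setup_threading(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		sketch_lock_alloc,
		sketch_lock_free,
		sketch_lock,
		sketch_unlock
	};

	pthread_mutexattr_init(&sketch_recursive_attr);
	pthread_mutexattr_settype(&sketch_recursive_attr,
	    PTHREAD_MUTEX_RECURSIVE);

	/* A second call with the same callbacks returns 0; a call with
	 * different callbacks now returns -1 and warns. */
	return evthread_set_lock_callbacks(&cbs);
}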