#define K5_MUTEX_STATS_INIT { 0, {0}, {0}, {0}, {0} }
typedef k5_debug_time_t k5_mutex_stats_tmp;
#define k5_mutex_stats_start() get_current_time()
+/* Accumulate lock-wait / hold-time statistics for one mutex.  The plain
+   "extern" declarations become KRB5_CALLCONV-qualified prototypes;
+   NOTE(review): presumably to pin the exported calling convention
+   (Windows builds) -- confirm against the library export list. */
-extern void krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
-                                            k5_mutex_stats_tmp startwait);
-extern void krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
+void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
+                                                   k5_mutex_stats_tmp start);
+void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
#define k5_mutex_lock_update_stats krb5int_mutex_lock_update_stats
#define k5_mutex_unlock_update_stats krb5int_mutex_unlock_update_stats
+/* Report accumulated stats for one mutex.  The empty parameter list
+   (with the intended type only in a comment) avoids naming k5_mutex_t,
+   which is not yet declared at this point in the header. */
-extern void krb5int_mutex_report_stats();
+void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
#else
/* If statistics tracking isn't enabled, these functions don't actually
   do anything.  Declare anyways so we can do type checking etc. */
+/* Prototypes intentionally match the stats-enabled branch above, so
+   callers compile identically either way; the indentation fix below
+   changes whitespace only. */
void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
-                                                 k5_mutex_stats_tmp start);
+                                                   k5_mutex_stats_tmp start);
void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
typedef struct {
    pthread_mutex_t p;
+#ifdef DEBUG_THREADS
+    /* Thread currently holding p; reset to (pthread_t) 0 on unlock.
+       NOTE(review): treating (pthread_t) 0 as "no owner" assumes
+       pthread_t is a scalar type -- POSIX allows it to be a struct;
+       confirm on all supported platforms. */
+    pthread_t owner;
+#endif
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
    k5_os_nothread_mutex n;
#endif
} k5_os_mutex;
+#ifdef DEBUG_THREADS
+/*
+ * Ownership-tracking wrappers around the raw pthread calls: on a
+ * successful lock, record the calling thread in M->owner so unlock and
+ * the assert macros can verify that the unlocking thread is the owner.
+ * M is always a k5_os_mutex *.
+ */
+# ifdef __GNUC__
+/* GCC statement expression: lock, record owner on success, and still
+   yield the pthread_mutex_lock return code as the macro's value. */
+# define k5_pthread_mutex_lock(M) \
+    ({ \
+        k5_os_mutex *_m2 = (M); \
+        int _r2 = pthread_mutex_lock(&_m2->p); \
+        if (_r2 == 0) _m2->owner = pthread_self(); \
+        _r2; \
+    })
+# else
+static inline int
+k5_pthread_mutex_lock(k5_os_mutex *m)
+{
+    int r = pthread_mutex_lock(&m->p);
+    if (r)
+        return r;
+    m->owner = pthread_self();
+    return 0;
+}
+# endif
+/* pthread_t values must be compared with pthread_equal, never ==. */
+# define k5_pthread_assert_locked(M) \
+    (assert(pthread_equal((M)->owner, pthread_self())))
+/* Verify ownership, clear it, then release; value is the unlock result. */
+# define k5_pthread_mutex_unlock(M) \
+    (assert(pthread_equal((M)->owner, pthread_self())), \
+     (M)->owner = (pthread_t) 0, \
+     pthread_mutex_unlock(&(M)->p))
+#else
+# define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
+/* Fixed: the no-op stub must take k5_os_mutex *, not pthread_mutex_t *.
+   Both call sites (k5_os_mutex_assert_locked in each branch below) pass
+   the k5_os_mutex * itself, matching the DEBUG_THREADS variant; the old
+   pthread_mutex_t * parameter produced an incompatible-pointer-type
+   diagnostic. */
+static inline void k5_pthread_assert_locked(k5_os_mutex *m) { }
+# define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
+#endif
+
/* Define as functions to:
   (1) eliminate "statement with no effect" warnings for "0"
   (2) encourage type-checking in calling code */
static inline void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
+/* The old generic no-op assert_locked is removed: the patch now supplies
+   k5_pthread_assert_locked in both arms of the DEBUG_THREADS conditional
+   above, so a second definition here would collide. */
-static inline void k5_pthread_assert_locked(pthread_mutex_t *m) { }
#if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
# include <sched.h>
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
+/* Static initializer must match the k5_os_mutex field order in this
+   configuration: { p, owner (DEBUG_THREADS only), n }.  Prefer the
+   error-checking pthread initializer when the platform provides it. */
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+    { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
+      K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# elif defined(DEBUG_THREADS)
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+    { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
+      K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
    { PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# endif
# define k5_os_mutex_finish_init(M) \
    k5_os_nothread_mutex_finish_init(&(M)->n)
+/* Route through the ownership-tracking wrappers when real pthreads are
+   loaded; otherwise fall back to the no-thread stubs.  Fixed: the lock
+   wrapper takes the k5_os_mutex * itself (the non-debug macro expands to
+   pthread_mutex_lock(&(M)->p), and the inline variant takes
+   k5_os_mutex *), so pass M -- passing &(M)->p would double-dereference
+   (&(&(M)->p)->p) and mismatch the inline's parameter type.  The unlock
+   macro below already passes M; this makes lock consistent with it. */
# define k5_os_mutex_lock(M) \
    return_after_yield(K5_PTHREADS_LOADED \
-                      ? pthread_mutex_lock(&(M)->p) \
+                      ? k5_pthread_mutex_lock(M) \
                      : k5_os_nothread_mutex_lock(&(M)->n))
# define k5_os_mutex_unlock(M) \
    (MAYBE_SCHED_YIELD(), \
     (K5_PTHREADS_LOADED \
-     ? pthread_mutex_unlock(&(M)->p) \
+     ? k5_pthread_mutex_unlock(M) \
      : k5_os_nothread_mutex_unlock(&(M)->n)))
+/* NOTE(review): the assert_unlocked macro below appears truncated in
+   this hunk -- the "(K5_PTHREADS_LOADED ? ..." arm of the conditional is
+   missing between its two lines; verify against the full file. */
# define k5_os_mutex_assert_unlocked(M) \
                      : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
+/* assert_locked now passes M itself, matching the k5_pthread wrappers'
+   k5_os_mutex * parameter. */
# define k5_os_mutex_assert_locked(M) \
    (K5_PTHREADS_LOADED \
-     ? k5_pthread_assert_locked(&(M)->p) \
+     ? k5_pthread_assert_locked(M) \
     : k5_os_nothread_mutex_assert_locked(&(M)->n))
#else
+/* Pthreads always available: no k5_os_nothread_mutex member, so the
+   initializer is { p } or { p, owner } depending on DEBUG_THREADS.
+   Prefer the error-checking initializer when available. */
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# ifdef DEBUG_THREADS
+# ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+    { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+    { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
+# endif
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
    { PTHREAD_MUTEX_INITIALIZER }
+# endif
static inline int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
+/* NOTE(review): under DEBUG_THREADS, k5_os_mutex_init does not reset
+   (M)->owner; looks benign because owner is only read after a successful
+   lock records it -- confirm. */
# define k5_os_mutex_init(M) pthread_mutex_init(&(M)->p, 0)
# define k5_os_mutex_destroy(M) pthread_mutex_destroy(&(M)->p)
+/* Lock/unlock/assert now go through the ownership-tracking wrappers,
+   which take the k5_os_mutex * itself. */
-# define k5_os_mutex_lock(M) return_after_yield(pthread_mutex_lock(&(M)->p))
-# define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),pthread_mutex_unlock(&(M)->p))
+# define k5_os_mutex_lock(M) return_after_yield(k5_pthread_mutex_lock(M))
+# define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
# define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
-# define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(&(M)->p)
+# define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(M)
#endif /* is pthreads always available? */
+/* Destroy after verifying the mutex is unlocked and reporting stats.
+   The loc_last update is now done while briefly holding the lock, so
+   the final location record is written under mutual exclusion rather
+   than racing with a late unlocking thread. */
#define k5_mutex_destroy(M) \
    (k5_os_mutex_assert_unlocked(&(M)->os), \
     krb5int_mutex_report_stats(M), \
-     (M)->loc_last = K5_DEBUG_LOC, \
+     k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
     k5_os_mutex_destroy(&(M)->os))
#ifdef __GNUC__
+/* NOTE(review): this hunk appears truncated -- the GNUC
+   statement-expression body of k5_mutex_lock is missing between the
+   #define line and the closing brace below; verify against the full
+   file before applying. */
#define k5_mutex_lock(M) \
}
#define k5_mutex_lock(M) k5_mutex_lock_1(M, K5_DEBUG_LOC)
#endif
+/*
+ * k5_mutex_unlock: converted from an inline helper to a comma-expression
+ * macro.  Order matters: assert ownership, update stats, and record
+ * loc_last while the lock is still held, then release; the macro's value
+ * is the k5_os_mutex_unlock result.  (The old inline updated loc_last
+ * after unlocking, i.e. outside the lock.)  NOTE(review): M is now
+ * evaluated several times -- callers must not pass an expression with
+ * side effects.
+ */
-static inline int k5_mutex_unlock_1(k5_mutex_t *m, k5_debug_loc l)
-{
-    int err = 0;
-    k5_mutex_unlock_update_stats(&m->stats);
-    err = k5_os_mutex_unlock(&m->os);
-    if (err)
-        return err;
-    m->loc_last = l;
-    return err;
-}
-#define k5_mutex_unlock(M) k5_mutex_unlock_1(M, K5_DEBUG_LOC)
+#define k5_mutex_unlock(M) \
+    (k5_mutex_assert_locked(M), \
+     k5_mutex_unlock_update_stats(&(M)->stats), \
+     (M)->loc_last = K5_DEBUG_LOC, \
+     k5_os_mutex_unlock(&(M)->os))
#define k5_mutex_assert_locked(M) k5_os_mutex_assert_locked(&(M)->os)
#define k5_mutex_assert_unlocked(M) k5_os_mutex_assert_unlocked(&(M)->os)