4 * Copyright 2004,2005,2006,2007 by the Massachusetts Institute of Technology.
7 * Export of this software from the United States of America may
8 * require a specific license from the United States Government.
9 * It is the responsibility of any person or organization contemplating
10 * export to obtain such a license before exporting.
12 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
13 * distribute this software and its documentation for any purpose and
14 * without fee is hereby granted, provided that the above copyright
15 * notice appear in all copies and that both that copyright notice and
16 * this permission notice appear in supporting documentation, and that
17 * the name of M.I.T. not be used in advertising or publicity pertaining
18 * to distribution of the software without specific, written prior
19 * permission. Furthermore if you modify this software you must label
20 * your software as modified software and not distribute it in such a
21 * fashion that it might be confused with the original M.I.T. software.
22 * M.I.T. makes no representations about the suitability of
23 * this software for any purpose. It is provided "as is" without express
24 * or implied warranty.
27 * Preliminary thread support.
35 # define KRB5_CALLCONV
37 #ifndef KRB5_CALLCONV_C
38 # define KRB5_CALLCONV_C
41 \f/* Interface (tentative):
45 // Between these two, we should be able to do pure compile-time
46 // and pure run-time initialization.
47 // POSIX: partial initializer is PTHREAD_MUTEX_INITIALIZER,
48 // finish does nothing
49 // Windows: partial initializer is an invalid handle,
50 // finish does the real initialization work
51 // debug: partial initializer sets one magic value,
52 // finish verifies and sets a new magic value for
53 // lock/unlock to check
54 k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;
55 int k5_mutex_finish_init(k5_mutex_t *);
56 // for dynamic allocation
57 int k5_mutex_init(k5_mutex_t *);
58 // Must work for both kinds of alloc, even if it means adding flags.
59 int k5_mutex_destroy(k5_mutex_t *);
62 int k5_mutex_lock(k5_mutex_t *);
63 int k5_mutex_unlock(k5_mutex_t *);
65 In each library, one new function to finish the static mutex init,
66 and any other library-wide initialization that might be desired.
67 On POSIX, this function would be called via the second support
68 function (see below). On Windows, it would be called at library
69 load time. These functions, or functions they call, should be the
70 only places that k5_mutex_finish_init gets called.
72 A second function or macro called at various possible "first" entry
73 points which either calls pthread_once on the first function
74 (POSIX), or checks some flag set by the first function (Windows,
75 debug support), and possibly returns an error. (In the
76 non-threaded case, a simple flag can be used to avoid multiple
77 invocations, and the mutexes don't need run-time initialization
80 A third function for library termination calls mutex_destroy on
81 each mutex for the library. This function would be called
82 automatically at library unload time. If it turns out to be needed
83 at exit time for libraries that don't get unloaded, perhaps we
84 should also use atexit(). Any static mutexes should be cleaned up
85 with k5_mutex_destroy here.
87 How does that second support function invoke the first support
88 function only once? Through something modelled on pthread_once
89 that I haven't written up yet. Probably:
91 k5_once_t foo_once = K5_ONCE_INIT;
92 k5_once(k5_once_t *, void (*)(void));
94 For POSIX: Map onto pthread_once facility.
95 For non-threaded case: A simple flag.
96 For Windows: Not needed; library init code takes care of it.
98 XXX: A general k5_once mechanism isn't possible for Windows,
99 without faking it through named mutexes or mutexes initialized at
100 startup. I was only using it in one place outside these headers,
101 so I'm dropping the general scheme. Eventually the existing uses
102 in k5-thread.h and k5-platform.h will be converted to pthread_once
106 Thread-specific data:
108 // TSD keys are limited in number in gssapi/krb5/com_err; enumerate
109 // them all. This allows support code init to allocate the
110 // necessary storage for pointers all at once, and avoids any
111 // possible error in key creation.
112 enum { ... } k5_key_t;
113 // Register destructor function. Called in library init code.
114 int k5_key_register(k5_key_t, void (*destructor)(void *));
115 // Returns NULL or data.
116 void *k5_getspecific(k5_key_t);
117 // Returns error if key out of bounds, or the pointer table can't
118 // be allocated. A call to k5_key_register must have happened first.
119 // This may trigger the calling of pthread_setspecific on POSIX.
120 int k5_setspecific(k5_key_t, void *);
121 // Called in library termination code.
122 // Trashes data in all threads, calling the registered destructor
123 // (but calling it from the current thread).
124 int k5_key_delete(k5_key_t);
126 For the non-threaded version, the support code will have a static
127 array indexed by k5_key_t values, and get/setspecific simply access
130 The TSD destructor table is global state, protected by a mutex if
133 Debug support: Not much. Might check if k5_key_register has been
134 called and abort if not.
137 Any actual external symbols will use the krb5int_ prefix. The k5_
138 names will be simple macros or inline functions to rename the
139 external symbols, or slightly more complex ones to expand the
140 implementation inline (e.g., map to POSIX versions and/or debug
141 code using __FILE__ and the like).
144 More to be added, perhaps. */
146 #define DEBUG_THREADS
147 #define DEBUG_THREADS_LOC
148 #undef DEBUG_THREADS_SLOW /* debugging stuff that'll slow things down? */
149 #undef DEBUG_THREADS_STATS
153 /* For tracking locations, of (e.g.) last lock or unlock of mutex. */
154 #ifdef DEBUG_THREADS_LOC
156 const char *filename;
159 #define K5_DEBUG_LOC_INIT { __FILE__, __LINE__ }
161 #define K5_DEBUG_LOC (__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT)
163 static inline k5_debug_loc k5_debug_make_loc(const char *file, int line)
170 #define K5_DEBUG_LOC (k5_debug_make_loc(__FILE__,__LINE__))
172 #else /* ! DEBUG_THREADS_LOC */
173 typedef char k5_debug_loc;
174 #define K5_DEBUG_LOC_INIT 0
175 #define K5_DEBUG_LOC 0
178 #define k5_debug_update_loc(L) ((L) = K5_DEBUG_LOC)
182 /* Statistics gathering:
184 Currently incomplete, don't try enabling it.
186 Eventually: Report number of times locked, total and standard
187 deviation of the time the lock was held, total and std dev time
188 spent waiting for the lock. "Report" will probably mean "write a
189 line to a file if a magic environment variable is set." */
191 #ifdef DEBUG_THREADS_STATS
193 #if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME))
197 # include <sys/time.h>
205 #include <inttypes.h>
206 typedef uint64_t k5_debug_timediff_t; /* or long double */
207 typedef struct timeval k5_debug_time_t;
208 static inline k5_debug_timediff_t
209 timediff(k5_debug_time_t t2, k5_debug_time_t t1)
211 return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec);
213 static inline k5_debug_time_t get_current_time(void)
216 if (gettimeofday(&tv,0) < 0) { tv.tv_sec = tv.tv_usec = 0; }
219 struct k5_timediff_stats {
220 k5_debug_timediff_t valmin, valmax, valsum, valsqsum;
224 k5_debug_time_t time_acquired, time_created;
225 struct k5_timediff_stats lockwait, lockheld;
226 } k5_debug_mutex_stats;
227 #define k5_mutex_init_stats(S) \
228 (memset((S), 0, sizeof(k5_debug_mutex_stats)), \
229 (S)->time_created = get_current_time(), \
231 #define k5_mutex_finish_init_stats(S) (0)
232 #define K5_MUTEX_STATS_INIT { 0, {0}, {0}, {0}, {0} }
233 typedef k5_debug_time_t k5_mutex_stats_tmp;
234 #define k5_mutex_stats_start() get_current_time()
235 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
236 k5_mutex_stats_tmp start);
237 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
238 #define k5_mutex_lock_update_stats krb5int_mutex_lock_update_stats
239 #define k5_mutex_unlock_update_stats krb5int_mutex_unlock_update_stats
240 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
244 typedef char k5_debug_mutex_stats;
245 #define k5_mutex_init_stats(S) (*(S) = 's', 0)
246 #define k5_mutex_finish_init_stats(S) (0)
247 #define K5_MUTEX_STATS_INIT 's'
248 typedef int k5_mutex_stats_tmp;
249 #define k5_mutex_stats_start() (0)
252 k5_mutex_lock_update_stats(k5_debug_mutex_stats *m __attribute__((unused)),
253 k5_mutex_stats_tmp t __attribute__((unused)))
257 # define k5_mutex_lock_update_stats(M,S) (S)
259 #define k5_mutex_unlock_update_stats(M) (*(M) = 's')
261 /* If statistics tracking isn't enabled, these functions don't actually
262 do anything. Declare anyways so we can do type checking etc. */
263 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
264 k5_mutex_stats_tmp start);
265 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
266 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
268 #define krb5int_mutex_report_stats(M) ((M)->stats = 'd')
274 /* The mutex structure we use, k5_mutex_t, has some OS-specific bits,
275 and some non-OS-specific bits for debugging and profiling.
277 The OS specific bits, in k5_os_mutex, break down into three primary
278 implementations, POSIX threads, Windows threads, and no thread
279 support. However, the POSIX thread version is further subdivided:
280 In one case, we can determine at run time whether the thread
281 library is linked into the application, and use it only if it is
282 present; in the other case, we cannot, and the thread library must
283 be linked in always, but can be used unconditionally. In the
284 former case, the k5_os_mutex structure needs to hold both the POSIX
285 and the non-threaded versions.
287 The various k5_os_mutex_* operations are the OS-specific versions,
288 applied to the OS-specific data, and k5_mutex_* uses k5_os_mutex_*
289 to do the OS-specific parts of the work. */
291 /* Define the OS mutex bit. */
293 /* First, if we're not actually doing multiple threads, do we
294 want the debug support or not? */
298 enum k5_mutex_init_states {
299 K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
300 K5_MUTEX_DEBUG_INITIALIZED,
301 K5_MUTEX_DEBUG_DESTROYED
303 enum k5_mutex_flag_states {
304 K5_MUTEX_DEBUG_UNLOCKED = 0x23,
305 K5_MUTEX_DEBUG_LOCKED
309 enum k5_mutex_init_states initialized;
310 enum k5_mutex_flag_states locked;
311 } k5_os_nothread_mutex;
/* Debug (non-threaded) mutex: static initializer leaves the mutex
   "partly initialized" so that a missing k5_mutex_finish_init call
   can be caught by the asserts below. */
313 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
314 { K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }
/* Finish static init: must be called exactly once, on a mutex that is
   still partly initialized and unlocked; promotes it to fully
   initialized.  Evaluates to 0 (success). */
316 # define k5_os_nothread_mutex_finish_init(M) \
317 (assert((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED), \
318 assert((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED), \
319 assert((M)->locked == K5_MUTEX_DEBUG_UNLOCKED), \
320 (M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
/* Dynamic init: no partial state to verify, just stamp the magic
   values.  Evaluates to 0. */
321 # define k5_os_nothread_mutex_init(M) \
322 ((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, \
323 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
/* Destroy: only a fully initialized mutex may be destroyed; mark it so
   any later use trips the initialized-state asserts. */
324 # define k5_os_nothread_mutex_destroy(M) \
325 (assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
326 (M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)
/* Lock: in the single-threaded debug build a lock can never block, so
   "locking" is just asserting it wasn't already held and flipping the
   flag (catches recursive/double locking). */
328 # define k5_os_nothread_mutex_lock(M) \
329 (k5_os_nothread_mutex_assert_unlocked(M), \
330 (M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
/* Unlock: symmetric check — catches unlocking a mutex not held. */
331 # define k5_os_nothread_mutex_unlock(M) \
332 (k5_os_nothread_mutex_assert_locked(M), \
333 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
/* Assert helpers: the pair of != / == asserts distinguishes "wrong
   magic value entirely" (corruption / uninitialized memory) from the
   merely-wrong lock state, giving a more precise assert message. */
335 # define k5_os_nothread_mutex_assert_locked(M) \
336 (assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
337 assert((M)->locked != K5_MUTEX_DEBUG_UNLOCKED), \
338 assert((M)->locked == K5_MUTEX_DEBUG_LOCKED))
339 # define k5_os_nothread_mutex_assert_unlocked(M) \
340 (assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
341 assert((M)->locked != K5_MUTEX_DEBUG_LOCKED), \
342 assert((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))
344 #else /* threads disabled and not debugging */
346 typedef char k5_os_nothread_mutex;
347 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER 0
348 /* Empty inline functions avoid the "statement with no effect"
349 warnings, and do better type-checking than functions that don't use
351 static inline int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
354 static inline int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
357 static inline int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
360 static inline int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
363 static inline int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
366 # define k5_os_nothread_mutex_assert_locked(M) ((void)0)
367 # define k5_os_nothread_mutex_assert_unlocked(M) ((void)0)
372 2 - function has not been run
373 3 - function has been run
374 4 - function is being run -- deadlock detected */
375 typedef unsigned char k5_os_nothread_once_t;
376 # define K5_OS_NOTHREAD_ONCE_INIT 2
377 # define k5_os_nothread_once(O,F) \
379 : *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0) \
380 : (assert(*(O) != 4), assert(*(O) == 2 || *(O) == 3), 0))
/* No thread support compiled in: the generic k5_os_mutex_* interface
   maps directly onto the no-thread implementation above (which is
   either the debug-checked version or no-ops, depending on
   DEBUG_THREADS). */
384 #ifndef ENABLE_THREADS
386 typedef k5_os_nothread_mutex k5_os_mutex;
387 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
388 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
389 # define k5_os_mutex_finish_init k5_os_nothread_mutex_finish_init
390 # define k5_os_mutex_init k5_os_nothread_mutex_init
391 # define k5_os_mutex_destroy k5_os_nothread_mutex_destroy
392 # define k5_os_mutex_lock k5_os_nothread_mutex_lock
393 # define k5_os_mutex_unlock k5_os_nothread_mutex_unlock
394 # define k5_os_mutex_assert_locked k5_os_nothread_mutex_assert_locked
395 # define k5_os_mutex_assert_unlocked k5_os_nothread_mutex_assert_unlocked
/* Likewise, k5_once is the simple flag-based single-threaded version. */
397 # define k5_once_t k5_os_nothread_once_t
398 # define K5_ONCE_INIT K5_OS_NOTHREAD_ONCE_INIT
399 # define k5_once k5_os_nothread_once
403 # include <pthread.h>
405 /* Weak reference support, etc.
407 Linux: Stub mutex routines exist, but pthread_once does not.
409 Solaris <10: In libc there's a pthread_once that doesn't seem to do
410 anything. Bleah. But pthread_mutexattr_setrobust_np is defined
411 only in libpthread. However, some version of GNU libc (Red Hat's
412 Fedora Core 5, reportedly) seems to have that function, but no
413 declaration, so we'd have to declare it in order to test for its
414 address. We now have tests to see if pthread_once actually works,
415 so stick with that for now.
417 Solaris 10: The real thread support now lives in libc, and
418 libpthread is just a filter object. So we might as well use the
419 real functions unconditionally. Since we haven't got a test for
420 this property yet, we use NO_WEAK_PTHREADS defined in aclocal.m4
421 depending on the OS type.
423 IRIX 6.5 stub pthread support in libc is really annoying. The
424 pthread_mutex_lock function returns ENOSYS for a program not linked
425 against -lpthread. No link-time failure, no weak symbols, etc.
426 The C library doesn't provide pthread_once; we can use weak
427 reference support for that.
429 If weak references are not available, then for now, we assume that
430 the pthread support routines will always be available -- either the
431 real thing, or functional stubs that merely prohibit creating
434 If we find a platform with non-functional stubs and no weak
435 references, we may have to resort to some hack like dlsym on the
436 symbol tables of the current process. */
437 #if defined(HAVE_PRAGMA_WEAK_REF) && !defined(NO_WEAK_PTHREADS)
438 # pragma weak pthread_once
439 # pragma weak pthread_mutex_lock
440 # pragma weak pthread_mutex_unlock
441 # pragma weak pthread_mutex_destroy
442 # pragma weak pthread_mutex_init
443 # pragma weak pthread_self
444 # pragma weak pthread_equal
445 extern int krb5int_pthread_loaded(void);
446 # define K5_PTHREADS_LOADED (krb5int_pthread_loaded())
448 /* no pragma weak support */
449 # define K5_PTHREADS_LOADED (1)
452 #if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
453 /* IRIX 6.5 stub pthread support in libc is really annoying. The
454 pthread_mutex_lock function returns ENOSYS for a program not linked
455 against -lpthread. No link-time failure, no weak reference tests,
458 The C library doesn't provide pthread_once; we can use weak
459 reference support for that. */
460 # ifndef HAVE_PRAGMA_WEAK_REF
461 # if defined(__GNUC__) && __GNUC__ < 3
462 # error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
464 # error "Weak reference support is required"
469 #if defined(HAVE_PRAGMA_WEAK_REF) && !defined(NO_WEAK_PTHREADS)
470 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
472 /* Can't rely on useful stubs -- see above regarding Solaris. */
475 k5_os_nothread_once_t n;
477 # define K5_ONCE_INIT { PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
478 # define k5_once(O,F) (K5_PTHREADS_LOADED \
479 ? pthread_once(&(O)->o,F) \
480 : k5_os_nothread_once(&(O)->n,F))
482 typedef pthread_once_t k5_once_t;
483 # define K5_ONCE_INIT PTHREAD_ONCE_INIT
484 # define k5_once pthread_once
492 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
493 k5_os_nothread_mutex n;
499 # define k5_pthread_mutex_lock(M) \
501 k5_os_mutex *_m2 = (M); \
502 int _r2 = pthread_mutex_lock(&_m2->p); \
503 if (_r2 == 0) _m2->owner = pthread_self(); \
508 k5_pthread_mutex_lock(k5_os_mutex *m)
510 int r = pthread_mutex_lock(&m->p);
513 m->owner = pthread_self();
517 # define k5_pthread_assert_locked(M) \
518 (K5_PTHREADS_LOADED \
519 ? assert(pthread_equal((M)->owner, pthread_self())) \
521 # define k5_pthread_mutex_unlock(M) \
522 (k5_pthread_assert_locked(M), \
523 (M)->owner = (pthread_t) 0, \
524 pthread_mutex_unlock(&(M)->p))
526 # define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
527 static inline void k5_pthread_assert_locked(k5_os_mutex *m) { }
528 # define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
531 /* Define as functions to:
532 (1) eliminate "statement with no effect" warnings for "0"
533 (2) encourage type-checking in calling code */
535 static inline void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
537 #if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
539 # if !HAVE_SCHED_YIELD
540 # pragma weak sched_yield
541 # define MAYBE_SCHED_YIELD() ((void)((&sched_yield != NULL) ? sched_yield() : 0))
543 # define MAYBE_SCHED_YIELD() ((void)sched_yield())
546 # define MAYBE_SCHED_YIELD() ((void)0)
549 /* It may not be obvious why this function is desirable.
551 I want to call pthread_mutex_lock, then sched_yield, then look at
552 the return code from pthread_mutex_lock. That can't be implemented
553 in a macro without a temporary variable, or GNU C extensions.
555 There used to be an inline function which did it, with both
556 functions called from the inline function. But that messes with
557 the debug information on a lot of configurations, and you can't
558 tell where the inline function was called from. (Typically, gdb
559 gives you the name of the function from which the inline function
560 was called, and a line number within the inline function itself.)
562 With this auxiliary function, pthread_mutex_lock can be called at
563 the invoking site via a macro; once it returns, the inline function
564 is called (with messed-up line-number info for gdb hopefully
565 localized to just that call). */
567 #define return_after_yield(R) \
570 MAYBE_SCHED_YIELD(); \
574 static inline int return_after_yield(int r)
581 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
583 # if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
584 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
585 { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
586 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
587 # elif defined(DEBUG_THREADS)
588 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
589 { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
590 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
592 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
593 { PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
/* Weak-pthread case: each operation maintains BOTH halves of the
   k5_os_mutex (the pthread mutex .p and the no-thread fallback .n),
   and K5_PTHREADS_LOADED decides at run time which one actually
   enforces locking.  The .n half is always updated/initialized so a
   later decision is consistent either way. */
596 # define k5_os_mutex_finish_init(M) \
597 k5_os_nothread_mutex_finish_init(&(M)->n)
/* Init/destroy touch the fallback unconditionally and the pthread
   mutex only if the thread library is actually present. */
598 # define k5_os_mutex_init(M) \
599 (k5_os_nothread_mutex_init(&(M)->n), \
600 (K5_PTHREADS_LOADED \
601 ? pthread_mutex_init(&(M)->p, 0) \
603 # define k5_os_mutex_destroy(M) \
604 (k5_os_nothread_mutex_destroy(&(M)->n), \
605 (K5_PTHREADS_LOADED \
606 ? pthread_mutex_destroy(&(M)->p) \
/* Lock routes through return_after_yield so DEBUG_THREADS_SLOW can
   force a reschedule right after acquisition (shakes out races). */
609 # define k5_os_mutex_lock(M) \
610 return_after_yield(K5_PTHREADS_LOADED \
611 ? k5_pthread_mutex_lock(M) \
612 : k5_os_nothread_mutex_lock(&(M)->n))
/* Unlock yields first (when enabled) for the same reason. */
613 # define k5_os_mutex_unlock(M) \
614 (MAYBE_SCHED_YIELD(), \
615 (K5_PTHREADS_LOADED \
616 ? k5_pthread_mutex_unlock(M) \
617 : k5_os_nothread_mutex_unlock(&(M)->n)))
/* Assertions likewise dispatch on whether pthreads is loaded. */
619 # define k5_os_mutex_assert_unlocked(M) \
620 (K5_PTHREADS_LOADED \
621 ? k5_pthread_assert_unlocked(&(M)->p) \
622 : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
623 # define k5_os_mutex_assert_locked(M) \
624 (K5_PTHREADS_LOADED \
625 ? k5_pthread_assert_locked(M) \
626 : k5_os_nothread_mutex_assert_locked(&(M)->n))
630 # ifdef DEBUG_THREADS
631 # ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
632 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
633 { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
635 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
636 { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
639 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
640 { PTHREAD_MUTEX_INITIALIZER }
643 static inline int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
644 # define k5_os_mutex_init(M) pthread_mutex_init(&(M)->p, 0)
645 # define k5_os_mutex_destroy(M) pthread_mutex_destroy(&(M)->p)
646 # define k5_os_mutex_lock(M) return_after_yield(k5_pthread_mutex_lock(M))
647 # define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
649 # define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
650 # define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(M)
652 #endif /* is pthreads always available? */
/* Windows: no compile-time mutex initializer exists, so the static
   initializer stores an invalid handle (and is_locked = 0) and the
   real CreateMutex happens in finish_init at library load time. */
661 # define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }
/* Finish static init: the assert catches double initialization; a
   NULL return from CreateMutex maps to the GetLastError code. */
663 # define k5_os_mutex_finish_init(M) \
664 (assert((M)->h == INVALID_HANDLE_VALUE), \
665 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
/* Dynamic init: also clears is_locked, since dynamically allocated
   storage did not go through the static initializer. */
666 # define k5_os_mutex_init(M) \
667 ((M)->is_locked = 0, \
668 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
/* Destroy: close the handle and zero it on success; on failure
   return the Windows error code. */
669 # define k5_os_mutex_destroy(M) \
670 (CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())
672 static inline int k5_os_mutex_lock(k5_os_mutex *m)
675 res = WaitForSingleObject(m->h, INFINITE);
676 if (res == WAIT_FAILED)
677 return GetLastError();
678 /* Eventually these should be turned into some reasonable error
680 assert(res != WAIT_TIMEOUT);
681 assert(res != WAIT_ABANDONED);
682 assert(res == WAIT_OBJECT_0);
683 /* Avoid locking twice. */
684 assert(m->is_locked == 0);
/* Unlock: the is_locked flag (set by the lock path) catches unlocking
   a mutex this code doesn't hold; ReleaseMutex failure maps to the
   GetLastError code. */
689 # define k5_os_mutex_unlock(M) \
690 (assert((M)->is_locked == 1), \
691 (M)->is_locked = 0, \
692 ReleaseMutex((M)->h) ? 0 : GetLastError())
/* No cheap way to assert lock state from an arbitrary thread on
   Windows handles, so these are no-ops here. */
694 # define k5_os_mutex_assert_unlocked(M) ((void)0)
695 # define k5_os_mutex_assert_locked(M) ((void)0)
699 # error "Thread support enabled, but thread system unknown"
707 k5_debug_loc loc_last, loc_created;
709 k5_debug_mutex_stats stats;
711 #define K5_MUTEX_PARTIAL_INITIALIZER \
712 { K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT, \
713 K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
714 static inline int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
716 int err = k5_os_mutex_init(&m->os);
718 m->loc_created = m->loc_last = l;
719 err = k5_mutex_init_stats(&m->stats);
723 #define k5_mutex_init(M) k5_mutex_init_1((M), K5_DEBUG_LOC)
724 static inline int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
726 int err = k5_os_mutex_finish_init(&m->os);
728 m->loc_created = m->loc_last = l;
729 err = k5_mutex_finish_init_stats(&m->stats);
733 #define k5_mutex_finish_init(M) k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
/* Destroy a k5_mutex_t: assert nobody holds it, emit any gathered
   statistics, then take and release the lock once purely so loc_last
   records this call site as the final touch (useful when debugging a
   use-after-destroy), and finally tear down the OS mutex. */
734 #define k5_mutex_destroy(M) \
735 (k5_os_mutex_assert_unlocked(&(M)->os), \
736 krb5int_mutex_report_stats(M), \
737 k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
738 k5_os_mutex_destroy(&(M)->os))
740 #define k5_mutex_lock(M) \
743 k5_mutex_stats_tmp _stats = k5_mutex_stats_start(); \
744 k5_mutex_t *_m = (M); \
745 _err = k5_os_mutex_lock(&_m->os); \
746 if (_err == 0) _m->loc_last = K5_DEBUG_LOC; \
747 if (_err == 0) k5_mutex_lock_update_stats(&_m->stats, _stats); \
751 static inline int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
754 k5_mutex_stats_tmp stats = k5_mutex_stats_start();
755 err = k5_os_mutex_lock(&m->os);
759 k5_mutex_lock_update_stats(&m->stats, stats);
762 #define k5_mutex_lock(M) k5_mutex_lock_1(M, K5_DEBUG_LOC)
/* Unlock a k5_mutex_t: verify it is held, fold the hold time into the
   stats, record this call site in loc_last, then do the OS unlock.
   The expression's value is the OS unlock result. */
764 #define k5_mutex_unlock(M) \
765 (k5_mutex_assert_locked(M), \
766 k5_mutex_unlock_update_stats(&(M)->stats), \
767 (M)->loc_last = K5_DEBUG_LOC, \
768 k5_os_mutex_unlock(&(M)->os))
/* Lock-state assertions forward to the OS-specific checks. */
770 #define k5_mutex_assert_locked(M) k5_os_mutex_assert_locked(&(M)->os)
771 #define k5_mutex_assert_unlocked(M) k5_os_mutex_assert_unlocked(&(M)->os)
/* Shorter aliases kept for existing callers. */
773 #define k5_assert_locked k5_mutex_assert_locked
774 #define k5_assert_unlocked k5_mutex_assert_unlocked
777 /* Thread-specific data; implemented in a support file, because we'll
778 need to keep track of some global data for cleanup purposes.
780 Note that the callback function type is such that the C library
781 routine free() is a valid callback. */
784 K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
785 K5_KEY_GSS_KRB5_CCACHE_NAME,
788 /* rename shorthand symbols for export */
/* Thread-specific data API: the k5_ names used throughout the tree
   are macros for the exported krb5int_ symbols (keeps the external
   namespace clean while the internal code stays readable). */
789 #define k5_key_register krb5int_key_register
790 #define k5_getspecific krb5int_getspecific
791 #define k5_setspecific krb5int_setspecific
792 #define k5_key_delete krb5int_key_delete
/* Declared with the short names; the macros above rewrite them to the
   krb5int_ symbols at compile time.  The destructor registered via
   k5_key_register is called on each thread's value at deletion. */
793 extern int k5_key_register(k5_key_t, void (*)(void *));
794 extern void *k5_getspecific(k5_key_t);
795 extern int k5_setspecific(k5_key_t, void *);
796 extern int k5_key_delete(k5_key_t);
/* Heap-allocating mutex interface exported for plugins, which must
   not depend on the configuration-specific k5_mutex_t layout; the
   library allocates, and krb5int_mutex_free releases. */
798 extern int KRB5_CALLCONV krb5int_mutex_alloc (k5_mutex_t **);
799 extern void KRB5_CALLCONV krb5int_mutex_free (k5_mutex_t *);
800 extern int KRB5_CALLCONV krb5int_mutex_lock (k5_mutex_t *);
801 extern int KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);
803 /* In time, many of the definitions above should move into the support
804 library, and this file should be greatly simplified. For type
805 definitions, that'll take some work, since other data structures
806 incorporate mutexes directly, and our mutex type is dependent on
807 configuration options and system attributes. For most functions,
808 though, it should be relatively easy.
810 For now, plugins should use the exported functions, and not the
811 above macros, and use krb5int_mutex_alloc for allocations. */
814 #define k5_mutex_lock krb5int_mutex_lock
815 #undef k5_mutex_unlock
816 #define k5_mutex_unlock krb5int_mutex_unlock
819 #endif /* multiple inclusion? */