+2005-01-14 Ken Raeburn <raeburn@mit.edu>
+
+ * k5-thread.h [HAVE_PTHREAD && HAVE_PRAGMA_WEAK_REF]: Mark
+ pthread_self and pthread_equal as weak references.
+
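Background for this entry: weak references let the library discover at run time whether libpthread was actually linked in before calling into it; this is the kind of check behind the K5_PTHREADS_LOADED test used in the hunks below. A minimal sketch of the idiom, assuming a toolchain that honors #pragma weak (the helper name here is illustrative, not one k5-thread.h defines):

    #include <pthread.h>

    #pragma weak pthread_self
    #pragma weak pthread_equal

    /* With weak references, unresolved symbols become null at run time
       rather than link errors, so their addresses reveal whether
       libpthread is present. */
    static int pthreads_loaded(void)
    {
        return &pthread_self != 0 && &pthread_equal != 0;
    }
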
+2005-01-13 Ken Raeburn <raeburn@mit.edu>
+
+ * k5-thread.h (k5_os_mutex) [pthread case]: Add new field "owner"
+ if DEBUG_THREADS.
+ (k5_pthread_mutex_lock, k5_pthread_mutex_unlock,
+	k5_pthread_assert_locked): New macros/functions; if DEBUG_THREADS
+	and thread support is loaded, set or check the owner field.
+ (K5_OS_MUTEX_PARTIAL_INITIALIZER) [pthread case && DEBUG_THREADS]:
+ Set the owner field. If PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+ is defined, use it.
+ (k5_os_mutex_lock, k5_os_mutex_unlock, k5_os_mutex_assert_locked)
+ [pthread case]: Use k5_pthread_ versions.
+ (k5_mutex_destroy): Update the location data with the mutex
+ locked, before destroying it.
+ (k5_mutex_unlock): Update the location data while the mutex is
+ still locked, and check the assertion that the mutex really is
+ locked. Convert inline function to macro.
+
+ * k5-thread.h (krb5int_mutex_lock_update_stats,
+ krb5int_mutex_unlock_update_stats, krb5int_mutex_report_stats)
+ [!DEBUG_THREADS_STATS]: Declare KRB5_CALLCONV.
+
2005-01-04 Jeffrey Altman <jaltman@mit.edu>
* krb5.hin: add prototype for krb5_is_thread_safe
# pragma weak pthread_mutex_unlock
# pragma weak pthread_mutex_destroy
# pragma weak pthread_mutex_init
+# pragma weak pthread_self
+# pragma weak pthread_equal
# ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
# pragma weak pthread_mutexattr_setrobust_np
# endif
typedef struct {
pthread_mutex_t p;
+#ifdef DEBUG_THREADS
+ pthread_t owner;
+#endif
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
k5_os_nothread_mutex n;
#endif
} k5_os_mutex;
+#ifdef DEBUG_THREADS
+# ifdef __GNUC__
+# define k5_pthread_mutex_lock(M) \
+ ({ \
+ k5_os_mutex *_m2 = (M); \
+ int _r2 = pthread_mutex_lock(&_m2->p); \
+ if (_r2 == 0) _m2->owner = pthread_self(); \
+ _r2; \
+ })
+# else
+static inline int
+k5_pthread_mutex_lock(k5_os_mutex *m)
+{
+ int r = pthread_mutex_lock(&m->p);
+ if (r)
+ return r;
+ m->owner = pthread_self();
+ return 0;
+}
+# endif
+# define k5_pthread_assert_locked(M) \
+ (K5_PTHREADS_LOADED \
+ ? assert(pthread_equal((M)->owner, pthread_self())) \
+ : (void)0)
+# define k5_pthread_mutex_unlock(M) \
+ (k5_pthread_assert_locked(M), \
+ (M)->owner = (pthread_t) 0, \
+ pthread_mutex_unlock(&(M)->p))
+#else
+# define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
+static inline void k5_pthread_assert_locked(k5_os_mutex *m) { }
+# define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
+#endif
+
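Under DEBUG_THREADS the owner field turns a silent locking bug into an immediate assertion failure. A hypothetical sketch of the failure mode these macros catch (the mutex and thread labels are illustrative):

    k5_os_mutex m;   /* assume already initialized */

    /* Thread A: */
    k5_pthread_mutex_lock(&m);      /* records m.owner = thread A */

    /* Thread B, by mistake, releases a mutex it never acquired: */
    k5_pthread_mutex_unlock(&m);    /* k5_pthread_assert_locked fires:
                                       pthread_equal(m.owner, B) is false */
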
/* Define as functions to:
(1) eliminate "statement with no effect" warnings for "0"
(2) encourage type-checking in calling code */
static inline void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
-static inline void k5_pthread_assert_locked(pthread_mutex_t *m) { }
#if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
# include <sched.h>
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+ { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
+ K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# elif defined(DEBUG_THREADS)
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+ { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
+ K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# endif
# define k5_os_mutex_finish_init(M) \
k5_os_nothread_mutex_finish_init(&(M)->n)
# define k5_os_mutex_lock(M) \
return_after_yield(K5_PTHREADS_LOADED \
- ? pthread_mutex_lock(&(M)->p) \
+ ? k5_pthread_mutex_lock(M) \
: k5_os_nothread_mutex_lock(&(M)->n))
# define k5_os_mutex_unlock(M) \
(MAYBE_SCHED_YIELD(), \
(K5_PTHREADS_LOADED \
- ? pthread_mutex_unlock(&(M)->p) \
+ ? k5_pthread_mutex_unlock(M) \
: k5_os_nothread_mutex_unlock(&(M)->n)))
# define k5_os_mutex_assert_unlocked(M) \
 (K5_PTHREADS_LOADED \
 ? k5_pthread_assert_unlocked(&(M)->p) \
 : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
# define k5_os_mutex_assert_locked(M) \
(K5_PTHREADS_LOADED \
- ? k5_pthread_assert_locked(&(M)->p) \
+ ? k5_pthread_assert_locked(M) \
: k5_os_nothread_mutex_assert_locked(&(M)->n))
#else
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# ifdef DEBUG_THREADS
+# ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+ { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+ { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
+# endif
+# else
+# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER }
+# endif
static inline int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
# define k5_os_mutex_init(M) pthread_mutex_init(&(M)->p, 0)
# define k5_os_mutex_destroy(M) pthread_mutex_destroy(&(M)->p)
-# define k5_os_mutex_lock(M) return_after_yield(pthread_mutex_lock(&(M)->p))
-# define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),pthread_mutex_unlock(&(M)->p))
+# define k5_os_mutex_lock(M) return_after_yield(k5_pthread_mutex_lock(M))
+# define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
# define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
-# define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(&(M)->p)
+# define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(M)
#endif /* is pthreads always available? */
#define k5_mutex_finish_init(M) k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
#define k5_mutex_destroy(M) \
(k5_os_mutex_assert_unlocked(&(M)->os), \
- (M)->loc_last = K5_DEBUG_LOC, \
+ k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
k5_os_mutex_destroy(&(M)->os))
#ifdef __GNUC__
#define k5_mutex_lock(M) \
}
#define k5_mutex_lock(M) k5_mutex_lock_1(M, K5_DEBUG_LOC)
#endif
-static inline int k5_mutex_unlock_1(k5_mutex_t *m, k5_debug_loc l)
-{
- int err = 0;
- err = k5_os_mutex_unlock(&m->os);
- if (err)
- return err;
- m->loc_last = l;
- return err;
-}
-#define k5_mutex_unlock(M) k5_mutex_unlock_1(M, K5_DEBUG_LOC)
+#define k5_mutex_unlock(M) \
+ (k5_mutex_assert_locked(M), \
+ (M)->loc_last = K5_DEBUG_LOC, \
+ k5_os_mutex_unlock(&(M)->os))
#define k5_mutex_assert_locked(M) k5_os_mutex_assert_locked(&(M)->os)
#define k5_mutex_assert_unlocked(M) k5_os_mutex_assert_unlocked(&(M)->os)
+2005-01-13 Ken Raeburn <raeburn@mit.edu>
+
+ * prng.c (krb5int_prng_init): Incorporate do_yarrow_init body.
+ Don't check inited variable.
+ (inited): Variable deleted.
+ (krb5_c_random_make_octets, krb5int_prng_cleanup): Don't check
+ it.
+ (do_yarrow_init): Deleted.
+
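With the do_yarrow_init body folded in, failures now surface through krb5int_prng_init's return value instead of being stashed behind the removed inited flag. A hedged caller-side sketch of the resulting contract (the wrapper name is illustrative):

    int crypto_subsystem_init(void)
    {
        int err = krb5int_prng_init();
        if (err)
            return err;   /* KRB5_CRYPTO_INTERNAL, or a mutex-init error */
        /* ... remaining one-time setup ... */
        return 0;
    }
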
2005-01-12 Tom Yu <tlyu@mit.edu>
* prng.c (read_entropy_from_device): Use ssize_t, not size_t, so
#include "yarrow.h"
static Yarrow_CTX y_ctx;
-static int inited, init_error;
+static int init_error;
#define yarrow_lock krb5int_yarrow_lock
k5_mutex_t yarrow_lock = K5_MUTEX_PARTIAL_INITIALIZER;
return (0);
}
-static void do_yarrow_init(void);
int krb5int_prng_init(void)
-{
- do_yarrow_init();
- if (init_error)
- return KRB5_CRYPTO_INTERNAL;
- return 0;
-}
-
-static void do_yarrow_init(void)
{
unsigned i;
int yerr;
yerr = k5_mutex_finish_init(&yarrow_lock);
- if (yerr) {
- init_error = yerr;
- return;
- }
+ if (yerr)
+ return yerr;
yerr = krb5int_yarrow_init (&y_ctx, NULL);
- if ((yerr != YARROW_OK) && (yerr != YARROW_NOT_SEEDED)) {
- init_error = yerr;
- return;
- }
+ if ((yerr != YARROW_OK) && (yerr != YARROW_NOT_SEEDED))
+ return KRB5_CRYPTO_INTERNAL;
for (i=0; i < KRB5_C_RANDSOURCE_MAX; i++ ) {
unsigned source_id;
- if (krb5int_yarrow_new_source (&y_ctx, &source_id) != YARROW_OK ) {
- init_error = 17;
- return;
- }
+ if (krb5int_yarrow_new_source (&y_ctx, &source_id) != YARROW_OK )
+ return KRB5_CRYPTO_INTERNAL;
assert (source_id == i);
}
- inited=1;
- init_error = 0;
+
+ return 0;
}
krb5_error_code KRB5_CALLCONV
krb5_c_random_make_octets(krb5_context context, krb5_data *data)
{
int yerr;
- assert (inited);
yerr = krb5int_yarrow_output (&y_ctx, data->data, data->length);
if (yerr == YARROW_NOT_SEEDED) {
yerr = krb5int_yarrow_reseed (&y_ctx, YARROW_SLOW_POOL);
void krb5int_prng_cleanup (void)
{
- if (inited)
- krb5int_yarrow_final (&y_ctx);
+ krb5int_yarrow_final (&y_ctx);
k5_mutex_destroy(&yarrow_lock);
- inited = 0;
}
+2005-01-13 Ken Raeburn <raeburn@mit.edu>
+
+ * yarrow.c (yarrow_reseed_locked): Renamed from
+ krb5int_yarrow_reseed and made static.
+ (Yarrow_detect_fork, yarrow_input_maybe_locking,
+ krb5int_yarrow_output_Block): Call it.
+ (krb5int_yarrow_reseed): New function, grabs lock and calls the
+ old version.
+ (krb5int_yarrow_final): Hold the lock until after clearing the
+ Yarrow context data.
+
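The reason for the split: yarrow_input_maybe_locking and krb5int_yarrow_output_Block already hold yarrow_lock when they decide a reseed is needed, so calling a lock-taking krb5int_yarrow_reseed from there would self-deadlock (assuming the default non-recursive mutex type). The calling convention after this change, in outline:

    /* External callers: locking is handled for them. */
    krb5int_yarrow_reseed(y, YARROW_SLOW_POOL);   /* LOCK(); reseed; UNLOCK(); */

    /* Internal callers, already inside LOCK()/UNLOCK(): */
    yarrow_reseed_locked(y, YARROW_FAST_POOL);    /* no second acquisition */
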
2004-11-22 Ken Raeburn <raeburn@mit.edu>
* yarrow.c (yarrow_input_maybe_locking): Renamed from
}
}
+static int yarrow_reseed_locked( Yarrow_CTX* y, int pool );
+
/* if the program was forked, the child must not operate on the same
PRNG state */
#ifdef YARROW_DETECT_FORK
sizeof (newpid), 0));
TRY (yarrow_input_locked (y, 0, &newpid,
sizeof (newpid), 0));
- TRY (krb5int_yarrow_reseed (y, YARROW_FAST_POOL));
+ TRY (yarrow_reseed_locked (y, YARROW_FAST_POOL));
}
CATCH:
{
if (source->entropy[YARROW_FAST_POOL] >= y->fast_thresh)
{
- ret = krb5int_yarrow_reseed(y, YARROW_FAST_POOL);
+ ret = yarrow_reseed_locked(y, YARROW_FAST_POOL);
if ( ret != YARROW_OK && ret != YARROW_NOT_SEEDED )
{
THROW( ret );
if (y->slow_k_of_n >= y->slow_k_of_n_thresh)
{
y->slow_k_of_n = 0;
- ret = krb5int_yarrow_reseed(y, YARROW_SLOW_POOL);
+ ret = yarrow_reseed_locked(y, YARROW_SLOW_POOL);
if ( ret != YARROW_OK && ret != YARROW_NOT_SEEDED )
{
THROW( ret );
TRACE( printf( "OUTPUT LIMIT REACHED," ); );
- TRY( krb5int_yarrow_reseed( y, YARROW_SLOW_POOL ) );
+ TRY( yarrow_reseed_locked( y, YARROW_SLOW_POOL ) );
}
}
#endif
-int krb5int_yarrow_reseed(Yarrow_CTX* y, int pool)
+static int yarrow_reseed_locked(Yarrow_CTX* y, int pool)
{
EXCEP_DECL;
HASH_CTX* fast_pool = &y->pool[YARROW_FAST_POOL];
EXCEP_RET;
}
+int krb5int_yarrow_reseed(Yarrow_CTX* y, int pool)
+{
+ int r;
+ LOCK();
+ r = yarrow_reseed_locked(y, pool);
+ UNLOCK();
+ return r;
+}
int krb5int_yarrow_stretch(const byte* m, size_t size, byte* out, size_t out_size)
{
#endif
CATCH:
- if ( locked ) { TRY( UNLOCK() ); }
krb5int_yarrow_cipher_final(&y->cipher);
mem_zero( y, sizeof(Yarrow_CTX) );
+ if ( locked ) { TRY( UNLOCK() ); }
EXCEP_RET;
}
+2005-01-13 Ken Raeburn <raeburn@mit.edu>
+
+ * error_message.c (com_err_terminate): Lock the list mutex before
+ walking through it; unlock and destroy it afterwards.
+
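The ordering in this fix matters on both ends: the list walk must happen under the lock, and the destroy must come last, since a mutex cannot safely be destroyed while another thread might still acquire it. In outline:

    k5_mutex_lock(&et_list_lock);      /* serialize with any late users */
    /* free each dynamic et_list entry */
    k5_mutex_unlock(&et_list_lock);
    k5_mutex_destroy(&et_list_lock);   /* only after no one can hold it */
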
2004-11-05 Ken Raeburn <raeburn@mit.edu>
* et_h.awk: Declare initialize_*_error_table as taking no
struct dynamic_et_list *e, *enext;
if (! INITIALIZER_RAN(com_err_initialize) || PROGRAM_EXITING())
return;
- k5_mutex_destroy(&et_list_lock);
+ k5_mutex_lock(&et_list_lock);
for (e = et_list_dynamic; e; e = enext) {
enext = e->next;
free(e);
}
+ k5_mutex_unlock(&et_list_lock);
+ k5_mutex_destroy(&et_list_lock);
terminated = 1;
}
+2005-01-13 Ken Raeburn <raeburn@mit.edu>
+
+ * prof_file.c (profile_free_file_data): Destroy mutex before
+ freeing containing structure.
+ (profile_open_file): If mutex creation fails, free storage
+ directly instead of calling profile_close_file.
+
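Only the profile_open_file half of this change appears in the hunk below; the profile_free_file_data half applies the same rule seen in error_message.c, destroying a mutex while its containing storage is still valid. A minimal sketch of the described ordering (surrounding cleanup elided, details assumed from context):

    void profile_free_file_data(prf_data_t data)
    {
        /* ... free the file's name, parse tree, etc. ... */
        k5_mutex_destroy(&data->lock);   /* while *data is still allocated */
        free(data);                      /* then release the structure */
    }
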
2004-12-14 Ken Raeburn <raeburn@mit.edu>
* prof_tree.c (profile_node_iterator): When the iterator has a
retval = k5_mutex_init(&data->lock);
if (retval) {
- profile_close_file(prf);
+ free(data);
+ free(prf);
return retval;
}