pullup from trunk
author: Tom Yu <tlyu@mit.edu>
Sat, 15 Jan 2005 01:04:55 +0000 (01:04 +0000)
committer: Tom Yu <tlyu@mit.edu>
Sat, 15 Jan 2005 01:04:55 +0000 (01:04 +0000)
ticket: 2878
version_reported: 1.4
version_fixed: 1.4

git-svn-id: svn://anonsvn.mit.edu/krb5/branches/krb5-1-4@17044 dc483132-0cff-0310-8789-dd5450dbe970

src/include/ChangeLog
src/include/k5-thread.h
src/lib/crypto/ChangeLog
src/lib/crypto/prng.c
src/lib/crypto/yarrow/ChangeLog
src/lib/crypto/yarrow/yarrow.c
src/util/et/ChangeLog
src/util/et/error_message.c
src/util/profile/ChangeLog
src/util/profile/prof_file.c

index 3a551385b2543b0c23693e26f8b02908d00fecf6..b159b282f62d41053936425862a2f5e1ff53a0d1 100644 (file)
@@ -1,3 +1,30 @@
+2005-01-14  Ken Raeburn  <raeburn@mit.edu>
+
+       * k5-thread.h [HAVE_PTHREAD && HAVE_PRAGMA_WEAK_REF]: Mark
+       pthread_self and pthread_equal as weak references.
+
+2005-01-13  Ken Raeburn  <raeburn@mit.edu>
+
+       * k5-thread.h (k5_os_mutex) [pthread case]: Add new field "owner"
+       if DEBUG_THREADS.
+       (k5_pthread_mutex_lock, k5_pthread_mutex_unlock,
+       k5_pthread_assert_locked): New macros/functions; if DEBUG_THREADS,
+       and thread support loaded, set or check the owner field.
+       (K5_OS_MUTEX_PARTIAL_INITIALIZER) [pthread case && DEBUG_THREADS]:
+       Set the owner field.  If PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+       is defined, use it.
+       (k5_os_mutex_lock, k5_os_mutex_unlock, k5_os_mutex_assert_locked)
+       [pthread case]: Use k5_pthread_ versions.
+       (k5_mutex_destroy): Update the location data with the mutex
+       locked, before destroying it.
+       (k5_mutex_unlock): Update the location data while the mutex is
+       still locked, and check the assertion that the mutex really is
+       locked.  Convert inline function to macro.
+
+       * k5-thread.h (krb5int_mutex_lock_update_stats,
+       krb5int_mutex_unlock_update_stats, krb5int_mutex_report_stats)
+       [!DEBUG_THREADS_STATS]: Declare KRB5_CALLCONV.
+
 2005-01-04  Jeffrey Altman <jaltman@mit.edu>
 
         * krb5.hin: add prototype for krb5_is_thread_safe
index 5fecf082757f5cbe858a920dc3e733eb117e1ec0..210562458e2e9e038d1f38911a67796eb024d0fa 100644 (file)
@@ -367,6 +367,8 @@ typedef k5_os_nothread_mutex k5_os_mutex;
 # pragma weak pthread_mutex_unlock
 # pragma weak pthread_mutex_destroy
 # pragma weak pthread_mutex_init
+# pragma weak pthread_self
+# pragma weak pthread_equal
 # ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
 #  pragma weak pthread_mutexattr_setrobust_np
 # endif
@@ -423,17 +425,53 @@ typedef pthread_once_t k5_once_t;
 
 typedef struct {
     pthread_mutex_t p;
+#ifdef DEBUG_THREADS
+    pthread_t owner;
+#endif
 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
     k5_os_nothread_mutex n;
 #endif
 } k5_os_mutex;
 
+#ifdef DEBUG_THREADS
+# ifdef __GNUC__
+#  define k5_pthread_mutex_lock(M)                     \
+       ({                                              \
+           k5_os_mutex *_m2 = (M);                     \
+           int _r2 = pthread_mutex_lock(&_m2->p);      \
+           if (_r2 == 0) _m2->owner = pthread_self();  \
+           _r2;                                        \
+       })
+# else
+static inline int
+k5_pthread_mutex_lock(k5_os_mutex *m)
+{
+    int r = pthread_mutex_lock(&m->p);
+    if (r)
+       return r;
+    m->owner = pthread_self();
+    return 0;
+}
+# endif
+# define k5_pthread_assert_locked(M)                           \
+       (K5_PTHREADS_LOADED                                     \
+        ? assert(pthread_equal((M)->owner, pthread_self()))    \
+        : (void)0)
+# define k5_pthread_mutex_unlock(M)    \
+       (k5_pthread_assert_locked(M),   \
+        (M)->owner = (pthread_t) 0,    \
+        pthread_mutex_unlock(&(M)->p))
+#else
+# define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
+static inline void k5_pthread_assert_locked(k5_os_mutex *m) { }
+# define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
+#endif
+
 /* Define as functions to:
    (1) eliminate "statement with no effect" warnings for "0"
    (2) encourage type-checking in calling code  */
 
 static inline void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
-static inline void k5_pthread_assert_locked(pthread_mutex_t *m) { }
 
 #if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
 # include <sched.h>
@@ -481,8 +519,18 @@ static inline int return_after_yield(int r)
 
 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
 
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
+#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+       { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
+         K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# elif defined(DEBUG_THREADS)
+#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+       { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
+         K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# else
+#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
        { PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
+# endif
 
 # define k5_os_mutex_finish_init(M)            \
        k5_os_nothread_mutex_finish_init(&(M)->n)
@@ -499,12 +547,12 @@ static inline int return_after_yield(int r)
 
 # define k5_os_mutex_lock(M)                                           \
        return_after_yield(K5_PTHREADS_LOADED                           \
-                          ? pthread_mutex_lock(&(M)->p)                \
+                          ? k5_pthread_mutex_lock(M)                   \
                           : k5_os_nothread_mutex_lock(&(M)->n))
 # define k5_os_mutex_unlock(M)                         \
        (MAYBE_SCHED_YIELD(),                           \
         (K5_PTHREADS_LOADED                            \
-         ? pthread_mutex_unlock(&(M)->p)               \
+         ? k5_pthread_mutex_unlock(M)                  \
          : k5_os_nothread_mutex_unlock(&(M)->n)))
 
 # define k5_os_mutex_assert_unlocked(M)                        \
@@ -513,22 +561,32 @@ static inline int return_after_yield(int r)
         : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
 # define k5_os_mutex_assert_locked(M)                  \
        (K5_PTHREADS_LOADED                             \
-        ? k5_pthread_assert_locked(&(M)->p)            \
+        ? k5_pthread_assert_locked(M)                  \
         : k5_os_nothread_mutex_assert_locked(&(M)->n))
 
 #else
 
-# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+# ifdef DEBUG_THREADS
+#  ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+#   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+       { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
+#  else
+#   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
+       { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
+#  endif
+# else
+#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
        { PTHREAD_MUTEX_INITIALIZER }
+# endif
 
 static inline int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
 # define k5_os_mutex_init(M)           pthread_mutex_init(&(M)->p, 0)
 # define k5_os_mutex_destroy(M)                pthread_mutex_destroy(&(M)->p)
-# define k5_os_mutex_lock(M)   return_after_yield(pthread_mutex_lock(&(M)->p))
-# define k5_os_mutex_unlock(M)         (MAYBE_SCHED_YIELD(),pthread_mutex_unlock(&(M)->p))
+# define k5_os_mutex_lock(M)   return_after_yield(k5_pthread_mutex_lock(M))
+# define k5_os_mutex_unlock(M)         (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
 
 # define k5_os_mutex_assert_unlocked(M)        k5_pthread_assert_unlocked(&(M)->p)
-# define k5_os_mutex_assert_locked(M)  k5_pthread_assert_locked(&(M)->p)
+# define k5_os_mutex_assert_locked(M)  k5_pthread_assert_locked(M)
 
 #endif /* is pthreads always available? */
 
@@ -614,7 +672,7 @@ static inline int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
 #define k5_mutex_finish_init(M)        k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
 #define k5_mutex_destroy(M)                    \
        (k5_os_mutex_assert_unlocked(&(M)->os), \
-        (M)->loc_last = K5_DEBUG_LOC,          \
+        k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
         k5_os_mutex_destroy(&(M)->os))
 #ifdef __GNUC__
 #define k5_mutex_lock(M)                               \
@@ -637,16 +695,10 @@ static inline int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
 }
 #define k5_mutex_lock(M)       k5_mutex_lock_1(M, K5_DEBUG_LOC)
 #endif
-static inline int k5_mutex_unlock_1(k5_mutex_t *m, k5_debug_loc l)
-{
-    int err = 0;
-    err = k5_os_mutex_unlock(&m->os);
-    if (err)
-       return err;
-    m->loc_last = l;
-    return err;
-}
-#define k5_mutex_unlock(M)     k5_mutex_unlock_1(M, K5_DEBUG_LOC)
+#define k5_mutex_unlock(M)                             \
+       (k5_mutex_assert_locked(M),                     \
+        (M)->loc_last = K5_DEBUG_LOC,                  \
+        k5_os_mutex_unlock(&(M)->os))
 
 #define k5_mutex_assert_locked(M)      k5_os_mutex_assert_locked(&(M)->os)
 #define k5_mutex_assert_unlocked(M)    k5_os_mutex_assert_unlocked(&(M)->os)
index f77c170b25f47e46bf898218fe3eef1fb701e81a..feccaa79125db713373c0318d5edaf4d2f6eb167 100644 (file)
@@ -1,3 +1,12 @@
+2005-01-13  Ken Raeburn  <raeburn@mit.edu>
+
+       * prng.c (krb5int_prng_init): Incorporate do_yarrow_init body.
+       Don't check inited variable.
+       (inited): Variable deleted.
+       (krb5_c_random_make_octets, krb5int_prng_cleanup): Don't check
+       it.
+       (do_yarrow_init): Deleted.
+
 2005-01-12  Tom Yu  <tlyu@mit.edu>
 
        * prng.c (read_entropy_from_device): Use ssize_t, not size_t, so
index 7371e974e0a791857d29da4dc9eb88b395c4b5cf..54a68e06752bed222f0c258bb20393b5de559cd1 100644 (file)
@@ -30,7 +30,7 @@
 
 #include "yarrow.h"
 static Yarrow_CTX y_ctx;
-static int inited, init_error;
+static int init_error;
 #define yarrow_lock krb5int_yarrow_lock
 k5_mutex_t yarrow_lock = K5_MUTEX_PARTIAL_INITIALIZER;
 
@@ -57,42 +57,27 @@ entropy_estimate (unsigned int randsource, size_t length)
 return (0);
 }
 
-static void do_yarrow_init(void);
 int krb5int_prng_init(void)
-{
-    do_yarrow_init();
-    if (init_error)
-       return KRB5_CRYPTO_INTERNAL;
-    return 0;
-}
-
-static void do_yarrow_init(void)
 {
     unsigned i;
     int yerr;
 
     yerr = k5_mutex_finish_init(&yarrow_lock);
-    if (yerr) {
-       init_error = yerr;
-       return;
-    }
+    if (yerr)
+       return yerr;
 
     yerr = krb5int_yarrow_init (&y_ctx, NULL);
-    if ((yerr != YARROW_OK) && (yerr != YARROW_NOT_SEEDED)) {
-       init_error = yerr;
-       return;
-    }
+    if ((yerr != YARROW_OK) && (yerr != YARROW_NOT_SEEDED))
+       return KRB5_CRYPTO_INTERNAL;
 
     for (i=0; i < KRB5_C_RANDSOURCE_MAX; i++ ) {
        unsigned source_id;
-       if (krb5int_yarrow_new_source (&y_ctx, &source_id) != YARROW_OK ) {
-           init_error = 17;
-           return;
-       }
+       if (krb5int_yarrow_new_source (&y_ctx, &source_id) != YARROW_OK )
+           return KRB5_CRYPTO_INTERNAL;
        assert (source_id == i);
     }
-    inited=1;
-    init_error = 0;
+
+    return 0;
 }
 
 krb5_error_code KRB5_CALLCONV
@@ -124,7 +109,6 @@ krb5_error_code KRB5_CALLCONV
 krb5_c_random_make_octets(krb5_context context, krb5_data *data)
 {
     int yerr;
-    assert (inited);
     yerr = krb5int_yarrow_output (&y_ctx, data->data, data->length);
     if (yerr == YARROW_NOT_SEEDED) {
       yerr = krb5int_yarrow_reseed (&y_ctx, YARROW_SLOW_POOL);
@@ -138,10 +122,8 @@ krb5_c_random_make_octets(krb5_context context, krb5_data *data)
 
 void krb5int_prng_cleanup (void)
 {
-    if (inited)
-       krb5int_yarrow_final (&y_ctx);
+    krb5int_yarrow_final (&y_ctx);
     k5_mutex_destroy(&yarrow_lock);
-    inited = 0;
 }
 
 
index f3e5666647a8649c7e71abba4eb026e5bc74e9ce..96cc35c14fed769ac8c67f90f1e91c9a474e6e9b 100644 (file)
@@ -1,3 +1,14 @@
+2005-01-13  Ken Raeburn  <raeburn@mit.edu>
+
+       * yarrow.c (yarrow_reseed_locked): Renamed from
+       krb5int_yarrow_reseed and made static.
+       (Yarrow_detect_fork, yarrow_input_maybe_locking,
+       krb5int_yarrow_output_Block): Call it.
+       (krb5int_yarrow_reseed): New function, grabs lock and calls the
+       old version.
+       (krb5int_yarrow_final): Hold the lock until after clearing the
+       Yarrow context data.
+
 2004-11-22  Ken Raeburn  <raeburn@mit.edu>
 
        * yarrow.c (yarrow_input_maybe_locking): Renamed from
index a619c5b2e557b309902a944cbc91d072b12d0c49..472f042e3fd72c721babe23e04c3e9e067a44094 100644 (file)
@@ -117,6 +117,8 @@ static void krb5int_yarrow_init_Limits(Yarrow_CTX* y)
     }
 }
 
+static int yarrow_reseed_locked( Yarrow_CTX* y, int pool );
+
 /* if the program was forked, the child must not operate on the same
    PRNG state */
 #ifdef YARROW_DETECT_FORK
@@ -144,7 +146,7 @@ static int Yarrow_detect_fork(Yarrow_CTX *y)
                                  sizeof (newpid), 0));
        TRY (yarrow_input_locked (y, 0, &newpid,
                                  sizeof (newpid), 0));
-       TRY (krb5int_yarrow_reseed (y, YARROW_FAST_POOL));
+       TRY (yarrow_reseed_locked (y, YARROW_FAST_POOL));
     }
 
  CATCH:
@@ -302,7 +304,7 @@ int yarrow_input_maybe_locking( Yarrow_CTX* y, unsigned source_id,
        {
            if (source->entropy[YARROW_FAST_POOL] >= y->fast_thresh)
            {
-               ret = krb5int_yarrow_reseed(y, YARROW_FAST_POOL);
+               ret = yarrow_reseed_locked(y, YARROW_FAST_POOL);
                if ( ret != YARROW_OK && ret != YARROW_NOT_SEEDED )
                {
                    THROW( ret );
@@ -319,7 +321,7 @@ int yarrow_input_maybe_locking( Yarrow_CTX* y, unsigned source_id,
                if (y->slow_k_of_n >= y->slow_k_of_n_thresh)
                {
                    y->slow_k_of_n = 0;
-                   ret = krb5int_yarrow_reseed(y, YARROW_SLOW_POOL);
+                   ret = yarrow_reseed_locked(y, YARROW_SLOW_POOL);
                    if ( ret != YARROW_OK && ret != YARROW_NOT_SEEDED )
                    {
                        THROW( ret );
@@ -433,7 +435,7 @@ static int krb5int_yarrow_output_Block( Yarrow_CTX* y, void* out )
            
            TRACE( printf( "OUTPUT LIMIT REACHED," ); );
 
-           TRY( krb5int_yarrow_reseed( y, YARROW_SLOW_POOL ) );
+           TRY( yarrow_reseed_locked( y, YARROW_SLOW_POOL ) );
        }
     }
   
@@ -665,7 +667,7 @@ static int Yarrow_Save_State( Yarrow_CTX *y )
 
 #endif
 
-int krb5int_yarrow_reseed(Yarrow_CTX* y, int pool)
+static int yarrow_reseed_locked(Yarrow_CTX* y, int pool)
 {
     EXCEP_DECL;
     HASH_CTX* fast_pool = &y->pool[YARROW_FAST_POOL];
@@ -812,6 +814,14 @@ int krb5int_yarrow_reseed(Yarrow_CTX* y, int pool)
 
     EXCEP_RET;
 }
+int krb5int_yarrow_reseed(Yarrow_CTX* y, int pool)
+{
+       int r;
+       LOCK();
+       r = yarrow_reseed_locked(y, pool);
+       UNLOCK();
+       return r;
+}
 
 int krb5int_yarrow_stretch(const byte* m, size_t size, byte* out, size_t out_size)
 {
@@ -905,9 +915,9 @@ int krb5int_yarrow_final(Yarrow_CTX* y)
 #endif
 
  CATCH:
-    if ( locked ) { TRY( UNLOCK() ); }
     krb5int_yarrow_cipher_final(&y->cipher);
     mem_zero( y, sizeof(Yarrow_CTX) );
+    if ( locked ) { TRY( UNLOCK() ); }
     EXCEP_RET;
 }
 
index e9b0e232b1a6ff80b50a4ab4cbea63b3170e61e6..eeec13ebd7510e0723629fb070ec68d60b0212fb 100644 (file)
@@ -1,3 +1,8 @@
+2005-01-13  Ken Raeburn  <raeburn@mit.edu>
+
+       * error_message.c (com_err_terminate): Lock the list mutex before
+       walking through it; unlock and destroy it afterwards.
+
 2004-11-05  Ken Raeburn  <raeburn@mit.edu>
 
        * et_h.awk: Declare initialize_*_error_table as taking no
index 42c230051ff69b2b29b548e329042bd27d1dc367..8bef6804d9507dd65c8f98bdfd204a12a2e70168 100644 (file)
@@ -62,11 +62,13 @@ void com_err_terminate(void)
     struct dynamic_et_list *e, *enext;
     if (! INITIALIZER_RAN(com_err_initialize) || PROGRAM_EXITING())
        return;
-    k5_mutex_destroy(&et_list_lock);
+    k5_mutex_lock(&et_list_lock);
     for (e = et_list_dynamic; e; e = enext) {
        enext = e->next;
        free(e);
     }
+    k5_mutex_unlock(&et_list_lock);
+    k5_mutex_destroy(&et_list_lock);
     terminated = 1;
 }
 
index bfe663636591830901a306f541f4307e4a2ad90e..985664014dcc36964126a76c4755d6784c4f91e5 100644 (file)
@@ -1,3 +1,10 @@
+2005-01-13  Ken Raeburn  <raeburn@mit.edu>
+
+       * prof_file.c (profile_free_file_data): Destroy mutex before
+       freeing containing structure.
+       (profile_open_file): If mutex creation fails, free storage
+       directly instead of calling profile_close_file.
+
 2004-12-14  Ken Raeburn  <raeburn@mit.edu>
 
        * prof_tree.c (profile_node_iterator): When the iterator has a
index 028253720690e2007221ccffd4d8628a047a1b40..cada5250565305a5206a4127eb6529dd90f9f65f 100644 (file)
@@ -272,7 +272,8 @@ errcode_t profile_open_file(const_profile_filespec_t filespec,
 
        retval = k5_mutex_init(&data->lock);
        if (retval) {
-           profile_close_file(prf);
+           free(data);
+           free(prf);
            return retval;
        }