+2004-04-30 Ken Raeburn <raeburn@mit.edu>
+
+ * rc_base.c (krb5_rc_resolve_type): Initialize the mutex in the
+ replay cache structure.
+ (krb5_rc_default, krb5_rc_resolve_full): Destroy it if creation of
+ the replay cache fails.
+ * rc_dfl.c (krb5_rc_dfl_get_span, krb5_rc_dfl_init): Lock the
+ mutex while operating on the replay cache object.
+ (krb5_rc_dfl_expunge_locked): Renamed from krb5_rc_dfl_expunge and
+ made static. Call krb5_rc_dfl_recover_locked.
+ (krb5_rc_dfl_expunge): New wrapper function, locks the mutex.
+ (krb5_rc_dfl_recover_locked): Renamed from krb5_rc_dfl_recover and
+ made static. Call krb5_rc_dfl_expunge_locked.
+ (krb5_rc_dfl_recover): New wrapper function, locks the mutex.
+ (krb5_rc_dfl_store): Lock the mutex.  Call
+ krb5_rc_dfl_expunge_locked.
+ (krb5_rc_dfl_close): Lock the mutex while closing, and destroy it
+ before freeing the rcache.
+ (krb5_rc_dfl_close_no_free): Note that the caller must hold the
+ mutex.
+
2004-04-24 Ken Raeburn <raeburn@mit.edu>
* rc_base.c (rc_typelist_lock): Use new partial initializer.
/* allocate *id? nah */
(*id)->ops = t->ops;
k5_mutex_unlock(&rc_typelist_lock);
- return 0;
+ return k5_mutex_init(&(*id)->lock);
}
char * krb5_rc_get_type(krb5_context context, krb5_rcache id)
if ((retval = krb5_rc_resolve_type(context, id,
krb5_rc_default_type(context)))) {
+ k5_mutex_destroy(&(*id)->lock);
FREE(*id);
return retval;
}
if ((retval = krb5_rc_resolve(context, *id,
- krb5_rc_default_name(context))))
+ krb5_rc_default_name(context)))) {
+ k5_mutex_destroy(&(*id)->lock);
FREE(*id);
+ return retval;
+ }
(*id)->magic = KV5M_RCACHE;
return retval;
}
if ((retval = krb5_rc_resolve_type(context, id,type))) {
FREE(type);
+ k5_mutex_destroy(&(*id)->lock);
FREE(*id);
return retval;
}
FREE(type);
- if ((retval = krb5_rc_resolve(context, *id,residual + 1)))
+ if ((retval = krb5_rc_resolve(context, *id,residual + 1))) {
+ k5_mutex_destroy(&(*id)->lock);
FREE(*id);
+ return retval;
+ }
(*id)->magic = KV5M_RCACHE;
return retval;
}
krb5_rc_dfl_get_span(krb5_context context, krb5_rcache id,
krb5_deltat *lifespan)
{
- *lifespan = ((struct dfl_data *) (id->data))->lifespan;
+ krb5_error_code err;
+ struct dfl_data *t;
+
+ t = (struct dfl_data *) id->data;
+ err = k5_mutex_lock(&id->lock);
+ if (err)
+ return err;
+ *lifespan = t->lifespan;
+ k5_mutex_unlock(&id->lock);
return 0;
}
struct dfl_data *t = (struct dfl_data *)id->data;
krb5_error_code retval;
+ retval = k5_mutex_lock(&id->lock);
+ if (retval)
+ return retval;
t->lifespan = lifespan ? lifespan : context->clockskew;
/* default to clockskew from the context */
#ifndef NOIOSTUFF
- if ((retval = krb5_rc_io_creat(context, &t->d, &t->name)))
+ if ((retval = krb5_rc_io_creat(context, &t->d, &t->name))) {
+ k5_mutex_unlock(&id->lock);
return retval;
+ }
if ((krb5_rc_io_write(context, &t->d,
(krb5_pointer) &t->lifespan, sizeof(t->lifespan))
- || krb5_rc_io_sync(context, &t->d)))
+ || krb5_rc_io_sync(context, &t->d))) {
+ k5_mutex_unlock(&id->lock);
return KRB5_RC_IO;
+ }
#endif
+ k5_mutex_unlock(&id->lock);
return 0;
}
+/* Called with the mutex already locked. */
krb5_error_code
krb5_rc_dfl_close_no_free(krb5_context context, krb5_rcache id)
{
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_close(krb5_context context, krb5_rcache id)
{
+ krb5_error_code retval;
+ retval = k5_mutex_lock(&id->lock);
+ if (retval)
+ return retval;
krb5_rc_dfl_close_no_free(context, id);
+ k5_mutex_unlock(&id->lock);
+ k5_mutex_destroy(&id->lock);
free(id);
return 0;
}
}
-
-krb5_error_code KRB5_CALLCONV
-krb5_rc_dfl_recover(krb5_context context, krb5_rcache id)
+static krb5_error_code
+krb5_rc_dfl_recover_locked(krb5_context context, krb5_rcache id)
{
#ifdef NOIOSTUFF
return KRB5_RC_NOIO;
int expired_entries = 0;
krb5_int32 now;
- if ((retval = krb5_rc_io_open(context, &t->d, t->name)))
+ if ((retval = krb5_rc_io_open(context, &t->d, t->name))) {
return retval;
+ }
t->recovering = 1;
if (retval)
krb5_rc_io_close(context, &t->d);
else if (expired_entries > EXCESSREPS)
- retval = krb5_rc_dfl_expunge(context, id);
+ retval = krb5_rc_dfl_expunge_locked(context, id);
t->recovering = 0;
return retval;
#endif
}
+krb5_error_code KRB5_CALLCONV
+krb5_rc_dfl_recover(krb5_context context, krb5_rcache id)
+{
+ krb5_error_code ret;
+ ret = k5_mutex_lock(&id->lock);
+ if (ret)
+ return ret;
+ ret = krb5_rc_dfl_recover_locked(context, id);
+ k5_mutex_unlock(&id->lock);
+ return ret;
+}
+
static krb5_error_code
krb5_rc_io_store(krb5_context context, struct dfl_data *t,
krb5_donot_replay *rep)
return ret;
}
+static krb5_error_code krb5_rc_dfl_expunge_locked(krb5_context, krb5_rcache);
+
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_store(krb5_context context, krb5_rcache id, krb5_donot_replay *rep)
{
if (ret)
return ret;
+ ret = k5_mutex_lock(&id->lock);
+ if (ret)
+ return ret;
+
switch(rc_store(context, id, rep, now)) {
case CMP_MALLOC:
+ k5_mutex_unlock(&id->lock);
return KRB5_RC_MALLOC;
case CMP_REPLAY:
+ k5_mutex_unlock(&id->lock);
return KRB5KRB_AP_ERR_REPEAT;
case 0: break;
default: /* wtf? */ ;
}
#ifndef NOIOSTUFF
ret = krb5_rc_io_store(context, t, rep);
- if (ret)
+ if (ret) {
+ k5_mutex_unlock(&id->lock);
return ret;
+ }
#endif
/* Shall we automatically expunge? */
if (t->nummisses > t->numhits + EXCESSREPS)
{
- return krb5_rc_dfl_expunge(context, id);
+ ret = krb5_rc_dfl_expunge_locked(context, id);
+ k5_mutex_unlock(&id->lock);
+ return ret;
}
#ifndef NOIOSTUFF
else
{
- if (krb5_rc_io_sync(context, &t->d))
+ if (krb5_rc_io_sync(context, &t->d)) {
+ k5_mutex_unlock(&id->lock);
return KRB5_RC_IO;
+ }
}
#endif
+ k5_mutex_unlock(&id->lock);
return 0;
}
-krb5_error_code KRB5_CALLCONV
-krb5_rc_dfl_expunge(krb5_context context, krb5_rcache id)
+static krb5_error_code
+krb5_rc_dfl_expunge_locked(krb5_context context, krb5_rcache id)
{
struct dfl_data *t = (struct dfl_data *)id->data;
#ifdef NOIOSTUFF
free(name);
if (retval)
return retval;
- retval = krb5_rc_dfl_recover(context, id);
+ retval = krb5_rc_dfl_recover_locked(context, id);
if (retval)
return retval;
t = (struct dfl_data *)id->data; /* point to recovered cache */
return retval;
#endif
}
+
+krb5_error_code KRB5_CALLCONV
+krb5_rc_dfl_expunge(krb5_context context, krb5_rcache id)
+{
+ krb5_error_code ret;
+ ret = k5_mutex_lock(&id->lock);
+ if (ret)
+ return ret;
+ ret = krb5_rc_dfl_expunge_locked(context, id);
+ k5_mutex_unlock(&id->lock);
+ return ret;
+}