* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
- * permission. Furthermore if you modify this software you must label
+ * permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
unsigned char **buf, int tok_type);
/* flags for g_verify_token_header() */
-#define G_VFY_TOKEN_HDR_WRAPPER_REQUIRED 0x01
-#define G_VFY_TOKEN_HDR_IGNORE_SEQ_SIZE 0x02
+#define G_VFY_TOKEN_HDR_WRAPPER_REQUIRED 0x01
+#define G_VFY_TOKEN_HDR_IGNORE_SEQ_SIZE 0x02
gss_int32 g_verify_token_header (const gss_OID_desc * mech,
unsigned int *body_size,
OM_uint32
generic_gss_oid_compose(
- OM_uint32 *, /* minor_status */
- const char *, /* prefix */
- size_t, /* prefix_len */
- int, /* suffix */
- gss_OID_desc *); /* oid */
+ OM_uint32 *, /* minor_status */
+ const char *, /* prefix */
+ size_t, /* prefix_len */
+ int, /* suffix */
+ gss_OID_desc *); /* oid */
OM_uint32
generic_gss_oid_decompose(
- OM_uint32 *, /* minor_status */
- const char *, /*prefix */
- size_t, /* prefix_len */
- gss_OID_desc *, /* oid */
- int *); /* suffix */
+ OM_uint32 *, /* minor_status */
+ const char *, /*prefix */
+ size_t, /* prefix_len */
+ gss_OID_desc *, /* oid */
+ int *); /* suffix */
int gssint_mecherrmap_init(void);
void gssint_mecherrmap_destroy(void);
OM_uint32 generic_gss_copy_oid_set
(OM_uint32 *, /* minor_status */
- const gss_OID_set_desc *, /* const oidset*/
- gss_OID_set * /*new_oidset*/);
+ const gss_OID_set_desc *, /* const oidset*/
+ gss_OID_set * /*new_oidset*/);
#endif /* _GSSAPIP_GENERIC_H_ */
GSS_DLLIMP gss_OID GSS_C_NT_EXPORT_NAME = oids+6;
gss_OID gss_nt_exported_name = oids+6;
-GSS_DLLIMP gss_OID GSS_C_INQ_SSPI_SESSION_KEY = oids+7;
+GSS_DLLIMP gss_OID GSS_C_INQ_SSPI_SESSION_KEY = oids+7;
#define GSSAPIGENERIC_END_DECLS
#endif
-#define GSS_EMPTY_BUFFER(buf) ((buf) == NULL ||\
- (buf)->value == NULL || (buf)->length == 0)
+#define GSS_EMPTY_BUFFER(buf) ((buf) == NULL ||\
+ (buf)->value == NULL || (buf)->length == 0)
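/*
 * Minimal usage sketch for GSS_EMPTY_BUFFER (hypothetical caller, not
 * library code): the macro short-circuits, so a NULL descriptor, a NULL
 * value pointer, or a zero length can all be rejected with one test
 * before the buffer contents are touched.
 */
static int
example_buffer_has_data(gss_buffer_t in)
{
    if (GSS_EMPTY_BUFFER(in))
        return 0;               /* nothing to process */
    return 1;                   /* in->value and in->length are usable */
}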
GSSAPIGENERIC_BEGIN_DECLS
char *canon, *str;
if ((hent = gethostbyname(hostname)) == NULL)
- return(NULL);
+ return(NULL);
if (! (haddr = (char *) xmalloc(hent->h_length))) {
- return(NULL);
+ return(NULL);
}
memcpy(haddr, hent->h_addr_list[0], hent->h_length);
if (! (hent = gethostbyaddr(haddr, hent->h_length, hent->h_addrtype))) {
- return(NULL);
+ return(NULL);
}
xfree(haddr);
if ((canon = (char *) strdup(hent->h_name)) == NULL)
- return(NULL);
+ return(NULL);
for (str = canon; *str; str++)
- if (isupper(*str)) *str = tolower(*str);
+ if (isupper(*str)) *str = tolower(*str);
return(canon);
}
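/*
 * For comparison, a sketch of hostname canonicalization via getaddrinfo()
 * with AI_CANONNAME (hypothetical helper, not library code).  Note that
 * this follows the forward lookup's canonical name and does not repeat
 * the reverse gethostbyaddr() step used above.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <string.h>
#include <ctype.h>

static char *
example_canon_hostname(const char *hostname)
{
    struct addrinfo hints, *ai = NULL;
    char *canon, *p;

    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_CANONNAME;              /* request the canonical name */
    if (getaddrinfo(hostname, NULL, &hints, &ai) != 0 || ai->ai_canonname == NULL) {
        if (ai != NULL)
            freeaddrinfo(ai);
        return NULL;
    }
    canon = strdup(ai->ai_canonname);
    freeaddrinfo(ai);
    if (canon == NULL)
        return NULL;
    for (p = canon; *p != '\0'; p++)            /* fold to lower case, as above */
        *p = tolower((unsigned char)*p);
    return canon;
}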
char buf[MAXHOSTNAMELEN+1], *ptr;
if (gethostname(buf, sizeof(buf)) < 0)
- return 0;
+ return 0;
buf[sizeof(buf)-1] = '\0';
return(G_BAD_TOK_HEADER);
if ((flags & G_VFY_TOKEN_HDR_IGNORE_SEQ_SIZE) == 0 &&
- seqsize != toksize)
+ seqsize != toksize)
return(G_BAD_TOK_HEADER);
if ((toksize-=1) < 0)
*/
static OM_uint32
kg_accept_dce(minor_status, context_handle, verifier_cred_handle,
- input_token, input_chan_bindings, src_name, mech_type,
- output_token, ret_flags, time_rec, delegated_cred_handle)
+ input_token, input_chan_bindings, src_name, mech_type,
+ output_token, ret_flags, time_rec, delegated_cred_handle)
OM_uint32 *minor_status;
gss_ctx_id_t *context_handle;
gss_cred_id_t verifier_cred_handle;
ap_rep.length = input_token->length;
code = krb5_rd_rep_dce(ctx->k5_context,
- ctx->auth_context,
- &ap_rep,
- &nonce);
+ ctx->auth_context,
+ &ap_rep,
+ &nonce);
if (code != 0) {
major_status = GSS_S_FAILURE;
goto fail;
if (src_name) {
if ((code = krb5_copy_principal(ctx->k5_context, ctx->there, &name))) {
- major_status = GSS_S_FAILURE;
- goto fail;
+ major_status = GSS_S_FAILURE;
+ goto fail;
}
/* intern the src_name */
if (! kg_save_name((gss_name_t) name)) {
- code = G_VALIDATE_FAILED;
- major_status = GSS_S_FAILURE;
- goto fail;
+ code = G_VALIDATE_FAILED;
+ major_status = GSS_S_FAILURE;
+ goto fail;
}
*src_name = (gss_name_t) name;
}
if (ctx)
(void) krb5_gss_delete_sec_context(minor_status,
- (gss_ctx_id_t *) &ctx, NULL);
+ (gss_ctx_id_t *) &ctx, NULL);
*context_handle = GSS_C_NO_CONTEXT;
*minor_status = code;
static OM_uint32
kg_accept_krb5(minor_status, context_handle,
- verifier_cred_handle, input_token,
- input_chan_bindings, src_name, mech_type,
- output_token, ret_flags, time_rec,
- delegated_cred_handle)
+ verifier_cred_handle, input_token,
+ input_chan_bindings, src_name, mech_type,
+ output_token, ret_flags, time_rec,
+ delegated_cred_handle)
OM_uint32 *minor_status;
gss_ctx_id_t *context_handle;
gss_cred_id_t verifier_cred_handle;
mech_used = gss_mech_krb5;
goto fail;
} else if (code == G_BAD_TOK_HEADER) {
- /* DCE style not encapsulated */
- ap_req.length = input_token->length;
- ap_req.data = input_token->value;
- mech_used = gss_mech_krb5;
- no_encap = 1;
+ /* DCE style not encapsulated */
+ ap_req.length = input_token->length;
+ ap_req.data = input_token->value;
+ mech_used = gss_mech_krb5;
+ no_encap = 1;
} else {
major_status = GSS_S_DEFECTIVE_TOKEN;
goto fail;
code = krb5_auth_con_getkey(context, auth_context, &subkey);
if (code) {
- major_status = GSS_S_FAILURE;
- goto fail;
+ major_status = GSS_S_FAILURE;
+ goto fail;
}
zero.length = 0;
zero.data = "";
code = krb5_c_verify_checksum(context,
- subkey,
- KRB5_KEYUSAGE_AP_REQ_AUTH_CKSUM,
- &zero,
- authdat->checksum,
- &valid);
+ subkey,
+ KRB5_KEYUSAGE_AP_REQ_AUTH_CKSUM,
+ &zero,
+ authdat->checksum,
+ &valid);
if (code || !valid) {
- major_status = GSS_S_BAD_SIG;
- krb5_free_keyblock(context, subkey);
- goto fail;
+ major_status = GSS_S_BAD_SIG;
+ krb5_free_keyblock(context, subkey);
+ goto fail;
}
gss_flags = GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG | GSS_C_SEQUENCE_FLAG;
/* only DCE_STYLE clients are allowed to send raw AP-REQs */
if (no_encap != ((gss_flags & GSS_C_DCE_STYLE) != 0)) {
- major_status = GSS_S_DEFECTIVE_TOKEN;
- goto fail;
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto fail;
}
/* create the ctx struct and start filling it in */
((gss_flags) & (GSS_C_INTEG_FLAG | GSS_C_CONF_FLAG |
GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG |
GSS_C_SEQUENCE_FLAG | GSS_C_DELEG_FLAG |
- GSS_C_DCE_STYLE | GSS_C_IDENTIFY_FLAG |
- GSS_C_EXTENDED_ERROR_FLAG)));
+ GSS_C_DCE_STYLE | GSS_C_IDENTIFY_FLAG |
+ GSS_C_EXTENDED_ERROR_FLAG)));
ctx->seed_init = 0;
ctx->big_endian = bigend;
ctx->cred_rcache = cred_rcache;
/* XXX move this into gss_name_t */
if (ticket->enc_part2->authorization_data != NULL &&
- (code = krb5_copy_authdata(context,
- ticket->enc_part2->authorization_data,
- &ctx->authdata))) {
- major_status = GSS_S_FAILURE;
- goto fail;
+ (code = krb5_copy_authdata(context,
+ ticket->enc_part2->authorization_data,
+ &ctx->authdata))) {
+ major_status = GSS_S_FAILURE;
+ goto fail;
}
if ((code = krb5_copy_principal(context, ticket->server, &ctx->here))) {
major_status = GSS_S_FAILURE;
ctx->have_acceptor_subkey = 0;
/* DCE_STYLE implies acceptor_subkey */
if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
- code = kg_setup_keys(context, ctx, ctx->subkey, &ctx->cksumtype);
- if (code) {
- major_status = GSS_S_FAILURE;
- goto fail;
- }
+ code = kg_setup_keys(context, ctx, ctx->subkey, &ctx->cksumtype);
+ if (code) {
+ major_status = GSS_S_FAILURE;
+ goto fail;
+ }
}
ctx->krb_times = ticket->enc_part2->times; /* struct copy */
ctx->krb_flags = ticket->enc_part2->flags;
/* DCE_STYLE implies mutual authentication */
if (ctx->gss_flags & GSS_C_DCE_STYLE)
- ctx->gss_flags |= GSS_C_MUTUAL_FLAG;
+ ctx->gss_flags |= GSS_C_MUTUAL_FLAG;
/* at this point, the entire context structure is filled in,
so it can be released. */
krb5_int32 seq_temp;
int cfx_generate_subkey;
- /*
- * Do not generate a subkey per RFC 4537 unless we are upgrading to CFX,
- * because pre-CFX tokens do not indicate which key to use. (Note that
- * DCE_STYLE implies that we will use a subkey.)
- */
- if (ctx->proto == 0 &&
- (ctx->gss_flags & GSS_C_DCE_STYLE) == 0 &&
- (ap_req_options & AP_OPTS_USE_SUBKEY)) {
- code = (*kaccess.krb5_auth_con_get_subkey_enctype) (context,
- auth_context,
- &negotiated_etype);
- if (code != 0) {
- major_status = GSS_S_FAILURE;
- goto fail;
- }
-
- switch (negotiated_etype) {
- case ENCTYPE_DES_CBC_MD5:
- case ENCTYPE_DES_CBC_MD4:
- case ENCTYPE_DES_CBC_CRC:
- case ENCTYPE_DES3_CBC_SHA1:
- case ENCTYPE_ARCFOUR_HMAC:
- case ENCTYPE_ARCFOUR_HMAC_EXP:
- ap_req_options &= ~(AP_OPTS_USE_SUBKEY);
- break;
- }
- }
+ /*
+ * Do not generate a subkey per RFC 4537 unless we are upgrading to CFX,
+ * because pre-CFX tokens do not indicate which key to use. (Note that
+ * DCE_STYLE implies that we will use a subkey.)
+ */
+ if (ctx->proto == 0 &&
+ (ctx->gss_flags & GSS_C_DCE_STYLE) == 0 &&
+ (ap_req_options & AP_OPTS_USE_SUBKEY)) {
+ code = (*kaccess.krb5_auth_con_get_subkey_enctype) (context,
+ auth_context,
+ &negotiated_etype);
+ if (code != 0) {
+ major_status = GSS_S_FAILURE;
+ goto fail;
+ }
+
+ switch (negotiated_etype) {
+ case ENCTYPE_DES_CBC_MD5:
+ case ENCTYPE_DES_CBC_MD4:
+ case ENCTYPE_DES_CBC_CRC:
+ case ENCTYPE_DES3_CBC_SHA1:
+ case ENCTYPE_ARCFOUR_HMAC:
+ case ENCTYPE_ARCFOUR_HMAC_EXP:
+ ap_req_options &= ~(AP_OPTS_USE_SUBKEY);
+ break;
+ }
+ }
if (ctx->proto == 1 || (ctx->gss_flags & GSS_C_DCE_STYLE) ||
- (ap_req_options & AP_OPTS_USE_SUBKEY))
+ (ap_req_options & AP_OPTS_USE_SUBKEY))
cfx_generate_subkey = CFX_ACCEPTOR_SUBKEY;
else
cfx_generate_subkey = 0;
}
ctx->have_acceptor_subkey = 1;
- code = kg_setup_keys(context, ctx, ctx->acceptor_subkey,
- &ctx->acceptor_subkey_cksumtype);
- if (code) {
- major_status = GSS_S_FAILURE;
- goto fail;
- }
+ code = kg_setup_keys(context, ctx, ctx->acceptor_subkey,
+ &ctx->acceptor_subkey_cksumtype);
+ if (code) {
+ major_status = GSS_S_FAILURE;
+ goto fail;
+ }
}
/* the reply token hasn't been sent yet, but that's ok. */
- if (ctx->gss_flags & GSS_C_DCE_STYLE) {
- assert(ctx->have_acceptor_subkey);
+ if (ctx->gss_flags & GSS_C_DCE_STYLE) {
+ assert(ctx->have_acceptor_subkey);
- /* in order to force acceptor subkey to be used, don't set PROT_READY */
+ /* in order to force acceptor subkey to be used, don't set PROT_READY */
- /* Raw AP-REP is returned */
- output_token->length = ap_rep.length;
- output_token->value = ap_rep.data;
- ap_rep.data = NULL; /* don't double free */
+ /* Raw AP-REP is returned */
+ output_token->length = ap_rep.length;
+ output_token->value = ap_rep.data;
+ ap_rep.data = NULL; /* don't double free */
- ctx->established = 0;
+ ctx->established = 0;
- *context_handle = (gss_ctx_id_t)ctx;
- *minor_status = 0;
- major_status = GSS_S_CONTINUE_NEEDED;
+ *context_handle = (gss_ctx_id_t)ctx;
+ *minor_status = 0;
+ major_status = GSS_S_CONTINUE_NEEDED;
- /* Only last leg should set return arguments */
- goto fail;
- } else
- ctx->gss_flags |= GSS_C_PROT_READY_FLAG;
+ /* Only last leg should set return arguments */
+ goto fail;
+ } else
+ ctx->gss_flags |= GSS_C_PROT_READY_FLAG;
ctx->established = 1;
if (ap_rep.data)
krb5_free_data_contents(context, &ap_rep);
if (major_status == GSS_S_COMPLETE ||
- (major_status == GSS_S_CONTINUE_NEEDED && code != KRB5KRB_AP_ERR_MSG_TYPE)) {
+ (major_status == GSS_S_CONTINUE_NEEDED && code != KRB5KRB_AP_ERR_MSG_TYPE)) {
ctx->k5_context = context;
context = NULL;
goto done;
*/
/*SUPPRESS 29*/
if (ctx != NULL) {
- if (ctx->established == 0 && (ctx->gss_flags & GSS_C_DCE_STYLE)) {
- return kg_accept_dce(minor_status, context_handle,
- verifier_cred_handle, input_token,
- input_chan_bindings, src_name, mech_type,
- output_token, ret_flags, time_rec,
- delegated_cred_handle);
- } else {
- *minor_status = EINVAL;
- save_error_string(EINVAL, "accept_sec_context called with existing context handle");
- return GSS_S_FAILURE;
- }
+ if (ctx->established == 0 && (ctx->gss_flags & GSS_C_DCE_STYLE)) {
+ return kg_accept_dce(minor_status, context_handle,
+ verifier_cred_handle, input_token,
+ input_chan_bindings, src_name, mech_type,
+ output_token, ret_flags, time_rec,
+ delegated_cred_handle);
+ } else {
+ *minor_status = EINVAL;
+ save_error_string(EINVAL, "accept_sec_context called with existing context handle");
+ return GSS_S_FAILURE;
+ }
}
return kg_accept_krb5(minor_status, context_handle,
- verifier_cred_handle, input_token,
- input_chan_bindings, src_name, mech_type,
- output_token, ret_flags, time_rec,
- delegated_cred_handle);
+ verifier_cred_handle, input_token,
+ input_chan_bindings, src_name, mech_type,
+ output_token, ret_flags, time_rec,
+ delegated_cred_handle);
}
/* Heimdal calls this gsskrb5_register_acceptor_identity. */
OM_uint32
gss_krb5int_register_acceptor_identity(OM_uint32 *minor_status,
- const gss_OID desired_mech,
- const gss_OID desired_object,
- gss_buffer_t value)
+ const gss_OID desired_mech,
+ const gss_OID desired_object,
+ gss_buffer_t value)
{
char *new, *old;
int err;
if (cred->rcache != NULL) {
code = krb5_rc_close(context, cred->rcache);
if (code) {
- *minor_status = code;
- krb5_free_context(context);
- return GSS_S_FAILURE;
+ *minor_status = code;
+ krb5_free_context(context);
+ return GSS_S_FAILURE;
}
}
OM_uint32 KRB5_CALLCONV
gss_krb5int_copy_ccache(OM_uint32 *minor_status,
- gss_cred_id_t cred_handle,
- const gss_OID desired_object,
- const gss_buffer_t value)
+ gss_cred_id_t cred_handle,
+ const gss_OID desired_object,
+ const gss_buffer_t value)
{
krb5_gss_cred_id_t k5creds;
krb5_cc_cursor cursor;
assert(value->length == sizeof(out_ccache));
if (value->length != sizeof(out_ccache))
- return GSS_S_FAILURE;
+ return GSS_S_FAILURE;
out_ccache = (krb5_ccache)value->value;
krb5_gss_release_oid(minor_status, &ctx->mech_used);
if (ctx->authdata)
- krb5_free_authdata(context, ctx->authdata);
+ krb5_free_authdata(context, ctx->authdata);
if (ctx->k5_context)
krb5_free_context(ctx->k5_context);
OM_uint32 KRB5_CALLCONV
gss_krb5int_get_tkt_flags(OM_uint32 *minor_status,
- const gss_ctx_id_t context_handle,
- const gss_OID desired_object,
- gss_buffer_set_t *data_set)
+ const gss_ctx_id_t context_handle,
+ const gss_OID desired_object,
+ gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
gss_buffer_desc rep;
krb5_error_code
kg_setup_keys(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- krb5_keyblock *subkey,
- krb5_cksumtype *cksumtype);
+ krb5_gss_ctx_id_rec *ctx,
+ krb5_keyblock *subkey,
+ krb5_cksumtype *cksumtype);
int kg_confounder_size (krb5_context context, krb5_keyblock *key);
krb5_error_code kg_encrypt_iov (krb5_context context,
int proto, int dce_style,
- size_t ec, size_t rrc,
- krb5_keyblock *key, int usage,
+ size_t ec, size_t rrc,
+ krb5_keyblock *key, int usage,
krb5_pointer iv,
- gss_iov_buffer_desc *iov,
+ gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code
krb5_error_code
kg_arcfour_docrypt_iov (krb5_context context,
- const krb5_keyblock *longterm_key , int ms_usage,
+ const krb5_keyblock *longterm_key , int ms_usage,
const unsigned char *kd_data, size_t kd_data_len,
- gss_iov_buffer_desc *iov,
+ gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code kg_decrypt (krb5_context context,
krb5_error_code kg_decrypt_iov (krb5_context context,
int proto, int dce_style,
- size_t ec, size_t rrc,
+ size_t ec, size_t rrc,
krb5_keyblock *key, int usage,
krb5_pointer iv,
- gss_iov_buffer_desc *iov,
+ gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 kg_seal (OM_uint32 *minor_status,
/* AEAD */
krb5_error_code gss_krb5int_make_seal_token_v3_iov(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- int conf_req_flag,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype);
+ krb5_gss_ctx_id_rec *ctx,
+ int conf_req_flag,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype);
OM_uint32 gss_krb5int_unseal_v3_iov(krb5_context context,
- OM_uint32 *minor_status,
- krb5_gss_ctx_id_rec *ctx,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int *conf_state,
- gss_qop_t *qop_state,
- int toktype);
+ OM_uint32 *minor_status,
+ krb5_gss_ctx_id_rec *ctx,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ int toktype);
gss_iov_buffer_t kg_locate_iov (gss_iov_buffer_desc *iov,
- int iov_count,
- OM_uint32 type);
+ int iov_count,
+ OM_uint32 type);
void kg_iov_msglen(gss_iov_buffer_desc *iov,
- int iov_count,
- size_t *data_length,
- size_t *assoc_data_length);
+ int iov_count,
+ size_t *data_length,
+ size_t *assoc_data_length);
void kg_release_iov(gss_iov_buffer_desc *iov,
- int iov_count);
+ int iov_count);
krb5_error_code kg_make_checksum_iov_v1(krb5_context context,
- krb5_cksumtype type,
- size_t token_cksum_len,
- krb5_keyblock *seq,
- krb5_keyblock *enc, /* for conf len */
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype,
- krb5_checksum *checksum);
+ krb5_cksumtype type,
+ size_t token_cksum_len,
+ krb5_keyblock *seq,
+ krb5_keyblock *enc, /* for conf len */
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype,
+ krb5_checksum *checksum);
krb5_error_code kg_make_checksum_iov_v3(krb5_context context,
- krb5_cksumtype type,
- size_t rrc,
- krb5_keyblock *key,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count);
+ krb5_cksumtype type,
+ size_t rrc,
+ krb5_keyblock *key,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count);
krb5_error_code kg_verify_checksum_iov_v3(krb5_context context,
- krb5_cksumtype type,
- size_t rrc,
- krb5_keyblock *key,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count,
- krb5_boolean *valid);
+ krb5_cksumtype type,
+ size_t rrc,
+ krb5_keyblock *key,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ krb5_boolean *valid);
OM_uint32 kg_seal_iov (OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype);
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype);
OM_uint32 kg_unseal_iov (OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int *conf_state,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype);
+ gss_ctx_id_t context_handle,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype);
OM_uint32 kg_seal_iov_length(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count);
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count);
krb5_cryptotype kg_translate_flag_iov(OM_uint32 type);
OM_uint32 kg_fixup_padding_iov(OM_uint32 *minor_status,
- gss_iov_buffer_desc *iov,
- int iov_count);
+ gss_iov_buffer_desc *iov,
+ int iov_count);
int kg_map_toktype(int proto, int toktype);
);
OM_uint32 krb5_gss_wrap_iov
-(OM_uint32 *, /* minor_status */
- gss_ctx_id_t, /* context_handle */
- int, /* conf_req_flag */
- gss_qop_t, /* qop_req */
- int *, /* conf_state */
- gss_iov_buffer_desc *, /* iov */
- int /* iov_count */
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ int, /* conf_req_flag */
+ gss_qop_t, /* qop_req */
+ int *, /* conf_state */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
);
OM_uint32
krb5_gss_wrap_iov_length
-(OM_uint32 *, /* minor_status */
- gss_ctx_id_t, /* context_handle */
- int, /* conf_req_flag */
- gss_qop_t, /* qop_req */
- int *, /* conf_state */
- gss_iov_buffer_desc *, /* iov */
- int /* iov_count */
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ int, /* conf_req_flag */
+ gss_qop_t, /* qop_req */
+ int *, /* conf_state */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
);
OM_uint32 krb5_gss_unwrap
);
OM_uint32 krb5_gss_unwrap_iov
-(OM_uint32 *, /* minor_status */
- gss_ctx_id_t, /* context_handle */
- int *, /* conf_state */
- gss_qop_t *, /* qop_state */
- gss_iov_buffer_desc *, /* iov */
- int /* iov_count */
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ int *, /* conf_state */
+ gss_qop_t *, /* qop_state */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
);
OM_uint32 krb5_gss_wrap_size_limit
OM_uint32
gss_krb5int_free_lucid_sec_context(OM_uint32 *, const gss_OID,
- const gss_OID, gss_buffer_t);
+ const gss_OID, gss_buffer_t);
extern k5_mutex_t kg_kdc_flag_mutex;
krb5_error_code krb5_gss_init_context (krb5_context *ctxp);
#define GSS_KRB5_USE_KDC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x08"
OM_uint32 krb5int_gss_use_kdc_context(OM_uint32 *, const gss_OID,
- const gss_OID, gss_buffer_t);
+ const gss_OID, gss_buffer_t);
krb5_error_code krb5_gss_use_kdc_context(void);
OM_uint32
gss_krb5int_extract_authz_data_from_sec_context(OM_uint32 *minor_status,
- const gss_ctx_id_t context_handle,
- const gss_OID desired_object,
- gss_buffer_set_t *ad_data);
+ const gss_ctx_id_t context_handle,
+ const gss_OID desired_object,
+ gss_buffer_set_t *ad_data);
#define GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH 11
#define GSS_KRB5_SET_CRED_RCACHE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0b"
OM_uint32
gss_krb5int_extract_authtime_from_sec_context(OM_uint32 *,
- const gss_ctx_id_t,
- const gss_OID,
- gss_buffer_set_t *);
+ const gss_ctx_id_t,
+ const gss_OID,
+ gss_buffer_set_t *);
#ifdef _GSS_STATIC_LINK
int gss_krb5int_lib_init(void);
}
#define g_OID_prefix_equal(o1, o2) \
- (((o1)->length >= (o2)->length) && \
- (memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0))
+ (((o1)->length >= (o2)->length) && \
+ (memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0))
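/*
 * Sketch of the composed-OID pattern used with the dispatch tables below
 * (the ad_type value and function name are made-up examples): the caller
 * appends an integer suffix to a registered base OID, the table lookup
 * matches on the base via g_OID_prefix_equal(), and the handler recovers
 * the suffix with generic_gss_oid_decompose().
 */
static void
example_compose_authz_oid(void)
{
    OM_uint32 minor;
    unsigned char oid_buf[GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH + 6];
    gss_OID_desc oid;
    int ad_type = 128;          /* example authorization-data type */

    oid.elements = oid_buf;
    oid.length = sizeof(oid_buf);
    if (generic_gss_oid_compose(&minor,
                                GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID,
                                GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH,
                                ad_type, &oid) == GSS_S_COMPLETE) {
        /* &oid now names the base OID with ad_type appended, and still
         * satisfies g_OID_prefix_equal(&oid, <base OID entry>). */
    }
}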
/*
* gss_inquire_sec_context_by_oid() methods
OM_uint32 (*func)(OM_uint32 *, const gss_ctx_id_t, const gss_OID, gss_buffer_set_t *);
} krb5_gss_inquire_sec_context_by_oid_ops[] = {
{
- {GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH, GSS_KRB5_GET_TKT_FLAGS_OID},
- gss_krb5int_get_tkt_flags
+ {GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH, GSS_KRB5_GET_TKT_FLAGS_OID},
+ gss_krb5int_get_tkt_flags
},
{
- {GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID},
- gss_krb5int_extract_authz_data_from_sec_context
+ {GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID},
+ gss_krb5int_extract_authz_data_from_sec_context
},
{
- {GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH, GSS_KRB5_INQ_SSPI_SESSION_KEY_OID},
- gss_krb5int_inq_session_key
+ {GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH, GSS_KRB5_INQ_SSPI_SESSION_KEY_OID},
+ gss_krb5int_inq_session_key
},
{
- {GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID},
- gss_krb5int_export_lucid_sec_context
+ {GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID},
+ gss_krb5int_export_lucid_sec_context
},
{
- {GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID},
- gss_krb5int_extract_authtime_from_sec_context
+ {GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID},
+ gss_krb5int_extract_authtime_from_sec_context
}
};
static OM_uint32
krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status,
- const gss_ctx_id_t context_handle,
- const gss_OID desired_object,
- gss_buffer_set_t *data_set)
+ const gss_ctx_id_t context_handle,
+ const gss_OID desired_object,
+ gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
size_t i;
if (minor_status == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
if (!kg_validate_ctx_id(context_handle))
- return GSS_S_NO_CONTEXT;
+ return GSS_S_NO_CONTEXT;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
if (!ctx->established)
- return GSS_S_NO_CONTEXT;
+ return GSS_S_NO_CONTEXT;
for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/
- sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
- if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
- return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
- context_handle,
- desired_object,
- data_set);
- }
+ sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
+ if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
+ return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
+ context_handle,
+ desired_object,
+ data_set);
+ }
}
*minor_status = EINVAL;
static OM_uint32
krb5_gss_inquire_cred_by_oid(OM_uint32 *minor_status,
- const gss_cred_id_t cred_handle,
- const gss_OID desired_object,
- gss_buffer_set_t *data_set)
+ const gss_cred_id_t cred_handle,
+ const gss_OID desired_object,
+ gss_buffer_set_t *data_set)
{
OM_uint32 major_status = GSS_S_FAILURE;
krb5_gss_cred_id_t cred;
size_t i;
if (minor_status == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
if (cred_handle == GSS_C_NO_CREDENTIAL) {
- *minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
- return GSS_S_NO_CRED;
+ *minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
+ return GSS_S_NO_CRED;
}
major_status = krb5_gss_validate_cred(minor_status, cred_handle);
if (GSS_ERROR(major_status))
- return major_status;
+ return major_status;
cred = (krb5_gss_cred_id_t) cred_handle;
#if 0
for (i = 0; i < sizeof(krb5_gss_inquire_cred_by_oid_ops)/
- sizeof(krb5_gss_inquire_cred_by_oid_ops[0]); i++) {
- if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_cred_by_oid_ops[i].oid)) {
- return (*krb5_gss_inquire_cred_by_oid_ops[i].func)(minor_status,
- cred_handle,
- desired_object,
- data_set);
- }
+ sizeof(krb5_gss_inquire_cred_by_oid_ops[0]); i++) {
+ if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_cred_by_oid_ops[i].oid)) {
+ return (*krb5_gss_inquire_cred_by_oid_ops[i].func)(minor_status,
+ cred_handle,
+ desired_object,
+ data_set);
+ }
}
#endif
static OM_uint32
krb5_gss_set_sec_context_option (OM_uint32 *minor_status,
- gss_ctx_id_t *context_handle,
- const gss_OID desired_object,
- const gss_buffer_t value)
+ gss_ctx_id_t *context_handle,
+ const gss_OID desired_object,
+ const gss_buffer_t value)
{
size_t i;
if (minor_status == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (context_handle == NULL)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
if (desired_object == GSS_C_NO_OID)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
if (*context_handle != GSS_C_NO_CONTEXT) {
- krb5_gss_ctx_id_rec *ctx;
+ krb5_gss_ctx_id_rec *ctx;
- if (!kg_validate_ctx_id(*context_handle))
- return GSS_S_NO_CONTEXT;
+ if (!kg_validate_ctx_id(*context_handle))
+ return GSS_S_NO_CONTEXT;
- ctx = (krb5_gss_ctx_id_rec *) context_handle;
+ ctx = (krb5_gss_ctx_id_rec *) context_handle;
- if (!ctx->established)
- return GSS_S_NO_CONTEXT;
+ if (!ctx->established)
+ return GSS_S_NO_CONTEXT;
}
#if 0
for (i = 0; i < sizeof(krb5_gss_set_sec_context_option_ops)/
- sizeof(krb5_gss_set_sec_context_option_ops[0]); i++) {
- if (g_OID_prefix_equal(desired_object, &krb5_gss_set_sec_context_option_ops[i].oid)) {
- return (*krb5_gss_set_sec_context_option_ops[i].func)(minor_status,
- context_handle,
- desired_object,
- value);
- }
+ sizeof(krb5_gss_set_sec_context_option_ops[0]); i++) {
+ if (g_OID_prefix_equal(desired_object, &krb5_gss_set_sec_context_option_ops[i].oid)) {
+ return (*krb5_gss_set_sec_context_option_ops[i].func)(minor_status,
+ context_handle,
+ desired_object,
+ value);
+ }
}
#endif
OM_uint32 (*func)(OM_uint32 *, gss_cred_id_t, const gss_OID, const gss_buffer_t);
} krb5_gssspi_set_cred_option_ops[] = {
{
- {GSS_KRB5_COPY_CCACHE_OID_LENGTH, GSS_KRB5_COPY_CCACHE_OID},
- gss_krb5int_copy_ccache
+ {GSS_KRB5_COPY_CCACHE_OID_LENGTH, GSS_KRB5_COPY_CCACHE_OID},
+ gss_krb5int_copy_ccache
},
{
- {GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH, GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID},
- gss_krb5int_set_allowable_enctypes
+ {GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH, GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID},
+ gss_krb5int_set_allowable_enctypes
},
{
- {GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH, GSS_KRB5_SET_CRED_RCACHE_OID},
- gss_krb5int_set_cred_rcache
+ {GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH, GSS_KRB5_SET_CRED_RCACHE_OID},
+ gss_krb5int_set_cred_rcache
}
};
static OM_uint32
krb5_gssspi_set_cred_option(OM_uint32 *minor_status,
- gss_cred_id_t cred_handle,
- const gss_OID desired_object,
- const gss_buffer_t value)
+ gss_cred_id_t cred_handle,
+ const gss_OID desired_object,
+ const gss_buffer_t value)
{
OM_uint32 major_status = GSS_S_FAILURE;
size_t i;
if (minor_status == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (cred_handle == GSS_C_NO_CREDENTIAL) {
- *minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
- return GSS_S_NO_CRED;
+ *minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
+ return GSS_S_NO_CRED;
}
if (desired_object == GSS_C_NO_OID)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
major_status = krb5_gss_validate_cred(minor_status, cred_handle);
if (GSS_ERROR(major_status))
- return major_status;
+ return major_status;
for (i = 0; i < sizeof(krb5_gssspi_set_cred_option_ops)/
- sizeof(krb5_gssspi_set_cred_option_ops[0]); i++) {
- if (g_OID_prefix_equal(desired_object, &krb5_gssspi_set_cred_option_ops[i].oid)) {
- return (*krb5_gssspi_set_cred_option_ops[i].func)(minor_status,
- cred_handle,
- desired_object,
- value);
- }
+ sizeof(krb5_gssspi_set_cred_option_ops[0]); i++) {
+ if (g_OID_prefix_equal(desired_object, &krb5_gssspi_set_cred_option_ops[i].oid)) {
+ return (*krb5_gssspi_set_cred_option_ops[i].func)(minor_status,
+ cred_handle,
+ desired_object,
+ value);
+ }
}
*minor_status = EINVAL;
OM_uint32 (*func)(OM_uint32 *, const gss_OID, const gss_OID, gss_buffer_t);
} krb5_gssspi_mech_invoke_ops[] = {
{
- {GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH, GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID},
- gss_krb5int_register_acceptor_identity
+ {GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH, GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID},
+ gss_krb5int_register_acceptor_identity
},
{
- {GSS_KRB5_CCACHE_NAME_OID_LENGTH, GSS_KRB5_CCACHE_NAME_OID},
- gss_krb5int_ccache_name
+ {GSS_KRB5_CCACHE_NAME_OID_LENGTH, GSS_KRB5_CCACHE_NAME_OID},
+ gss_krb5int_ccache_name
},
{
- {GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID},
- gss_krb5int_free_lucid_sec_context
+ {GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID},
+ gss_krb5int_free_lucid_sec_context
},
{
- {GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH, GSS_KRB5_USE_KDC_CONTEXT_OID},
- krb5int_gss_use_kdc_context
+ {GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH, GSS_KRB5_USE_KDC_CONTEXT_OID},
+ krb5int_gss_use_kdc_context
}
};
static OM_uint32
krb5_gssspi_mech_invoke (OM_uint32 *minor_status,
- const gss_OID desired_mech,
- const gss_OID desired_object,
- gss_buffer_t value)
+ const gss_OID desired_mech,
+ const gss_OID desired_object,
+ gss_buffer_t value)
{
size_t i;
if (minor_status == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_mech == GSS_C_NO_OID)
- return GSS_S_BAD_MECH;
+ return GSS_S_BAD_MECH;
if (desired_object == GSS_C_NO_OID)
- return GSS_S_CALL_INACCESSIBLE_READ;
+ return GSS_S_CALL_INACCESSIBLE_READ;
for (i = 0; i < sizeof(krb5_gssspi_mech_invoke_ops)/
- sizeof(krb5_gssspi_mech_invoke_ops[0]); i++) {
- if (g_OID_prefix_equal(desired_object, &krb5_gssspi_mech_invoke_ops[i].oid)) {
- return (*krb5_gssspi_mech_invoke_ops[i].func)(minor_status,
- desired_mech,
- desired_object,
- value);
- }
+ sizeof(krb5_gssspi_mech_invoke_ops[0]); i++) {
+ if (g_OID_prefix_equal(desired_object, &krb5_gssspi_mech_invoke_ops[i].oid)) {
+ return (*krb5_gssspi_mech_invoke_ops[i].func)(minor_status,
+ desired_mech,
+ desired_object,
+ value);
+ }
}
*minor_status = EINVAL;
krb5_gss_set_sec_context_option,
krb5_gssspi_set_cred_option,
krb5_gssspi_mech_invoke,
- NULL, /* wrap_aead */
- NULL, /* unwrap_aead */
+ NULL, /* wrap_aead */
+ NULL, /* unwrap_aead */
krb5_gss_wrap_iov,
krb5_gss_unwrap_iov,
krb5_gss_wrap_iov_length,
- NULL, /* complete_auth_token */
+ NULL, /* complete_auth_token */
};
return err;
err = k5_mutex_finish_init(&kg_vdb.mutex);
if (err)
- return err;
+ return err;
#endif
#ifdef _GSS_STATIC_LINK
err = gss_krb5mechglue_init();
if (err)
- return err;
+ return err;
#endif
return 0;
OM_uint32 KRB5_CALLCONV
gsskrb5_extract_authz_data_from_sec_context(OM_uint32 *minor_status,
- const gss_ctx_id_t context_handle,
- int ad_type,
- gss_buffer_t ad_data);
+ const gss_ctx_id_t context_handle,
+ int ad_type,
+ gss_buffer_t ad_data);
OM_uint32 KRB5_CALLCONV
gss_krb5_set_cred_rcache(OM_uint32 *minor_status,
- gss_cred_id_t cred,
- krb5_rcache rcache);
+ gss_cred_id_t cred,
+ krb5_rcache rcache);
OM_uint32 KRB5_CALLCONV
gsskrb5_extract_authtime_from_sec_context(OM_uint32 *, gss_ctx_id_t, krb5_timestamp *);
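/*
 * Usage sketch for the prototype above (hypothetical function and
 * variable names; error handling trimmed): once accept_sec_context has
 * completed, the acceptor can ask the established context for the
 * ticket's authtime.
 */
static krb5_timestamp
example_get_authtime(gss_ctx_id_t established_ctx)
{
    OM_uint32 minor;
    krb5_timestamp authtime = 0;

    /* returns 0 if the extension call fails */
    if (gsskrb5_extract_authtime_from_sec_context(&minor, established_ctx,
                                                  &authtime) != GSS_S_COMPLETE)
        return 0;
    return authtime;            /* Kerberos authentication time from the ticket */
}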
/* build up the token */
if (ctx->gss_flags & GSS_C_DCE_STYLE) {
- /*
- * For DCE RPC, do not encapsulate the AP-REQ in the
- * typical GSS wrapping.
- */
- token->length = ap_req.length;
- token->value = ap_req.data;
-
- ap_req.data = NULL; /* don't double free */
+ /*
+ * For DCE RPC, do not encapsulate the AP-REQ in the
+ * typical GSS wrapping.
+ */
+ token->length = ap_req.length;
+ token->value = ap_req.data;
+
+ ap_req.data = NULL; /* don't double free */
} else {
- /* allocate space for the token */
- tlen = g_token_size((gss_OID) mech_type, ap_req.length);
+ /* allocate space for the token */
+ tlen = g_token_size((gss_OID) mech_type, ap_req.length);
- if ((t = (unsigned char *) xmalloc(tlen)) == NULL) {
- code = ENOMEM;
- goto cleanup;
- }
+ if ((t = (unsigned char *) xmalloc(tlen)) == NULL) {
+ code = ENOMEM;
+ goto cleanup;
+ }
- /* fill in the buffer */
- ptr = t;
+ /* fill in the buffer */
+ ptr = t;
- g_make_token_header(mech_type, ap_req.length,
- &ptr, KG_TOK_CTX_AP_REQ);
+ g_make_token_header(mech_type, ap_req.length,
+ &ptr, KG_TOK_CTX_AP_REQ);
- TWRITE_STR(ptr, (unsigned char *) ap_req.data, ap_req.length);
+ TWRITE_STR(ptr, (unsigned char *) ap_req.data, ap_req.length);
- /* pass it back */
+ /* pass it back */
- token->length = tlen;
- token->value = (void *) t;
+ token->length = tlen;
+ token->value = (void *) t;
}
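/*
 * Resulting token layout in the non-DCE branch above (sketched for
 * reference; TOK_ID value per RFC 1964 for the krb5 mechanism):
 *
 *   0x60 <len>              RFC 2743 section 3.1 framing (APPLICATION 0)
 *   0x06 <oidlen> <oid>     thisMech -- the mech_type OID
 *   0x01 0x00               TOK_ID for an initial context token
 *   <AP-REQ>                the DER-encoded AP-REQ built earlier
 *
 * With GSS_C_DCE_STYLE the framing is omitted entirely and the raw
 * AP-REQ bytes are the output token.
 */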
code = 0;
GSS_C_TRANS_FLAG |
((req_flags) & (GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG |
GSS_C_SEQUENCE_FLAG | GSS_C_DELEG_FLAG |
- GSS_C_DCE_STYLE | GSS_C_IDENTIFY_FLAG |
- GSS_C_EXTENDED_ERROR_FLAG)));
+ GSS_C_DCE_STYLE | GSS_C_IDENTIFY_FLAG |
+ GSS_C_EXTENDED_ERROR_FLAG)));
ctx->seed_init = 0;
ctx->big_endian = 0; /* all initiators do little-endian, as per spec */
ctx->seqstate = 0;
if (req_flags & GSS_C_DCE_STYLE)
- ctx->gss_flags |= GSS_C_MUTUAL_FLAG;
+ ctx->gss_flags |= GSS_C_MUTUAL_FLAG;
if ((code = krb5_timeofday(context, &now)))
goto fail;
ctx->have_acceptor_subkey = 0;
code = kg_setup_keys(context, ctx, ctx->subkey, &ctx->cksumtype);
if (code != 0)
- goto fail;
+ goto fail;
/* at this point, the context is constructed and valid,
hence, releaseable */
ptr = (unsigned char *) input_token->value;
if (ctx->gss_flags & GSS_C_DCE_STYLE) {
- /* Raw AP-REP */
- ap_rep.length = input_token->length;
- ap_rep.data = (char *)input_token->value;
+ /* Raw AP-REP */
+ ap_rep.length = input_token->length;
+ ap_rep.data = (char *)input_token->value;
} else if (g_verify_token_header(ctx->mech_used,
&(ap_rep.length),
&ptr, KG_TOK_CTX_AP_REP,
(ctx->gss_flags & GSS_C_SEQUENCE_FLAG) !=0, ctx->proto);
if (ap_rep_data->subkey != NULL &&
- (ctx->proto == 1 || (ctx->gss_flags & GSS_C_DCE_STYLE) ||
- ap_rep_data->subkey->enctype != ctx->subkey->enctype)) {
+ (ctx->proto == 1 || (ctx->gss_flags & GSS_C_DCE_STYLE) ||
+ ap_rep_data->subkey->enctype != ctx->subkey->enctype)) {
/* Keep acceptor's subkey. */
ctx->have_acceptor_subkey = 1;
code = krb5_copy_keyblock(context, ap_rep_data->subkey,
&ctx->acceptor_subkey);
if (code) {
- krb5_free_ap_rep_enc_part(context, ap_rep_data);
+ krb5_free_ap_rep_enc_part(context, ap_rep_data);
goto fail;
- }
- code = kg_setup_keys(context, ctx, ctx->acceptor_subkey,
- &ctx->acceptor_subkey_cksumtype);
- if (code) {
- krb5_free_ap_rep_enc_part(context, ap_rep_data);
- goto fail;
- }
+ }
+ code = kg_setup_keys(context, ctx, ctx->acceptor_subkey,
+ &ctx->acceptor_subkey_cksumtype);
+ if (code) {
+ krb5_free_ap_rep_enc_part(context, ap_rep_data);
+ goto fail;
+ }
}
/* free the ap_rep_data */
krb5_free_ap_rep_enc_part(context, ap_rep_data);
if (ctx->gss_flags & GSS_C_DCE_STYLE) {
- krb5_data outbuf;
+ krb5_data outbuf;
- code = krb5_mk_rep_dce(context, ctx->auth_context, &outbuf);
- if (code)
- goto fail;
+ code = krb5_mk_rep_dce(context, ctx->auth_context, &outbuf);
+ if (code)
+ goto fail;
- output_token->value = outbuf.data;
- output_token->length = outbuf.length;
+ output_token->value = outbuf.data;
+ output_token->length = outbuf.length;
}
/* set established */
#ifndef _WIN32
OM_uint32
krb5int_gss_use_kdc_context(OM_uint32 *minor_status,
- const gss_OID desired_mech,
- const gss_OID desired_object,
- gss_buffer_t value)
+ const gss_OID desired_mech,
+ const gss_OID desired_object,
+ gss_buffer_t value)
{
OM_uint32 err;
return err;
*minor_status = k5_mutex_lock(&kg_kdc_flag_mutex);
if (*minor_status) {
- return GSS_S_FAILURE;
+ return GSS_S_FAILURE;
}
kdc_flag = 1;
k5_mutex_unlock(&kg_kdc_flag_mutex);
major_status = generic_gss_add_buffer_set_member(minor_status, &keyvalue, data_set);
if (GSS_ERROR(major_status))
- goto cleanup;
+ goto cleanup;
oid.elements = oid_buf;
oid.length = sizeof(oid_buf);
major_status = generic_gss_oid_compose(minor_status,
- GSS_KRB5_SESSION_KEY_ENCTYPE_OID,
- GSS_KRB5_SESSION_KEY_ENCTYPE_OID_LENGTH,
- key->enctype,
- &oid);
+ GSS_KRB5_SESSION_KEY_ENCTYPE_OID,
+ GSS_KRB5_SESSION_KEY_ENCTYPE_OID_LENGTH,
+ key->enctype,
+ &oid);
if (GSS_ERROR(major_status))
- goto cleanup;
+ goto cleanup;
keyinfo.value = oid.elements;
keyinfo.length = oid.length;
major_status = generic_gss_add_buffer_set_member(minor_status, &keyinfo, data_set);
if (GSS_ERROR(major_status))
- goto cleanup;
+ goto cleanup;
return GSS_S_COMPLETE;
cleanup:
if (*data_set != GSS_C_NO_BUFFER_SET) {
- if ((*data_set)->count != 0)
- memset((*data_set)->elements[0].value, 0, (*data_set)->elements[0].length);
- gss_release_buffer_set(&minor, data_set);
+ if ((*data_set)->count != 0)
+ memset((*data_set)->elements[0].value, 0, (*data_set)->elements[0].length);
+ gss_release_buffer_set(&minor, data_set);
}
return major_status;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
major_status = generic_gss_oid_decompose(minor_status,
- GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID,
- GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH,
- desired_object,
- &ad_type);
+ GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID,
+ GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH,
+ desired_object,
+ &ad_type);
if (major_status != GSS_S_COMPLETE || ad_type == 0) {
- *minor_status = ENOENT;
- return GSS_S_FAILURE;
+ *minor_status = ENOENT;
+ return GSS_S_FAILURE;
}
if (ctx->authdata != NULL) {
- for (i = 0; ctx->authdata[i] != NULL; i++) {
- if (ctx->authdata[i]->ad_type == ad_type) {
- gss_buffer_desc ad_data;
-
- ad_data.length = ctx->authdata[i]->length;
- ad_data.value = ctx->authdata[i]->contents;
-
- major_status = generic_gss_add_buffer_set_member(minor_status,
- &ad_data, data_set);
- if (GSS_ERROR(major_status))
- break;
- }
- }
+ for (i = 0; ctx->authdata[i] != NULL; i++) {
+ if (ctx->authdata[i]->ad_type == ad_type) {
+ gss_buffer_desc ad_data;
+
+ ad_data.length = ctx->authdata[i]->length;
+ ad_data.value = ctx->authdata[i]->contents;
+
+ major_status = generic_gss_add_buffer_set_member(minor_status,
+ &ad_data, data_set);
+ if (GSS_ERROR(major_status))
+ break;
+ }
+ }
}
if (GSS_ERROR(major_status)) {
- OM_uint32 tmp;
+ OM_uint32 tmp;
- generic_gss_release_buffer_set(&tmp, data_set);
+ generic_gss_release_buffer_set(&tmp, data_set);
}
return major_status;
OM_uint32
gss_krb5int_extract_authtime_from_sec_context(OM_uint32 *minor_status,
- const gss_ctx_id_t context_handle,
+ const gss_ctx_id_t context_handle,
const gss_OID desired_oid,
gss_buffer_set_t *data_set)
{
/* create the seq_num */
if ((code = kg_make_seq_num(context, seq, direction?0:0xff,
- (krb5_ui_4)*seqnum, ptr+14, ptr+6))) {
+ (krb5_ui_4)*seqnum, ptr+14, ptr+6))) {
xfree (plain);
xfree(t);
return(code);
*/
#include <assert.h>
-#include "k5-platform.h" /* for 64-bit support */
-#include "k5-int.h" /* for zap() */
+#include "k5-platform.h" /* for 64-bit support */
+#include "k5-int.h" /* for zap() */
#include "gssapiP_krb5.h"
#include <stdarg.h>
static krb5_error_code
make_seal_token_v1_iov(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- int conf_req_flag,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ krb5_gss_ctx_id_rec *ctx,
+ int conf_req_flag,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
krb5_error_code code = 0;
gss_iov_buffer_t header;
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
if (header == NULL)
- return EINVAL;
+ return EINVAL;
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (padding == NULL && (ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
- return EINVAL;
+ return EINVAL;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
if (trailer != NULL)
- trailer->buffer.length = 0;
+ trailer->buffer.length = 0;
/* Determine confounder length */
if (toktype == KG_TOK_WRAP_MSG || conf_req_flag)
- k5_headerlen = kg_confounder_size(context, ctx->enc);
+ k5_headerlen = kg_confounder_size(context, ctx->enc);
/* Check padding length */
if (toktype == KG_TOK_WRAP_MSG) {
- size_t k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
- size_t gss_padlen;
- size_t conf_data_length;
-
- kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
- conf_data_length = k5_headerlen + data_length - assoc_data_length;
-
- if (k5_padlen == 1)
- gss_padlen = 1; /* one byte to indicate one byte of padding */
- else
- gss_padlen = k5_padlen - (conf_data_length % k5_padlen);
-
- if (ctx->gss_flags & GSS_C_DCE_STYLE) {
- /* DCE will pad the actual data itself; padding buffer optional and will be zeroed */
- gss_padlen = 0;
-
- if (conf_data_length % k5_padlen)
- code = KRB5_BAD_MSIZE;
- } else if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
- code = kg_allocate_iov(padding, gss_padlen);
- } else if (padding->buffer.length < gss_padlen) {
- code = KRB5_BAD_MSIZE;
- }
- if (code != 0)
- goto cleanup;
-
- /* Initialize padding buffer to pad itself */
- if (padding != NULL) {
- padding->buffer.length = gss_padlen;
- memset(padding->buffer.value, (int)gss_padlen, gss_padlen);
- }
-
- if (ctx->gss_flags & GSS_C_DCE_STYLE)
- tmsglen = k5_headerlen; /* confounder length */
- else
- tmsglen = conf_data_length + padding->buffer.length;
+ size_t k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
+ size_t gss_padlen;
+ size_t conf_data_length;
+
+ kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
+ conf_data_length = k5_headerlen + data_length - assoc_data_length;
+
+ if (k5_padlen == 1)
+ gss_padlen = 1; /* one byte to indicate one byte of padding */
+ else
+ gss_padlen = k5_padlen - (conf_data_length % k5_padlen);
+
+ if (ctx->gss_flags & GSS_C_DCE_STYLE) {
+ /* DCE will pad the actual data itself; padding buffer optional and will be zeroed */
+ gss_padlen = 0;
+
+ if (conf_data_length % k5_padlen)
+ code = KRB5_BAD_MSIZE;
+ } else if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
+ code = kg_allocate_iov(padding, gss_padlen);
+ } else if (padding->buffer.length < gss_padlen) {
+ code = KRB5_BAD_MSIZE;
+ }
+ if (code != 0)
+ goto cleanup;
+
+ /* Initialize padding buffer to pad itself */
+ if (padding != NULL) {
+ padding->buffer.length = gss_padlen;
+ memset(padding->buffer.value, (int)gss_padlen, gss_padlen);
+ }
+
+ if (ctx->gss_flags & GSS_C_DCE_STYLE)
+ tmsglen = k5_headerlen; /* confounder length */
+ else
+ tmsglen = conf_data_length + padding->buffer.length;
}
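/*
 * Worked example of the padding rule above (illustrative numbers only):
 * for the DES-family seal algorithms k5_padlen is 8, so if the confounder
 * plus non-associated data totals 21 bytes, gss_padlen is 8 - (21 % 8) = 3
 * and each pad byte is written as the value 3.  When the length is already
 * a multiple of 8, a full block of eight 0x08 bytes is emitted, so at
 * least one pad byte is always present; for RC4 (k5_padlen == 1) the
 * padding is always a single 0x01 byte.  With GSS_C_DCE_STYLE no GSS
 * padding is added at all, as handled above.
 */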
/* Determine token size */
k5_headerlen += tlen - tmsglen;
if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
- code = kg_allocate_iov(header, k5_headerlen);
+ code = kg_allocate_iov(header, k5_headerlen);
else if (header->buffer.length < k5_headerlen)
- code = KRB5_BAD_MSIZE;
+ code = KRB5_BAD_MSIZE;
if (code != 0)
- goto cleanup;
+ goto cleanup;
header->buffer.length = k5_headerlen;
/* 2..3 SEAL_ALG or Filler */
if (toktype == KG_TOK_WRAP_MSG && conf_req_flag) {
- ptr[2] = (ctx->sealalg ) & 0xFF;
- ptr[3] = (ctx->sealalg >> 8) & 0xFF;
+ ptr[2] = (ctx->sealalg ) & 0xFF;
+ ptr[3] = (ctx->sealalg >> 8) & 0xFF;
} else {
- /* No seal */
- ptr[2] = 0xFF;
- ptr[3] = 0xFF;
+ /* No seal */
+ ptr[2] = 0xFF;
+ ptr[3] = 0xFF;
}
/* 4..5 Filler */
switch (ctx->signalg) {
case SGN_ALG_DES_MAC_MD5:
case SGN_ALG_MD2_5:
- md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
+ break;
case SGN_ALG_HMAC_SHA1_DES3_KD:
- md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
+ break;
case SGN_ALG_HMAC_MD5:
- md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
- if (toktype != KG_TOK_WRAP_MSG)
- sign_usage = 15;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
+ if (toktype != KG_TOK_WRAP_MSG)
+ sign_usage = 15;
+ break;
default:
case SGN_ALG_DES_MAC:
- abort ();
+ abort ();
}
code = krb5_c_checksum_length(context, md5cksum.checksum_type, &k5_trailerlen);
if (code != 0)
- goto cleanup;
+ goto cleanup;
md5cksum.length = k5_trailerlen;
if (k5_headerlen != 0) {
- code = kg_make_confounder(context, ctx->enc, ptr + 14 + ctx->cksum_size);
- if (code != 0)
- goto cleanup;
+ code = kg_make_confounder(context, ctx->enc, ptr + 14 + ctx->cksum_size);
+ if (code != 0)
+ goto cleanup;
}
/* compute the checksum */
code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type,
- ctx->cksum_size, ctx->seq, ctx->enc,
- sign_usage, iov, iov_count, toktype,
- &md5cksum);
+ ctx->cksum_size, ctx->seq, ctx->enc,
+ sign_usage, iov, iov_count, toktype,
+ &md5cksum);
if (code != 0)
- goto cleanup;
+ goto cleanup;
switch (ctx->signalg) {
case SGN_ALG_DES_MAC_MD5:
case SGN_ALG_3:
- code = kg_encrypt(context, ctx->seq, KG_USAGE_SEAL,
- (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ?
- ctx->seq->contents : NULL),
- md5cksum.contents, md5cksum.contents, 16);
- if (code != 0)
- goto cleanup;
-
- cksum.length = ctx->cksum_size;
- cksum.contents = md5cksum.contents + 16 - cksum.length;
-
- memcpy(ptr + 14, cksum.contents, cksum.length);
- break;
+ code = kg_encrypt(context, ctx->seq, KG_USAGE_SEAL,
+ (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ?
+ ctx->seq->contents : NULL),
+ md5cksum.contents, md5cksum.contents, 16);
+ if (code != 0)
+ goto cleanup;
+
+ cksum.length = ctx->cksum_size;
+ cksum.contents = md5cksum.contents + 16 - cksum.length;
+
+ memcpy(ptr + 14, cksum.contents, cksum.length);
+ break;
case SGN_ALG_HMAC_SHA1_DES3_KD:
- assert(md5cksum.length == ctx->cksum_size);
- memcpy(ptr + 14, md5cksum.contents, md5cksum.length);
- break;
+ assert(md5cksum.length == ctx->cksum_size);
+ memcpy(ptr + 14, md5cksum.contents, md5cksum.length);
+ break;
case SGN_ALG_HMAC_MD5:
- memcpy(ptr + 14, md5cksum.contents, ctx->cksum_size);
- break;
+ memcpy(ptr + 14, md5cksum.contents, ctx->cksum_size);
+ break;
}
/* create the seq_num */
code = kg_make_seq_num(context, ctx->seq, ctx->initiate ? 0 : 0xFF,
- (OM_uint32)ctx->seq_send, ptr + 14, ptr + 6);
+ (OM_uint32)ctx->seq_send, ptr + 14, ptr + 6);
if (code != 0)
- goto cleanup;
+ goto cleanup;
if (conf_req_flag) {
- if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
- unsigned char bigend_seqnum[4];
- krb5_keyblock *enc_key;
- size_t i;
-
- bigend_seqnum[0] = (ctx->seq_send >> 24) & 0xFF;
- bigend_seqnum[1] = (ctx->seq_send >> 16) & 0xFF;
- bigend_seqnum[2] = (ctx->seq_send >> 8 ) & 0xFF;
- bigend_seqnum[3] = (ctx->seq_send ) & 0xFF;
-
- code = krb5_copy_keyblock(context, ctx->enc, &enc_key);
- if (code != 0)
- goto cleanup;
-
- assert(enc_key->length == 16);
-
- for (i = 0; i < enc_key->length; i++)
- ((char *)enc_key->contents)[i] ^= 0xF0;
-
- code = kg_arcfour_docrypt_iov(context, enc_key, 0,
- bigend_seqnum, 4,
- iov, iov_count);
- krb5_free_keyblock(context, enc_key);
- } else {
- code = kg_encrypt_iov(context, ctx->proto,
- ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
- 0 /*EC*/, 0 /*RRC*/,
- ctx->enc, KG_USAGE_SEAL, NULL,
- iov, iov_count);
- }
- if (code != 0)
- goto cleanup;
+ if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
+ unsigned char bigend_seqnum[4];
+ krb5_keyblock *enc_key;
+ size_t i;
+
+ bigend_seqnum[0] = (ctx->seq_send >> 24) & 0xFF;
+ bigend_seqnum[1] = (ctx->seq_send >> 16) & 0xFF;
+ bigend_seqnum[2] = (ctx->seq_send >> 8 ) & 0xFF;
+ bigend_seqnum[3] = (ctx->seq_send ) & 0xFF;
+
+ code = krb5_copy_keyblock(context, ctx->enc, &enc_key);
+ if (code != 0)
+ goto cleanup;
+
+ assert(enc_key->length == 16);
+
+ for (i = 0; i < enc_key->length; i++)
+ ((char *)enc_key->contents)[i] ^= 0xF0;
+
+ code = kg_arcfour_docrypt_iov(context, enc_key, 0,
+ bigend_seqnum, 4,
+ iov, iov_count);
+ krb5_free_keyblock(context, enc_key);
+ } else {
+ code = kg_encrypt_iov(context, ctx->proto,
+ ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
+ 0 /*EC*/, 0 /*RRC*/,
+ ctx->enc, KG_USAGE_SEAL, NULL,
+ iov, iov_count);
+ }
+ if (code != 0)
+ goto cleanup;
}
ctx->seq_send++;
code = 0;
if (conf_state != NULL)
- *conf_state = conf_req_flag;
+ *conf_state = conf_req_flag;
cleanup:
if (code != 0)
- kg_release_iov(iov, iov_count);
+ kg_release_iov(iov, iov_count);
krb5_free_checksum_contents(context, &md5cksum);
return code;
OM_uint32
kg_seal_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
krb5_gss_ctx_id_rec *ctx;
krb5_error_code code;
krb5_context context;
if (qop_req != 0) {
- *minor_status = (OM_uint32)G_UNKNOWN_QOP;
- return GSS_S_FAILURE;
+ *minor_status = (OM_uint32)G_UNKNOWN_QOP;
+ return GSS_S_FAILURE;
}
if (!kg_validate_ctx_id(context_handle)) {
- *minor_status = (OM_uint32)G_VALIDATE_FAILED;
- return GSS_S_NO_CONTEXT;
+ *minor_status = (OM_uint32)G_VALIDATE_FAILED;
+ return GSS_S_NO_CONTEXT;
}
ctx = (krb5_gss_ctx_id_rec *)context_handle;
if (!ctx->established) {
- *minor_status = KG_CTX_INCOMPLETE;
- return GSS_S_NO_CONTEXT;
+ *minor_status = KG_CTX_INCOMPLETE;
+ return GSS_S_NO_CONTEXT;
}
context = ctx->k5_context;
code = krb5_timeofday(context, &now);
if (code != 0) {
- *minor_status = code;
- save_error_info(*minor_status, context);
- return GSS_S_FAILURE;
+ *minor_status = code;
+ save_error_info(*minor_status, context);
+ return GSS_S_FAILURE;
}
if (conf_req_flag && kg_integ_only_iov(iov, iov_count)) {
- /* may be more sensible to return an error here */
- conf_req_flag = FALSE;
+ /* may be more sensible to return an error here */
+ conf_req_flag = FALSE;
}
switch (ctx->proto) {
case 0:
- code = make_seal_token_v1_iov(context, ctx, conf_req_flag,
- conf_state, iov, iov_count, toktype);
- break;
+ code = make_seal_token_v1_iov(context, ctx, conf_req_flag,
+ conf_state, iov, iov_count, toktype);
+ break;
case 1:
- code = gss_krb5int_make_seal_token_v3_iov(context, ctx, conf_req_flag,
- conf_state, iov, iov_count, toktype);
- break;
+ code = gss_krb5int_make_seal_token_v3_iov(context, ctx, conf_req_flag,
+ conf_state, iov, iov_count, toktype);
+ break;
default:
- code = G_UNKNOWN_QOP;
- break;
+ code = G_UNKNOWN_QOP;
+ break;
}
if (code != 0) {
- *minor_status = code;
- save_error_info(*minor_status, context);
- return GSS_S_FAILURE;
+ *minor_status = code;
+ save_error_info(*minor_status, context);
+ return GSS_S_FAILURE;
}
*minor_status = 0;
return (ctx->krb_times.endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
-#define INIT_IOV_DATA(_iov) do { (_iov)->buffer.value = NULL; \
- (_iov)->buffer.length = 0; } \
- while (0)
+#define INIT_IOV_DATA(_iov) do { (_iov)->buffer.value = NULL; \
+ (_iov)->buffer.length = 0; } \
+ while (0)
OM_uint32
kg_seal_iov_length(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
krb5_gss_ctx_id_rec *ctx;
gss_iov_buffer_t header, trailer, padding;
int dce_style;
if (qop_req != GSS_C_QOP_DEFAULT) {
- *minor_status = (OM_uint32)G_UNKNOWN_QOP;
- return GSS_S_FAILURE;
+ *minor_status = (OM_uint32)G_UNKNOWN_QOP;
+ return GSS_S_FAILURE;
}
if (!kg_validate_ctx_id(context_handle)) {
- *minor_status = (OM_uint32)G_VALIDATE_FAILED;
- return GSS_S_NO_CONTEXT;
+ *minor_status = (OM_uint32)G_VALIDATE_FAILED;
+ return GSS_S_NO_CONTEXT;
}
ctx = (krb5_gss_ctx_id_rec *)context_handle;
if (!ctx->established) {
- *minor_status = KG_CTX_INCOMPLETE;
- return GSS_S_NO_CONTEXT;
+ *minor_status = KG_CTX_INCOMPLETE;
+ return GSS_S_NO_CONTEXT;
}
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
if (header == NULL) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
INIT_IOV_DATA(header);
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
if (trailer != NULL) {
- INIT_IOV_DATA(trailer);
+ INIT_IOV_DATA(trailer);
}
dce_style = ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0);
/* For CFX, EC is used instead of padding, and is placed in header or trailer */
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (padding == NULL) {
- if (conf_req_flag && ctx->proto == 0 && !dce_style) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
- }
+ if (conf_req_flag && ctx->proto == 0 && !dce_style) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
} else {
- INIT_IOV_DATA(padding);
+ INIT_IOV_DATA(padding);
}
kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
if (conf_req_flag && kg_integ_only_iov(iov, iov_count))
- conf_req_flag = FALSE;
+ conf_req_flag = FALSE;
context = ctx->k5_context;
gss_headerlen = gss_padlen = gss_trailerlen = 0;
if (ctx->proto == 1) {
- krb5_enctype enctype;
- size_t ec;
-
- if (ctx->have_acceptor_subkey)
- enctype = ctx->acceptor_subkey->enctype;
- else
- enctype = ctx->subkey->enctype;
-
- code = krb5_c_crypto_length(context, enctype,
- conf_req_flag ?
- KRB5_CRYPTO_TYPE_TRAILER : KRB5_CRYPTO_TYPE_CHECKSUM,
- &k5_trailerlen);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_FAILURE;
- }
-
- if (conf_req_flag) {
- code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_FAILURE;
- }
- }
-
- gss_headerlen = 16; /* Header */
- if (conf_req_flag) {
- gss_headerlen += k5_headerlen; /* Kerb-Header */
- gss_trailerlen = 16 /* E(Header) */ + k5_trailerlen; /* Kerb-Trailer */
-
- code = krb5_c_padding_length(context, enctype,
- data_length - assoc_data_length + 16 /* E(Header) */, &k5_padlen);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_FAILURE;
- }
-
- if (k5_padlen == 0 && dce_style) {
- /* Windows rejects AEAD tokens with non-zero EC */
- code = krb5_c_block_size(context, enctype, &ec);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_FAILURE;
- }
- } else
- ec = k5_padlen;
-
- gss_trailerlen += ec;
- } else {
- gss_trailerlen = k5_trailerlen; /* Kerb-Checksum */
- }
+ krb5_enctype enctype;
+ size_t ec;
+
+ if (ctx->have_acceptor_subkey)
+ enctype = ctx->acceptor_subkey->enctype;
+ else
+ enctype = ctx->subkey->enctype;
+
+ code = krb5_c_crypto_length(context, enctype,
+ conf_req_flag ?
+ KRB5_CRYPTO_TYPE_TRAILER : KRB5_CRYPTO_TYPE_CHECKSUM,
+ &k5_trailerlen);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_FAILURE;
+ }
+
+ if (conf_req_flag) {
+ code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_FAILURE;
+ }
+ }
+
+ gss_headerlen = 16; /* Header */
+ if (conf_req_flag) {
+ gss_headerlen += k5_headerlen; /* Kerb-Header */
+ gss_trailerlen = 16 /* E(Header) */ + k5_trailerlen; /* Kerb-Trailer */
+
+ code = krb5_c_padding_length(context, enctype,
+ data_length - assoc_data_length + 16 /* E(Header) */, &k5_padlen);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_FAILURE;
+ }
+
+ if (k5_padlen == 0 && dce_style) {
+ /* Windows rejects AEAD tokens with non-zero EC */
+ code = krb5_c_block_size(context, enctype, &ec);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_FAILURE;
+ }
+ } else
+ ec = k5_padlen;
+
+ gss_trailerlen += ec;
+ } else {
+ gss_trailerlen = k5_trailerlen; /* Kerb-Checksum */
+ }
} else if (!dce_style) {
- k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
+ k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
- if (k5_padlen == 1)
- gss_padlen = 1;
- else
- gss_padlen = k5_padlen - ((data_length - assoc_data_length) % k5_padlen);
+ if (k5_padlen == 1)
+ gss_padlen = 1;
+ else
+ gss_padlen = k5_padlen - ((data_length - assoc_data_length) % k5_padlen);
}
data_length += gss_padlen;
if (ctx->proto == 0) {
- /* Header | Checksum | Confounder | Data | Pad */
- size_t data_size;
+ /* Header | Checksum | Confounder | Data | Pad */
+ size_t data_size;
- k5_headerlen = kg_confounder_size(context, ctx->enc);
+ k5_headerlen = kg_confounder_size(context, ctx->enc);
- data_size = 14 /* Header */ + ctx->cksum_size + k5_headerlen;
+ data_size = 14 /* Header */ + ctx->cksum_size + k5_headerlen;
- if (!dce_style)
- data_size += data_length;
+ if (!dce_style)
+ data_size += data_length;
- gss_headerlen = g_token_size(ctx->mech_used, data_size);
+ gss_headerlen = g_token_size(ctx->mech_used, data_size);
- /* g_token_size() will include data_size as well as the overhead, so
- * subtract data_length just to get the overhead (ie. token size) */
- if (!dce_style)
- gss_headerlen -= data_length;
+    /* g_token_size() will include data_size as well as the overhead, so
+     * subtract data_length just to get the overhead (i.e., the token size) */
+ if (!dce_style)
+ gss_headerlen -= data_length;
}
if (minor_status != NULL)
- *minor_status = 0;
+ *minor_status = 0;
if (trailer == NULL)
- gss_headerlen += gss_trailerlen;
+ gss_headerlen += gss_trailerlen;
else
- trailer->buffer.length = gss_trailerlen;
+ trailer->buffer.length = gss_trailerlen;
assert(gss_padlen == 0 || padding != NULL);
if (padding != NULL)
- padding->buffer.length = gss_padlen;
+ padding->buffer.length = gss_padlen;
header->buffer.length = gss_headerlen;
if (conf_state != NULL)
- *conf_state = conf_req_flag;
+ *conf_state = conf_req_flag;
return GSS_S_COMPLETE;
}
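/*
 * Illustrative sketch, not from the krb5 sources: a worked instance of the
 * CFX length computation above.  Assuming aes256-cts-hmac-sha1-96, where the
 * crypto header (confounder) is 16 bytes, the crypto trailer (HMAC-SHA1-96)
 * is 12 bytes and CTS needs no padding (k5_padlen == 0, so ec == 0 for
 * non-DCE callers), a confidential wrap token costs:
 *
 *     gss_headerlen  = 16 (GSS header) + 16 (Kerb-Header)            = 32
 *     gss_trailerlen =  0 (EC) + 16 (E(Header)) + 12 (Kerb-Trailer)  = 28
 *
 * i.e. 60 bytes of overhead on top of the caller's data; when no TRAILER
 * buffer is supplied, all 60 bytes are reported in the HEADER buffer instead.
 */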
: KG_USAGE_ACCEPTOR_SIGN));
if (ctx->have_acceptor_subkey) {
key = ctx->acceptor_subkey;
- cksumtype = ctx->acceptor_subkey_cksumtype;
+ cksumtype = ctx->acceptor_subkey_cksumtype;
} else {
key = ctx->subkey;
- cksumtype = ctx->cksumtype;
+ cksumtype = ctx->cksumtype;
}
assert(key != NULL);
#endif
} else if (toktype == KG_TOK_WRAP_MSG && !conf_req_flag) {
krb5_data plain;
- size_t cksumsize;
+ size_t cksumsize;
/* Here, message is the application-supplied data; message2 is
what goes into the output token. They may be the same, or
if (plain.data == NULL)
return ENOMEM;
- err = krb5_c_checksum_length(context, cksumtype, &cksumsize);
- if (err)
- goto error;
+ err = krb5_c_checksum_length(context, cksumtype, &cksumsize);
+ if (err)
+ goto error;
- assert(cksumsize <= 0xffff);
+ assert(cksumsize <= 0xffff);
bufsize = 16 + message2->length + cksumsize;
outbuf = malloc(bufsize);
krb5_cksumtype cksumtype;
if (ctx->big_endian != 0)
- goto defective;
+ goto defective;
if (qop_state)
*qop_state = GSS_C_QOP_DEFAULT;
value in that case, though, so we can just ignore the flag. */
if (ctx->have_acceptor_subkey && (ptr[2] & FLAG_ACCEPTOR_SUBKEY)) {
key = ctx->acceptor_subkey;
- cksumtype = ctx->acceptor_subkey_cksumtype;
+ cksumtype = ctx->acceptor_subkey_cksumtype;
} else {
key = ctx->subkey;
- cksumtype = ctx->cksumtype;
+ cksumtype = ctx->cksumtype;
}
assert(key != NULL);
message_buffer->value = NULL;
}
} else {
- size_t cksumsize;
+ size_t cksumsize;
- err = krb5_c_checksum_length(context, cksumtype, &cksumsize);
- if (err)
- goto error;
+ err = krb5_c_checksum_length(context, cksumtype, &cksumsize);
+ if (err)
+ goto error;
/* no confidentiality */
if (conf_state)
*/
#include <assert.h>
-#include "k5-platform.h" /* for 64-bit support */
-#include "k5-int.h" /* for zap() */
+#include "k5-platform.h" /* for 64-bit support */
+#include "k5-int.h" /* for zap() */
#include "gssapiP_krb5.h"
#include <stdarg.h>
krb5_error_code
gss_krb5int_make_seal_token_v3_iov(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- int conf_req_flag,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ krb5_gss_ctx_id_rec *ctx,
+ int conf_req_flag,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
krb5_error_code code = 0;
gss_iov_buffer_t header;
acceptor_flag = ctx->initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
key_usage = (toktype == KG_TOK_WRAP_MSG
- ? (ctx->initiate
- ? KG_USAGE_INITIATOR_SEAL
- : KG_USAGE_ACCEPTOR_SEAL)
- : (ctx->initiate
- ? KG_USAGE_INITIATOR_SIGN
- : KG_USAGE_ACCEPTOR_SIGN));
+ ? (ctx->initiate
+ ? KG_USAGE_INITIATOR_SEAL
+ : KG_USAGE_ACCEPTOR_SEAL)
+ : (ctx->initiate
+ ? KG_USAGE_INITIATOR_SIGN
+ : KG_USAGE_ACCEPTOR_SIGN));
if (ctx->have_acceptor_subkey) {
- key = ctx->acceptor_subkey;
- cksumtype = ctx->acceptor_subkey_cksumtype;
+ key = ctx->acceptor_subkey;
+ cksumtype = ctx->acceptor_subkey_cksumtype;
} else {
- key = ctx->subkey;
- cksumtype = ctx->cksumtype;
+ key = ctx->subkey;
+ cksumtype = ctx->cksumtype;
}
assert(key != NULL);
assert(cksumtype != 0);
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
if (header == NULL)
- return EINVAL;
+ return EINVAL;
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (padding != NULL)
- padding->buffer.length = 0;
+ padding->buffer.length = 0;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
outbuf = (unsigned char *)header->buffer.value;
if (toktype == KG_TOK_WRAP_MSG && conf_req_flag) {
- unsigned int k5_headerlen, k5_trailerlen, k5_padlen;
- size_t ec = 0;
- size_t conf_data_length = data_length - assoc_data_length;
-
- code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
- if (code != 0)
- goto cleanup;
-
- code = krb5_c_padding_length(context, key->enctype,
- conf_data_length + 16 /* E(Header) */, &k5_padlen);
- if (code != 0)
- goto cleanup;
-
- if (k5_padlen == 0 && (ctx->gss_flags & GSS_C_DCE_STYLE)) {
- /* Windows rejects AEAD tokens with non-zero EC */
- code = krb5_c_block_size(context, key->enctype, &ec);
- if (code != 0)
- goto cleanup;
- } else
- ec = k5_padlen;
-
- code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_TRAILER, &k5_trailerlen);
- if (code != 0)
- goto cleanup;
-
- gss_headerlen = 16 /* Header */ + k5_headerlen;
- gss_trailerlen = ec + 16 /* E(Header) */ + k5_trailerlen;
-
- if (trailer == NULL) {
- rrc = gss_trailerlen;
- /* Workaround for Windows bug where it rotates by EC + RRC */
- if (ctx->gss_flags & GSS_C_DCE_STYLE)
- rrc -= ec;
- gss_headerlen += gss_trailerlen;
- }
-
- if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
- code = kg_allocate_iov(header, gss_headerlen);
- else if (header->buffer.length < gss_headerlen)
- code = KRB5_BAD_MSIZE;
- if (code != 0)
- goto cleanup;
- header->buffer.length = gss_headerlen;
-
- if (trailer != NULL) {
- if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
- code = kg_allocate_iov(trailer, gss_trailerlen);
- else if (trailer->buffer.length < gss_trailerlen)
- code = KRB5_BAD_MSIZE;
- if (code != 0)
- goto cleanup;
- trailer->buffer.length = gss_trailerlen;
- }
-
- /* TOK_ID */
- store_16_be(KG2_TOK_WRAP_MSG, outbuf);
- /* flags */
- outbuf[2] = (acceptor_flag
- | (conf_req_flag ? FLAG_WRAP_CONFIDENTIAL : 0)
- | (ctx->have_acceptor_subkey ? FLAG_ACCEPTOR_SUBKEY : 0));
- /* filler */
- outbuf[3] = 0xFF;
- /* EC */
- store_16_be(ec, outbuf + 4);
- /* RRC */
- store_16_be(0, outbuf + 6);
- store_64_be(ctx->seq_send, outbuf + 8);
-
- /* EC | copy of header to be encrypted, located in (possibly rotated) trailer */
- if (trailer == NULL)
- tbuf = (unsigned char *)header->buffer.value + 16; /* Header */
- else
- tbuf = (unsigned char *)trailer->buffer.value;
-
- memset(tbuf, 0xFF, ec);
- memcpy(tbuf + ec, header->buffer.value, 16);
-
- code = kg_encrypt_iov(context, ctx->proto,
- ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
- ec, rrc, key, key_usage, 0, iov, iov_count);
- if (code != 0)
- goto cleanup;
-
- /* RRC */
- store_16_be(rrc, outbuf + 6);
-
- ctx->seq_send++;
+ unsigned int k5_headerlen, k5_trailerlen, k5_padlen;
+ size_t ec = 0;
+ size_t conf_data_length = data_length - assoc_data_length;
+
+ code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
+ if (code != 0)
+ goto cleanup;
+
+ code = krb5_c_padding_length(context, key->enctype,
+ conf_data_length + 16 /* E(Header) */, &k5_padlen);
+ if (code != 0)
+ goto cleanup;
+
+ if (k5_padlen == 0 && (ctx->gss_flags & GSS_C_DCE_STYLE)) {
+ /* Windows rejects AEAD tokens with non-zero EC */
+ code = krb5_c_block_size(context, key->enctype, &ec);
+ if (code != 0)
+ goto cleanup;
+ } else
+ ec = k5_padlen;
+
+ code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_TRAILER, &k5_trailerlen);
+ if (code != 0)
+ goto cleanup;
+
+ gss_headerlen = 16 /* Header */ + k5_headerlen;
+ gss_trailerlen = ec + 16 /* E(Header) */ + k5_trailerlen;
+
+ if (trailer == NULL) {
+ rrc = gss_trailerlen;
+ /* Workaround for Windows bug where it rotates by EC + RRC */
+ if (ctx->gss_flags & GSS_C_DCE_STYLE)
+ rrc -= ec;
+ gss_headerlen += gss_trailerlen;
+ }
+
+ if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
+ code = kg_allocate_iov(header, gss_headerlen);
+ else if (header->buffer.length < gss_headerlen)
+ code = KRB5_BAD_MSIZE;
+ if (code != 0)
+ goto cleanup;
+ header->buffer.length = gss_headerlen;
+
+ if (trailer != NULL) {
+ if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
+ code = kg_allocate_iov(trailer, gss_trailerlen);
+ else if (trailer->buffer.length < gss_trailerlen)
+ code = KRB5_BAD_MSIZE;
+ if (code != 0)
+ goto cleanup;
+ trailer->buffer.length = gss_trailerlen;
+ }
+
+ /* TOK_ID */
+ store_16_be(KG2_TOK_WRAP_MSG, outbuf);
+ /* flags */
+ outbuf[2] = (acceptor_flag
+ | (conf_req_flag ? FLAG_WRAP_CONFIDENTIAL : 0)
+ | (ctx->have_acceptor_subkey ? FLAG_ACCEPTOR_SUBKEY : 0));
+ /* filler */
+ outbuf[3] = 0xFF;
+ /* EC */
+ store_16_be(ec, outbuf + 4);
+ /* RRC */
+ store_16_be(0, outbuf + 6);
+ store_64_be(ctx->seq_send, outbuf + 8);
+
+ /* EC | copy of header to be encrypted, located in (possibly rotated) trailer */
+ if (trailer == NULL)
+ tbuf = (unsigned char *)header->buffer.value + 16; /* Header */
+ else
+ tbuf = (unsigned char *)trailer->buffer.value;
+
+ memset(tbuf, 0xFF, ec);
+ memcpy(tbuf + ec, header->buffer.value, 16);
+
+ code = kg_encrypt_iov(context, ctx->proto,
+ ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
+ ec, rrc, key, key_usage, 0, iov, iov_count);
+ if (code != 0)
+ goto cleanup;
+
+ /* RRC */
+ store_16_be(rrc, outbuf + 6);
+
+ ctx->seq_send++;
} else if (toktype == KG_TOK_WRAP_MSG && !conf_req_flag) {
- tok_id = KG2_TOK_WRAP_MSG;
+ tok_id = KG2_TOK_WRAP_MSG;
wrap_with_checksum:
- gss_headerlen = 16;
-
- code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_CHECKSUM, &gss_trailerlen);
- if (code != 0)
- goto cleanup;
-
- assert(gss_trailerlen <= 0xFFFF);
-
- if (trailer == NULL) {
- rrc = gss_trailerlen;
- gss_headerlen += gss_trailerlen;
- }
-
- if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
- code = kg_allocate_iov(header, gss_headerlen);
- else if (header->buffer.length < gss_headerlen)
- code = KRB5_BAD_MSIZE;
- if (code != 0)
- goto cleanup;
- header->buffer.length = gss_headerlen;
-
- if (trailer != NULL) {
- if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
- code = kg_allocate_iov(trailer, gss_trailerlen);
- else if (trailer->buffer.length < gss_trailerlen)
- code = KRB5_BAD_MSIZE;
- if (code != 0)
- goto cleanup;
- trailer->buffer.length = gss_trailerlen;
- }
-
- /* TOK_ID */
- store_16_be(tok_id, outbuf);
- /* flags */
- outbuf[2] = (acceptor_flag
- | (ctx->have_acceptor_subkey ? FLAG_ACCEPTOR_SUBKEY : 0));
- /* filler */
- outbuf[3] = 0xFF;
- if (toktype == KG_TOK_WRAP_MSG) {
- /* Use 0 for checksum calculation, substitute
- * checksum length later.
- */
- /* EC */
- store_16_be(0, outbuf + 4);
- /* RRC */
- store_16_be(0, outbuf + 6);
- } else {
- /* MIC and DEL store 0xFF in EC and RRC */
- store_16_be(0xFFFF, outbuf + 4);
- store_16_be(0xFFFF, outbuf + 6);
- }
- store_64_be(ctx->seq_send, outbuf + 8);
-
- code = kg_make_checksum_iov_v3(context, cksumtype,
- rrc, key, key_usage,
- iov, iov_count);
- if (code != 0)
- goto cleanup;
-
- ctx->seq_send++;
-
- if (toktype == KG_TOK_WRAP_MSG) {
- /* Fix up EC field */
- store_16_be(gss_trailerlen, outbuf + 4);
- /* Fix up RRC field */
- store_16_be(rrc, outbuf + 6);
- }
+ gss_headerlen = 16;
+
+ code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_CHECKSUM, &gss_trailerlen);
+ if (code != 0)
+ goto cleanup;
+
+ assert(gss_trailerlen <= 0xFFFF);
+
+ if (trailer == NULL) {
+ rrc = gss_trailerlen;
+ gss_headerlen += gss_trailerlen;
+ }
+
+ if (header->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
+ code = kg_allocate_iov(header, gss_headerlen);
+ else if (header->buffer.length < gss_headerlen)
+ code = KRB5_BAD_MSIZE;
+ if (code != 0)
+ goto cleanup;
+ header->buffer.length = gss_headerlen;
+
+ if (trailer != NULL) {
+ if (trailer->type & GSS_IOV_BUFFER_FLAG_ALLOCATE)
+ code = kg_allocate_iov(trailer, gss_trailerlen);
+ else if (trailer->buffer.length < gss_trailerlen)
+ code = KRB5_BAD_MSIZE;
+ if (code != 0)
+ goto cleanup;
+ trailer->buffer.length = gss_trailerlen;
+ }
+
+ /* TOK_ID */
+ store_16_be(tok_id, outbuf);
+ /* flags */
+ outbuf[2] = (acceptor_flag
+ | (ctx->have_acceptor_subkey ? FLAG_ACCEPTOR_SUBKEY : 0));
+ /* filler */
+ outbuf[3] = 0xFF;
+ if (toktype == KG_TOK_WRAP_MSG) {
+ /* Use 0 for checksum calculation, substitute
+ * checksum length later.
+ */
+ /* EC */
+ store_16_be(0, outbuf + 4);
+ /* RRC */
+ store_16_be(0, outbuf + 6);
+ } else {
+ /* MIC and DEL store 0xFF in EC and RRC */
+ store_16_be(0xFFFF, outbuf + 4);
+ store_16_be(0xFFFF, outbuf + 6);
+ }
+ store_64_be(ctx->seq_send, outbuf + 8);
+
+ code = kg_make_checksum_iov_v3(context, cksumtype,
+ rrc, key, key_usage,
+ iov, iov_count);
+ if (code != 0)
+ goto cleanup;
+
+ ctx->seq_send++;
+
+ if (toktype == KG_TOK_WRAP_MSG) {
+ /* Fix up EC field */
+ store_16_be(gss_trailerlen, outbuf + 4);
+ /* Fix up RRC field */
+ store_16_be(rrc, outbuf + 6);
+ }
} else if (toktype == KG_TOK_MIC_MSG) {
- tok_id = KG2_TOK_MIC_MSG;
- trailer = NULL;
- goto wrap_with_checksum;
+ tok_id = KG2_TOK_MIC_MSG;
+ trailer = NULL;
+ goto wrap_with_checksum;
} else if (toktype == KG_TOK_DEL_CTX) {
- tok_id = KG2_TOK_DEL_CTX;
- goto wrap_with_checksum;
+ tok_id = KG2_TOK_DEL_CTX;
+ goto wrap_with_checksum;
} else {
- abort();
+ abort();
}
code = 0;
cleanup:
if (code != 0)
- kg_release_iov(iov, iov_count);
+ kg_release_iov(iov, iov_count);
return code;
}
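/*
 * Illustrative sketch, not from the krb5 sources: the 16-byte CFX token
 * header assembled above, shown as a standalone helper.  Field offsets mirror
 * the store_16_be()/store_64_be() calls in the confidential-wrap path; the
 * numeric TOK_ID and flag values follow RFC 4121 (the real macros live in
 * gssapiP_krb5.h), and the byte-store helpers here are local stand-ins.
 */
#include <stdint.h>

static void example_put16(unsigned char *p, uint16_t v)
{
    p[0] = (v >> 8) & 0xFF;
    p[1] = v & 0xFF;
}

static void example_put64(unsigned char *p, uint64_t v)
{
    int i;

    for (i = 0; i < 8; i++)
        p[i] = (unsigned char)((v >> (56 - 8 * i)) & 0xFF);
}

/* Fill hdr[0..15] for a confidential wrap token. */
static void
example_cfx_wrap_header(unsigned char hdr[16], int sender_is_acceptor,
                        int have_acceptor_subkey, uint16_t ec, uint16_t rrc,
                        uint64_t seqnum)
{
    example_put16(hdr, 0x0504);                 /* TOK_ID (KG2_TOK_WRAP_MSG) */
    hdr[2] = (unsigned char)((sender_is_acceptor ? 0x01 : 0)      /* SentByAcceptor */
                             | 0x02                               /* Sealed */
                             | (have_acceptor_subkey ? 0x04 : 0)); /* AcceptorSubkey */
    hdr[3] = 0xFF;                              /* filler */
    example_put16(hdr + 4, ec);                 /* EC: extra (pad) count */
    example_put16(hdr + 6, rrc);                /* RRC: right rotation count */
    example_put64(hdr + 8, seqnum);             /* 64-bit sequence number */
}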
OM_uint32
gss_krb5int_unseal_v3_iov(krb5_context context,
- OM_uint32 *minor_status,
- krb5_gss_ctx_id_rec *ctx,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int *conf_state,
- gss_qop_t *qop_state,
- int toktype)
+ OM_uint32 *minor_status,
+ krb5_gss_ctx_id_rec *ctx,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ int toktype)
{
OM_uint32 code;
gss_iov_buffer_t header;
int conf_flag = 0;
if (ctx->big_endian != 0)
- return GSS_S_DEFECTIVE_TOKEN;
+ return GSS_S_DEFECTIVE_TOKEN;
if (qop_state != NULL)
- *qop_state = GSS_C_QOP_DEFAULT;
+ *qop_state = GSS_C_QOP_DEFAULT;
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
assert(header != NULL);
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (padding != NULL && padding->buffer.length != 0)
- return GSS_S_DEFECTIVE_TOKEN;
+ return GSS_S_DEFECTIVE_TOKEN;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
acceptor_flag = ctx->initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
key_usage = (toktype == KG_TOK_WRAP_MSG
- ? (!ctx->initiate
- ? KG_USAGE_INITIATOR_SEAL
- : KG_USAGE_ACCEPTOR_SEAL)
- : (!ctx->initiate
- ? KG_USAGE_INITIATOR_SIGN
- : KG_USAGE_ACCEPTOR_SIGN));
+ ? (!ctx->initiate
+ ? KG_USAGE_INITIATOR_SEAL
+ : KG_USAGE_ACCEPTOR_SEAL)
+ : (!ctx->initiate
+ ? KG_USAGE_INITIATOR_SIGN
+ : KG_USAGE_ACCEPTOR_SIGN));
kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
ptr = (unsigned char *)header->buffer.value;
if (header->buffer.length < 16) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
if ((ptr[2] & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
- *minor_status = (OM_uint32)G_BAD_DIRECTION;
- return GSS_S_BAD_SIG;
+ *minor_status = (OM_uint32)G_BAD_DIRECTION;
+ return GSS_S_BAD_SIG;
}
if (ctx->have_acceptor_subkey && (ptr[2] & FLAG_ACCEPTOR_SUBKEY)) {
- key = ctx->acceptor_subkey;
- cksumtype = ctx->acceptor_subkey_cksumtype;
+ key = ctx->acceptor_subkey;
+ cksumtype = ctx->acceptor_subkey_cksumtype;
} else {
- key = ctx->subkey;
- cksumtype = ctx->cksumtype;
+ key = ctx->subkey;
+ cksumtype = ctx->cksumtype;
}
assert(key != NULL);
if (toktype == KG_TOK_WRAP_MSG) {
- unsigned int k5_trailerlen;
-
- if (load_16_be(ptr) != KG2_TOK_WRAP_MSG)
- goto defective;
- conf_flag = ((ptr[2] & FLAG_WRAP_CONFIDENTIAL) != 0);
- if (ptr[3] != 0xFF)
- goto defective;
- ec = load_16_be(ptr + 4);
- rrc = load_16_be(ptr + 6);
- seqnum = load_64_be(ptr + 8);
-
- code = krb5_c_crypto_length(context, key->enctype,
- conf_flag ? KRB5_CRYPTO_TYPE_TRAILER :
- KRB5_CRYPTO_TYPE_CHECKSUM,
- &k5_trailerlen);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_FAILURE;
- }
-
- /* Deal with RRC */
- if (trailer == NULL) {
- size_t desired_rrc = k5_trailerlen;
-
- if (conf_flag) {
- desired_rrc += 16; /* E(Header) */
-
- if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
- desired_rrc += ec;
- }
-
- /* According to MS, we only need to deal with a fixed RRC for DCE */
- if (rrc != desired_rrc)
- goto defective;
- } else if (rrc != 0) {
- /* Should have been rotated by kg_unseal_stream_iov() */
- goto defective;
- }
-
- if (conf_flag) {
- unsigned char *althdr;
-
- /* Decrypt */
- code = kg_decrypt_iov(context, ctx->proto,
- ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
- ec, rrc,
- key, key_usage, 0, iov, iov_count);
- if (code != 0) {
- *minor_status = code;
- return GSS_S_BAD_SIG;
- }
-
- /* Validate header integrity */
- if (trailer == NULL)
- althdr = (unsigned char *)header->buffer.value + 16 + ec;
- else
- althdr = (unsigned char *)trailer->buffer.value + ec;
-
- if (load_16_be(althdr) != KG2_TOK_WRAP_MSG
- || althdr[2] != ptr[2]
- || althdr[3] != ptr[3]
- || memcmp(althdr + 8, ptr + 8, 8) != 0) {
- *minor_status = 0;
- return GSS_S_BAD_SIG;
- }
- } else {
- /* Verify checksum: note EC is checksum size here, not padding */
- if (ec != k5_trailerlen)
- goto defective;
-
- /* Zero EC, RRC before computing checksum */
- store_16_be(0, ptr + 4);
- store_16_be(0, ptr + 6);
-
- code = kg_verify_checksum_iov_v3(context, cksumtype, rrc,
- key, key_usage,
- iov, iov_count, &valid);
- if (code != 0 || valid == FALSE) {
- *minor_status = code;
- return GSS_S_BAD_SIG;
- }
- }
-
- code = g_order_check(&ctx->seqstate, seqnum);
+ unsigned int k5_trailerlen;
+
+ if (load_16_be(ptr) != KG2_TOK_WRAP_MSG)
+ goto defective;
+ conf_flag = ((ptr[2] & FLAG_WRAP_CONFIDENTIAL) != 0);
+ if (ptr[3] != 0xFF)
+ goto defective;
+ ec = load_16_be(ptr + 4);
+ rrc = load_16_be(ptr + 6);
+ seqnum = load_64_be(ptr + 8);
+
+ code = krb5_c_crypto_length(context, key->enctype,
+ conf_flag ? KRB5_CRYPTO_TYPE_TRAILER :
+ KRB5_CRYPTO_TYPE_CHECKSUM,
+ &k5_trailerlen);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_FAILURE;
+ }
+
+ /* Deal with RRC */
+ if (trailer == NULL) {
+ size_t desired_rrc = k5_trailerlen;
+
+ if (conf_flag) {
+ desired_rrc += 16; /* E(Header) */
+
+ if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
+ desired_rrc += ec;
+ }
+
+ /* According to MS, we only need to deal with a fixed RRC for DCE */
+ if (rrc != desired_rrc)
+ goto defective;
+ } else if (rrc != 0) {
+ /* Should have been rotated by kg_unseal_stream_iov() */
+ goto defective;
+ }
+
+ if (conf_flag) {
+ unsigned char *althdr;
+
+ /* Decrypt */
+ code = kg_decrypt_iov(context, ctx->proto,
+ ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
+ ec, rrc,
+ key, key_usage, 0, iov, iov_count);
+ if (code != 0) {
+ *minor_status = code;
+ return GSS_S_BAD_SIG;
+ }
+
+ /* Validate header integrity */
+ if (trailer == NULL)
+ althdr = (unsigned char *)header->buffer.value + 16 + ec;
+ else
+ althdr = (unsigned char *)trailer->buffer.value + ec;
+
+ if (load_16_be(althdr) != KG2_TOK_WRAP_MSG
+ || althdr[2] != ptr[2]
+ || althdr[3] != ptr[3]
+ || memcmp(althdr + 8, ptr + 8, 8) != 0) {
+ *minor_status = 0;
+ return GSS_S_BAD_SIG;
+ }
+ } else {
+ /* Verify checksum: note EC is checksum size here, not padding */
+ if (ec != k5_trailerlen)
+ goto defective;
+
+ /* Zero EC, RRC before computing checksum */
+ store_16_be(0, ptr + 4);
+ store_16_be(0, ptr + 6);
+
+ code = kg_verify_checksum_iov_v3(context, cksumtype, rrc,
+ key, key_usage,
+ iov, iov_count, &valid);
+ if (code != 0 || valid == FALSE) {
+ *minor_status = code;
+ return GSS_S_BAD_SIG;
+ }
+ }
+
+ code = g_order_check(&ctx->seqstate, seqnum);
} else if (toktype == KG_TOK_MIC_MSG) {
- if (load_16_be(ptr) != KG2_TOK_MIC_MSG)
- goto defective;
+ if (load_16_be(ptr) != KG2_TOK_MIC_MSG)
+ goto defective;
verify_mic_1:
- if (ptr[3] != 0xFF)
- goto defective;
- seqnum = load_64_be(ptr + 8);
-
- code = kg_verify_checksum_iov_v3(context, cksumtype, 0,
- key, key_usage,
- iov, iov_count, &valid);
- if (code != 0 || valid == FALSE) {
- *minor_status = code;
- return GSS_S_BAD_SIG;
- }
- code = g_order_check(&ctx->seqstate, seqnum);
+ if (ptr[3] != 0xFF)
+ goto defective;
+ seqnum = load_64_be(ptr + 8);
+
+ code = kg_verify_checksum_iov_v3(context, cksumtype, 0,
+ key, key_usage,
+ iov, iov_count, &valid);
+ if (code != 0 || valid == FALSE) {
+ *minor_status = code;
+ return GSS_S_BAD_SIG;
+ }
+ code = g_order_check(&ctx->seqstate, seqnum);
} else if (toktype == KG_TOK_DEL_CTX) {
- if (load_16_be(ptr) != KG2_TOK_DEL_CTX)
- goto defective;
- goto verify_mic_1;
+ if (load_16_be(ptr) != KG2_TOK_DEL_CTX)
+ goto defective;
+ goto verify_mic_1;
} else {
- goto defective;
+ goto defective;
}
*minor_status = 0;
if (conf_state != NULL)
- *conf_state = conf_flag;
+ *conf_state = conf_flag;
return code;
}
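/*
 * Illustrative sketch, not from the krb5 sources: the RRC checks above expect
 * the sender to have rotated the token body right by RRC bytes so that the
 * trailer (E(Header) and checksum) sits directly after the 16-byte header.
 * A receiver holding one contiguous token undoes that with a left rotation,
 * which is what kg_unseal_stream_iov() does via gss_krb5int_rotate_left();
 * the helper below is a simplified stand-in for that routine.
 */
#include <stdlib.h>
#include <string.h>

/* Rotate buf[0..len-1] left by rc bytes; returns 0 only on malloc failure. */
static int
example_rotate_left(unsigned char *buf, size_t len, size_t rc)
{
    unsigned char *tmp;

    if (len == 0 || (rc %= len) == 0)
        return 1;

    tmp = malloc(rc);
    if (tmp == NULL)
        return 0;
    memcpy(tmp, buf, rc);                 /* save the first rc bytes */
    memmove(buf, buf + rc, len - rc);     /* slide the remainder down */
    memcpy(buf + (len - rc), tmp, rc);    /* reattach the saved prefix at the end */
    free(tmp);
    return 1;
}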
if (bodysize < 2) {
- *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
+ return GSS_S_DEFECTIVE_TOKEN;
}
toktype2 = load_16_be(ptr);
ret = gss_krb5int_unseal_token_v3(&ctx->k5_context, minor_status, ctx,
ptr, bodysize, message_buffer,
conf_state, qop_state, toktype);
- break;
+ break;
case KG_TOK_MIC_MSG:
case KG_TOK_WRAP_MSG:
case KG_TOK_DEL_CTX:
ret = kg_unseal_v1(ctx->k5_context, minor_status, ctx, ptr, bodysize,
message_buffer, conf_state, qop_state,
toktype);
- break;
+ break;
default:
- *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
- ret = GSS_S_DEFECTIVE_TOKEN;
- break;
+ *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ break;
}
if (ret != 0)
*/
#include <assert.h>
-#include "k5-platform.h" /* for 64-bit support */
-#include "k5-int.h" /* for zap() */
+#include "k5-platform.h" /* for 64-bit support */
+#include "k5-int.h" /* for zap() */
#include "gssapiP_krb5.h"
#include <stdarg.h>
static OM_uint32
kg_unseal_v1_iov(krb5_context context,
- OM_uint32 *minor_status,
- krb5_gss_ctx_id_rec *ctx,
- gss_iov_buffer_desc *iov,
- int iov_count,
- size_t token_wrapper_len,
- int *conf_state,
- gss_qop_t *qop_state,
- int toktype)
+ OM_uint32 *minor_status,
+ krb5_gss_ctx_id_rec *ctx,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ size_t token_wrapper_len,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ int toktype)
{
OM_uint32 code;
gss_iov_buffer_t header;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
if (trailer != NULL && trailer->buffer.length != 0) {
- *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
}
if (header->buffer.length < token_wrapper_len + 14) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
ptr = (unsigned char *)header->buffer.value + token_wrapper_len;
sealalg |= ptr[3] << 8;
if (ptr[4] != 0xFF || ptr[5] != 0xFF) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
if (toktype != KG_TOK_WRAP_MSG && sealalg != 0xFFFF) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
if (toktype == KG_TOK_WRAP_MSG &&
- !(sealalg == 0xFFFF || sealalg == ctx->sealalg)) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ !(sealalg == 0xFFFF || sealalg == ctx->sealalg)) {
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
- (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
- (ctx->sealalg == SEAL_ALG_DES3KD &&
- signalg != SGN_ALG_HMAC_SHA1_DES3_KD)||
- (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4 &&
- signalg != SGN_ALG_HMAC_MD5)) {
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
+ (ctx->sealalg == SEAL_ALG_DES3KD &&
+ signalg != SGN_ALG_HMAC_SHA1_DES3_KD)||
+ (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4 &&
+ signalg != SGN_ALG_HMAC_MD5)) {
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
switch (signalg) {
case SGN_ALG_DES_MAC_MD5:
case SGN_ALG_MD2_5:
case SGN_ALG_HMAC_MD5:
- cksum_len = 8;
- if (toktype != KG_TOK_WRAP_MSG)
- sign_usage = 15;
- break;
+ cksum_len = 8;
+ if (toktype != KG_TOK_WRAP_MSG)
+ sign_usage = 15;
+ break;
case SGN_ALG_3:
- cksum_len = 16;
- break;
+ cksum_len = 16;
+ break;
case SGN_ALG_HMAC_SHA1_DES3_KD:
- cksum_len = 20;
- break;
+ cksum_len = 20;
+ break;
default:
- *minor_status = 0;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
}
/* get the token parameters */
code = kg_get_seq_num(context, ctx->seq, ptr + 14, ptr + 6, &direction,
- &seqnum);
+ &seqnum);
if (code != 0) {
- *minor_status = code;
- return GSS_S_BAD_SIG;
+ *minor_status = code;
+ return GSS_S_BAD_SIG;
}
assert(ctx->big_endian == 0);
/* decode the message, if SEAL */
if (toktype == KG_TOK_WRAP_MSG) {
- if (sealalg != 0xFFFF) {
- if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
- unsigned char bigend_seqnum[4];
- krb5_keyblock *enc_key;
- size_t i;
-
- bigend_seqnum[0] = (seqnum >> 24) & 0xFF;
- bigend_seqnum[1] = (seqnum >> 16) & 0xFF;
- bigend_seqnum[2] = (seqnum >> 8 ) & 0xFF;
- bigend_seqnum[3] = (seqnum ) & 0xFF;
-
- code = krb5_copy_keyblock(context, ctx->enc, &enc_key);
- if (code != 0) {
- retval = GSS_S_FAILURE;
- goto cleanup;
- }
-
- assert(enc_key->length == 16);
-
- for (i = 0; i < enc_key->length; i++)
- ((char *)enc_key->contents)[i] ^= 0xF0;
-
- code = kg_arcfour_docrypt_iov(context, enc_key, 0,
- &bigend_seqnum[0], 4,
- iov, iov_count);
- krb5_free_keyblock(context, enc_key);
- } else {
- code = kg_decrypt_iov(context, 0,
- ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
- 0 /*EC*/, 0 /*RRC*/,
- ctx->enc, KG_USAGE_SEAL, NULL,
- iov, iov_count);
- }
- if (code != 0) {
- retval = GSS_S_FAILURE;
- goto cleanup;
- }
- }
- conflen = kg_confounder_size(context, ctx->enc);
+ if (sealalg != 0xFFFF) {
+ if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) {
+ unsigned char bigend_seqnum[4];
+ krb5_keyblock *enc_key;
+ size_t i;
+
+ bigend_seqnum[0] = (seqnum >> 24) & 0xFF;
+ bigend_seqnum[1] = (seqnum >> 16) & 0xFF;
+ bigend_seqnum[2] = (seqnum >> 8 ) & 0xFF;
+ bigend_seqnum[3] = (seqnum ) & 0xFF;
+
+ code = krb5_copy_keyblock(context, ctx->enc, &enc_key);
+ if (code != 0) {
+ retval = GSS_S_FAILURE;
+ goto cleanup;
+ }
+
+ assert(enc_key->length == 16);
+
+ for (i = 0; i < enc_key->length; i++)
+ ((char *)enc_key->contents)[i] ^= 0xF0;
+
+ code = kg_arcfour_docrypt_iov(context, enc_key, 0,
+ &bigend_seqnum[0], 4,
+ iov, iov_count);
+ krb5_free_keyblock(context, enc_key);
+ } else {
+ code = kg_decrypt_iov(context, 0,
+ ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0),
+ 0 /*EC*/, 0 /*RRC*/,
+ ctx->enc, KG_USAGE_SEAL, NULL,
+ iov, iov_count);
+ }
+ if (code != 0) {
+ retval = GSS_S_FAILURE;
+ goto cleanup;
+ }
+ }
+ conflen = kg_confounder_size(context, ctx->enc);
}
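    /*
     * Illustrative note, not from the krb5 sources: in the RC4
     * (SEAL_ALG_MICROSOFT_RC4) branch above, the per-message decryption key
     * is the context encryption key with every byte XOR-ed with 0xF0, and the
     * ARCFOUR operation is additionally keyed with the sequence number in
     * big-endian byte order; e.g. seqnum 5 gives bigend_seqnum[] =
     * { 0x00, 0x00, 0x00, 0x05 }.
     */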
if (header->buffer.length != token_wrapper_len + 14 + cksum_len + conflen) {
- retval = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
+ retval = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
}
/* compute the checksum of the message */
case SGN_ALG_MD2_5:
case SGN_ALG_DES_MAC:
case SGN_ALG_3:
- md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_RSA_MD5;
+ break;
case SGN_ALG_HMAC_MD5:
- md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR;
+ break;
case SGN_ALG_HMAC_SHA1_DES3_KD:
- md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
- break;
+ md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3;
+ break;
default:
- abort();
+ abort();
}
code = krb5_c_checksum_length(context, md5cksum.checksum_type, &sumlen);
if (code != 0) {
- retval = GSS_S_FAILURE;
- goto cleanup;
+ retval = GSS_S_FAILURE;
+ goto cleanup;
}
md5cksum.length = sumlen;
/* compute the checksum of the message */
code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type,
- cksum_len, ctx->seq, ctx->enc,
- sign_usage, iov, iov_count, toktype,
- &md5cksum);
+ cksum_len, ctx->seq, ctx->enc,
+ sign_usage, iov, iov_count, toktype,
+ &md5cksum);
if (code != 0) {
- retval = GSS_S_FAILURE;
- goto cleanup;
+ retval = GSS_S_FAILURE;
+ goto cleanup;
}
switch (signalg) {
case SGN_ALG_DES_MAC_MD5:
case SGN_ALG_3:
- code = kg_encrypt(context, ctx->seq, KG_USAGE_SEAL,
- (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ?
- ctx->seq->contents : NULL),
- md5cksum.contents, md5cksum.contents, 16);
- if (code != 0) {
- retval = GSS_S_FAILURE;
- goto cleanup;
- }
-
- cksum.length = cksum_len;
- cksum.contents = md5cksum.contents + 16 - cksum.length;
-
- code = memcmp(cksum.contents, ptr + 14, cksum.length);
- break;
+ code = kg_encrypt(context, ctx->seq, KG_USAGE_SEAL,
+ (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ?
+ ctx->seq->contents : NULL),
+ md5cksum.contents, md5cksum.contents, 16);
+ if (code != 0) {
+ retval = GSS_S_FAILURE;
+ goto cleanup;
+ }
+
+ cksum.length = cksum_len;
+ cksum.contents = md5cksum.contents + 16 - cksum.length;
+
+ code = memcmp(cksum.contents, ptr + 14, cksum.length);
+ break;
case SGN_ALG_HMAC_SHA1_DES3_KD:
case SGN_ALG_HMAC_MD5:
- code = memcmp(md5cksum.contents, ptr + 14, cksum_len);
- break;
+ code = memcmp(md5cksum.contents, ptr + 14, cksum_len);
+ break;
default:
- code = 0;
- retval = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
- break;
+ code = 0;
+ retval = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
+ break;
}
if (code != 0) {
- code = 0;
- retval = GSS_S_BAD_SIG;
- goto cleanup;
+ code = 0;
+ retval = GSS_S_BAD_SIG;
+ goto cleanup;
}
/*
* this and fixup the last data IOV appropriately.
*/
if (toktype == KG_TOK_WRAP_MSG &&
- (ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
- retval = kg_fixup_padding_iov(&code, iov, iov_count);
- if (retval != GSS_S_COMPLETE)
- goto cleanup;
+ (ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
+ retval = kg_fixup_padding_iov(&code, iov, iov_count);
+ if (retval != GSS_S_COMPLETE)
+ goto cleanup;
}
if (conf_state != NULL)
- *conf_state = (sealalg != 0xFFFF);
+ *conf_state = (sealalg != 0xFFFF);
if (qop_state != NULL)
- *qop_state = GSS_C_QOP_DEFAULT;
+ *qop_state = GSS_C_QOP_DEFAULT;
code = krb5_timeofday(context, &now);
if (code != 0) {
- *minor_status = code;
- retval = GSS_S_FAILURE;
- goto cleanup;
+ *minor_status = code;
+ retval = GSS_S_FAILURE;
+ goto cleanup;
}
if (now > ctx->krb_times.endtime) {
- *minor_status = 0;
- retval = GSS_S_CONTEXT_EXPIRED;
- goto cleanup;
+ *minor_status = 0;
+ retval = GSS_S_CONTEXT_EXPIRED;
+ goto cleanup;
}
if ((ctx->initiate && direction != 0xff) ||
- (!ctx->initiate && direction != 0)) {
- *minor_status = (OM_uint32)G_BAD_DIRECTION;
- retval = GSS_S_BAD_SIG;
+ (!ctx->initiate && direction != 0)) {
+ *minor_status = (OM_uint32)G_BAD_DIRECTION;
+ retval = GSS_S_BAD_SIG;
}
code = 0;
*/
static OM_uint32
kg_unseal_iov_token(OM_uint32 *minor_status,
- krb5_gss_ctx_id_rec *ctx,
- int *conf_state,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ krb5_gss_ctx_id_rec *ctx,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
krb5_error_code code;
krb5_context context = ctx->k5_context;
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
if (header == NULL) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
input_length = header->buffer.length;
if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
- size_t data_length, assoc_data_length;
+ size_t data_length, assoc_data_length;
- kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
+ kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
- input_length += data_length - assoc_data_length;
+ input_length += data_length - assoc_data_length;
- if (padding != NULL)
- input_length += padding->buffer.length;
+ if (padding != NULL)
+ input_length += padding->buffer.length;
- if (trailer != NULL)
- input_length += trailer->buffer.length;
+ if (trailer != NULL)
+ input_length += trailer->buffer.length;
}
if (ctx->gss_flags & GSS_C_DCE_STYLE)
- vfyflags |= G_VFY_TOKEN_HDR_IGNORE_SEQ_SIZE;
+ vfyflags |= G_VFY_TOKEN_HDR_IGNORE_SEQ_SIZE;
code = g_verify_token_header(ctx->mech_used,
- &bodysize, &ptr, -1,
- input_length, 0);
+ &bodysize, &ptr, -1,
+                             input_length, vfyflags);
if (code != 0) {
*minor_status = code;
return GSS_S_DEFECTIVE_TOKEN;
}
if (bodysize < 2) {
- *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
+ return GSS_S_DEFECTIVE_TOKEN;
}
toktype2 = load_16_be(ptr);
case KG2_TOK_MIC_MSG:
case KG2_TOK_WRAP_MSG:
case KG2_TOK_DEL_CTX:
- code = gss_krb5int_unseal_v3_iov(context, minor_status, ctx, iov, iov_count,
- conf_state, qop_state, toktype);
- break;
+ code = gss_krb5int_unseal_v3_iov(context, minor_status, ctx, iov, iov_count,
+ conf_state, qop_state, toktype);
+ break;
case KG_TOK_MIC_MSG:
case KG_TOK_WRAP_MSG:
case KG_TOK_DEL_CTX:
- code = kg_unseal_v1_iov(context, minor_status, ctx, iov, iov_count,
- (size_t)(ptr - (unsigned char *)header->buffer.value),
- conf_state, qop_state, toktype);
- break;
+ code = kg_unseal_v1_iov(context, minor_status, ctx, iov, iov_count,
+ (size_t)(ptr - (unsigned char *)header->buffer.value),
+ conf_state, qop_state, toktype);
+ break;
default:
- *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
- code = GSS_S_DEFECTIVE_TOKEN;
- break;
+ *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
+ code = GSS_S_DEFECTIVE_TOKEN;
+ break;
}
if (code != 0)
- save_error_info(*minor_status, context);
+ save_error_info(*minor_status, context);
return code;
}
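/*
 * Illustrative note, not from the krb5 sources: the toktype2 dispatch above
 * separates the two wire formats by their 2-byte TOK_ID.  Per RFC 1964 the
 * pre-CFX IDs are 0x0101 (MIC), 0x0201 (Wrap) and 0x0102 (Delete); per
 * RFC 4121 the CFX IDs are 0x0404 (MIC) and 0x0504 (Wrap), with
 * KG2_TOK_DEL_CTX covering deletion tokens.  The KG_TOK_* / KG2_TOK_* macros
 * in gssapiP_krb5.h carry these values.
 */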
*/
static OM_uint32
kg_unseal_stream_iov(OM_uint32 *minor_status,
- krb5_gss_ctx_id_rec *ctx,
- int *conf_state,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ krb5_gss_ctx_id_rec *ctx,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
unsigned char *ptr;
unsigned int bodysize;
assert(toktype == KG_TOK_WRAP_MSG);
if (toktype != KG_TOK_WRAP_MSG || (ctx->gss_flags & GSS_C_DCE_STYLE)) {
- code = EINVAL;
- goto cleanup;
+ code = EINVAL;
+ goto cleanup;
}
stream = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM);
ptr = (unsigned char *)stream->buffer.value;
code = g_verify_token_header(ctx->mech_used,
- &bodysize, &ptr, -1,
- stream->buffer.length, 0);
+ &bodysize, &ptr, -1,
+ stream->buffer.length, 0);
if (code != 0) {
- major_status = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
}
if (bodysize < 2) {
- *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = (OM_uint32)G_BAD_TOK_HEADER;
+ return GSS_S_DEFECTIVE_TOKEN;
}
toktype2 = load_16_be(ptr);
tiov = (gss_iov_buffer_desc *)calloc((size_t)iov_count + 2, sizeof(gss_iov_buffer_desc));
if (tiov == NULL) {
- code = ENOMEM;
- goto cleanup;
+ code = ENOMEM;
+ goto cleanup;
}
/* HEADER */
theader->buffer.value = stream->buffer.value;
theader->buffer.length = ptr - (unsigned char *)stream->buffer.value;
if (bodysize < 14 ||
- stream->buffer.length != theader->buffer.length + bodysize) {
- major_status = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
+ stream->buffer.length != theader->buffer.length + bodysize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
}
theader->buffer.length += 14;
/* n[SIGN_DATA] | DATA | m[SIGN_DATA] */
for (j = 0; j < iov_count; j++) {
- OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[j].type);
+ OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[j].type);
- if (type == GSS_IOV_BUFFER_TYPE_DATA) {
- if (data != NULL) {
- /* only a single DATA buffer can appear */
- code = EINVAL;
- goto cleanup;
- }
+ if (type == GSS_IOV_BUFFER_TYPE_DATA) {
+ if (data != NULL) {
+ /* only a single DATA buffer can appear */
+ code = EINVAL;
+ goto cleanup;
+ }
- data = &iov[j];
- tdata = &tiov[i];
- }
- if (type == GSS_IOV_BUFFER_TYPE_DATA ||
- type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
- tiov[i++] = iov[j];
+ data = &iov[j];
+ tdata = &tiov[i];
+ }
+ if (type == GSS_IOV_BUFFER_TYPE_DATA ||
+ type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
+ tiov[i++] = iov[j];
}
if (data == NULL) {
- /* a single DATA buffer must be present */
- code = EINVAL;
- goto cleanup;
+ /* a single DATA buffer must be present */
+ code = EINVAL;
+ goto cleanup;
}
/* PADDING | TRAILER */
case KG2_TOK_MIC_MSG:
case KG2_TOK_WRAP_MSG:
case KG2_TOK_DEL_CTX: {
- size_t ec, rrc;
- krb5_enctype enctype = ctx->enc->enctype;
- unsigned int k5_headerlen = 0;
- unsigned int k5_trailerlen = 0;
-
- conf_req_flag = ((ptr[0] & FLAG_WRAP_CONFIDENTIAL) != 0);
- ec = conf_req_flag ? load_16_be(ptr + 2) : 0;
- rrc = load_16_be(ptr + 4);
-
- if (rrc != 0) {
- if (!gss_krb5int_rotate_left((unsigned char *)stream->buffer.value + 16,
- stream->buffer.length - 16, rrc)) {
- code = ENOMEM;
- goto cleanup;
- }
- store_16_be(0, ptr + 4); /* set RRC to zero */
- }
-
- if (conf_req_flag) {
- code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
- if (code != 0)
- goto cleanup;
- theader->buffer.length += k5_headerlen; /* length validated later */
- }
-
- /* no PADDING for CFX, EC is used instead */
- code = krb5_c_crypto_length(context, enctype,
- conf_req_flag ? KRB5_CRYPTO_TYPE_TRAILER : KRB5_CRYPTO_TYPE_CHECKSUM,
- &k5_trailerlen);
- if (code != 0)
- goto cleanup;
-
- ttrailer->buffer.length = ec + (conf_req_flag ? 16 : 0 /* E(Header) */) + k5_trailerlen;
- ttrailer->buffer.value = (unsigned char *)stream->buffer.value +
- stream->buffer.length - ttrailer->buffer.length;
- break;
+ size_t ec, rrc;
+ krb5_enctype enctype = ctx->enc->enctype;
+ unsigned int k5_headerlen = 0;
+ unsigned int k5_trailerlen = 0;
+
+ conf_req_flag = ((ptr[0] & FLAG_WRAP_CONFIDENTIAL) != 0);
+ ec = conf_req_flag ? load_16_be(ptr + 2) : 0;
+ rrc = load_16_be(ptr + 4);
+
+ if (rrc != 0) {
+ if (!gss_krb5int_rotate_left((unsigned char *)stream->buffer.value + 16,
+ stream->buffer.length - 16, rrc)) {
+ code = ENOMEM;
+ goto cleanup;
+ }
+ store_16_be(0, ptr + 4); /* set RRC to zero */
+ }
+
+ if (conf_req_flag) {
+ code = krb5_c_crypto_length(context, enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
+ if (code != 0)
+ goto cleanup;
+ theader->buffer.length += k5_headerlen; /* length validated later */
+ }
+
+ /* no PADDING for CFX, EC is used instead */
+ code = krb5_c_crypto_length(context, enctype,
+ conf_req_flag ? KRB5_CRYPTO_TYPE_TRAILER : KRB5_CRYPTO_TYPE_CHECKSUM,
+ &k5_trailerlen);
+ if (code != 0)
+ goto cleanup;
+
+ ttrailer->buffer.length = ec + (conf_req_flag ? 16 : 0 /* E(Header) */) + k5_trailerlen;
+ ttrailer->buffer.value = (unsigned char *)stream->buffer.value +
+ stream->buffer.length - ttrailer->buffer.length;
+ break;
}
case KG_TOK_MIC_MSG:
case KG_TOK_WRAP_MSG:
case KG_TOK_DEL_CTX:
- theader->buffer.length += ctx->cksum_size + kg_confounder_size(context, ctx->enc);
+ theader->buffer.length += ctx->cksum_size + kg_confounder_size(context, ctx->enc);
- /*
- * we can't set the padding accurately until decryption;
- * kg_fixup_padding_iov() will take care of this
- */
- tpadding->buffer.length = 1;
- tpadding->buffer.value = (unsigned char *)stream->buffer.value + stream->buffer.length - 1;
+ /*
+ * we can't set the padding accurately until decryption;
+ * kg_fixup_padding_iov() will take care of this
+ */
+ tpadding->buffer.length = 1;
+ tpadding->buffer.value = (unsigned char *)stream->buffer.value + stream->buffer.length - 1;
- /* no TRAILER for pre-CFX */
- ttrailer->buffer.length = 0;
- ttrailer->buffer.value = NULL;
+ /* no TRAILER for pre-CFX */
+ ttrailer->buffer.length = 0;
+ ttrailer->buffer.value = NULL;
- break;
+ break;
default:
- code = (OM_uint32)G_BAD_TOK_HEADER;
- major_status = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
- break;
+ code = (OM_uint32)G_BAD_TOK_HEADER;
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
+ break;
}
/* IOV: -----------0-------------+---1---+--2--+----------------3--------------*/
/* validate lengths */
if (stream->buffer.length < theader->buffer.length +
- tpadding->buffer.length +
- ttrailer->buffer.length)
+ tpadding->buffer.length +
+ ttrailer->buffer.length)
{
- code = (OM_uint32)KRB5_BAD_MSIZE;
- major_status = GSS_S_DEFECTIVE_TOKEN;
- goto cleanup;
+ code = (OM_uint32)KRB5_BAD_MSIZE;
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
}
/* setup data */
tdata->buffer.length = stream->buffer.length - ttrailer->buffer.length -
- tpadding->buffer.length - theader->buffer.length;
+ tpadding->buffer.length - theader->buffer.length;
assert(data != NULL);
if (data->type & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
- code = kg_allocate_iov(tdata, tdata->buffer.length);
- if (code != 0)
- goto cleanup;
- memcpy(tdata->buffer.value,
- (unsigned char *)stream->buffer.value + theader->buffer.length, tdata->buffer.length);
+ code = kg_allocate_iov(tdata, tdata->buffer.length);
+ if (code != 0)
+ goto cleanup;
+ memcpy(tdata->buffer.value,
+ (unsigned char *)stream->buffer.value + theader->buffer.length, tdata->buffer.length);
} else
- tdata->buffer.value = (unsigned char *)stream->buffer.value + theader->buffer.length;
+ tdata->buffer.value = (unsigned char *)stream->buffer.value + theader->buffer.length;
assert(i <= iov_count + 2);
major_status = kg_unseal_iov_token(&code, ctx, conf_state, qop_state,
- tiov, i, toktype);
+ tiov, i, toktype);
if (major_status == GSS_S_COMPLETE)
- *data = *tdata;
+ *data = *tdata;
else if (tdata->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
- OM_uint32 tmp;
+ OM_uint32 tmp;
- gss_release_buffer(&tmp, &tdata->buffer);
- tdata->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
+ gss_release_buffer(&tmp, &tdata->buffer);
+ tdata->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
}
cleanup:
if (tiov != NULL)
- free(tiov);
+ free(tiov);
*minor_status = code;
OM_uint32
kg_unseal_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int *conf_state,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype)
+ gss_ctx_id_t context_handle,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype)
{
krb5_gss_ctx_id_rec *ctx;
OM_uint32 code;
if (!kg_validate_ctx_id(context_handle)) {
- *minor_status = (OM_uint32)G_VALIDATE_FAILED;
- return GSS_S_NO_CONTEXT;
+ *minor_status = (OM_uint32)G_VALIDATE_FAILED;
+ return GSS_S_NO_CONTEXT;
}
ctx = (krb5_gss_ctx_id_rec *)context_handle;
if (!ctx->established) {
- *minor_status = KG_CTX_INCOMPLETE;
- return GSS_S_NO_CONTEXT;
+ *minor_status = KG_CTX_INCOMPLETE;
+ return GSS_S_NO_CONTEXT;
}
if (kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM) != NULL) {
- code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state,
- iov, iov_count, toktype);
+ code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state,
+ iov, iov_count, toktype);
} else {
- code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state,
- iov, iov_count, toktype);
+ code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state,
+ iov, iov_count, toktype);
}
return code;
krb5_flags *ticket_flags)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH,
- GSS_KRB5_GET_TKT_FLAGS_OID };
+ GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH,
+ GSS_KRB5_GET_TKT_FLAGS_OID };
OM_uint32 major_status;
gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;
if (ticket_flags == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
major_status = gss_inquire_sec_context_by_oid(minor_status,
- context_handle,
- (const gss_OID)&req_oid,
- &data_set);
+ context_handle,
+ (const gss_OID)&req_oid,
+ &data_set);
if (major_status != GSS_S_COMPLETE)
- return major_status;
+ return major_status;
if (data_set == GSS_C_NO_BUFFER_SET ||
data_set->count != 1 ||
- data_set->elements[0].length != sizeof(*ticket_flags)) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ data_set->elements[0].length != sizeof(*ticket_flags)) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
*ticket_flags = *((krb5_flags *)data_set->elements[0].value);
krb5_ccache out_ccache)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_COPY_CCACHE_OID_LENGTH,
- GSS_KRB5_COPY_CCACHE_OID };
+ GSS_KRB5_COPY_CCACHE_OID_LENGTH,
+ GSS_KRB5_COPY_CCACHE_OID };
OM_uint32 major_status;
gss_buffer_desc req_buffer;
if (out_ccache == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
req_buffer.value = out_ccache;
req_buffer.length = sizeof(out_ccache);
major_status = gssspi_set_cred_option(minor_status,
- cred_handle,
- (const gss_OID)&req_oid,
- &req_buffer);
+ cred_handle,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;
if (kctx == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
*kctx = NULL;
req_oid.length = sizeof(oid_buf);
major_status = generic_gss_oid_compose(minor_status,
- GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID,
- GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH,
- (int)version,
- &req_oid);
+ GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID,
+ GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH,
+ (int)version,
+ &req_oid);
if (GSS_ERROR(major_status))
- return major_status;
+ return major_status;
major_status = gss_inquire_sec_context_by_oid(minor_status,
- *context_handle,
- &req_oid,
- &data_set);
+ *context_handle,
+ &req_oid,
+ &data_set);
if (GSS_ERROR(major_status))
- return major_status;
+ return major_status;
if (data_set == GSS_C_NO_BUFFER_SET ||
data_set->count != 1 ||
- data_set->elements[0].length != sizeof(void *)) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ data_set->elements[0].length != sizeof(void *)) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
*kctx = *((void **)data_set->elements[0].value);
krb5_enctype *ktypes)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH,
- GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID };
+ GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH,
+ GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID };
OM_uint32 major_status;
struct krb5_gss_set_allowable_enctypes_req req;
gss_buffer_desc req_buffer;
req_buffer.value = &req;
major_status = gssspi_set_cred_option(minor_status,
- cred,
- (const gss_OID)&req_oid,
- &req_buffer);
+ cred,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
const char **out_name)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_CCACHE_NAME_OID_LENGTH,
- GSS_KRB5_CCACHE_NAME_OID };
+ GSS_KRB5_CCACHE_NAME_OID_LENGTH,
+ GSS_KRB5_CCACHE_NAME_OID };
OM_uint32 major_status;
struct krb5_gss_ccache_name_req req;
gss_buffer_desc req_buffer;
req_buffer.value = &req;
major_status = gssspi_mech_invoke(minor_status,
- (const gss_OID)gss_mech_krb5,
- (const gss_OID)&req_oid,
- &req_buffer);
+ (const gss_OID)gss_mech_krb5,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
void *kctx)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH,
- GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID };
+ GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH,
+ GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID };
OM_uint32 major_status;
gss_buffer_desc req_buffer;
req_buffer.value = kctx;
major_status = gssspi_mech_invoke(minor_status,
- (const gss_OID)gss_mech_krb5,
- (const gss_OID)&req_oid,
- &req_buffer);
+ (const gss_OID)gss_mech_krb5,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
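/*
 * Illustrative usage sketch, not from the krb5 sources: the wrappers above
 * back the lucid-context calls declared in <gssapi/gssapi_krb5.h>.  A caller
 * that needs the raw session keys typically exports, inspects, then frees;
 * `established_ctx` below is assumed to be a fully established context.
 */
#include <stddef.h>
#include <gssapi/gssapi_krb5.h>

static void
example_export_lucid(gss_ctx_id_t established_ctx)
{
    OM_uint32 major, minor;
    void *lucid = NULL;

    /* A successful export deletes the context handle passed in. */
    major = gss_krb5_export_lucid_sec_context(&minor, &established_ctx,
                                              1 /* struct version */, &lucid);
    if (major == GSS_S_COMPLETE && lucid != NULL) {
        /* ... read the gss_krb5_lucid_context_v1_t fields here ... */
        (void)gss_krb5_free_lucid_sec_context(&minor, lucid);
    }
}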
krb5_gss_register_acceptor_identity(const char *keytab)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH,
- GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID };
+ GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH,
+ GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID };
OM_uint32 major_status;
OM_uint32 minor_status;
gss_buffer_desc req_buffer;
req_buffer.value = (char *)keytab;
major_status = gssspi_mech_invoke(&minor_status,
- (const gss_OID)gss_mech_krb5,
- (const gss_OID)&req_oid,
- &req_buffer);
+ (const gss_OID)gss_mech_krb5,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
krb5_gss_use_kdc_context(void)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH,
- GSS_KRB5_USE_KDC_CONTEXT_OID };
+ GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH,
+ GSS_KRB5_USE_KDC_CONTEXT_OID };
OM_uint32 major_status;
OM_uint32 minor_status;
gss_buffer_desc req_buffer;
req_buffer.value = NULL;
major_status = gssspi_mech_invoke(&minor_status,
- (const gss_OID)gss_mech_krb5,
- (const gss_OID)&req_oid,
- &req_buffer);
+ (const gss_OID)gss_mech_krb5,
+ (const gss_OID)&req_oid,
+ &req_buffer);
if (major_status != GSS_S_COMPLETE) {
if (minor_status != 0)
gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;
if (ad_data == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
req_oid.elements = oid_buf;
req_oid.length = sizeof(oid_buf);
major_status = generic_gss_oid_compose(minor_status,
- GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID,
- GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH,
- ad_type,
- &req_oid);
+ GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID,
+ GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH,
+ ad_type,
+ &req_oid);
if (GSS_ERROR(major_status))
- return major_status;
+ return major_status;
major_status = gss_inquire_sec_context_by_oid(minor_status,
- context_handle,
- (const gss_OID)&req_oid,
- &data_set);
+ context_handle,
+ (const gss_OID)&req_oid,
+ &data_set);
if (major_status != GSS_S_COMPLETE) {
- return major_status;
+ return major_status;
}
if (data_set == GSS_C_NO_BUFFER_SET ||
- data_set->count != 1) {
- return GSS_S_FAILURE;
+ data_set->count != 1) {
+ return GSS_S_FAILURE;
}
ad_data->length = data_set->elements[0].length;
krb5_rcache rcache)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH,
- GSS_KRB5_SET_CRED_RCACHE_OID };
+ GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH,
+ GSS_KRB5_SET_CRED_RCACHE_OID };
OM_uint32 major_status;
gss_buffer_desc req_buffer;
req_buffer.value = rcache;
major_status = gssspi_set_cred_option(minor_status,
- cred,
- (const gss_OID)&req_oid,
- &req_buffer);
+ cred,
+ (const gss_OID)&req_oid,
+ &req_buffer);
return major_status;
}
OM_uint32 KRB5_CALLCONV
gsskrb5_extract_authtime_from_sec_context(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- krb5_timestamp *authtime)
+ gss_ctx_id_t context_handle,
+ krb5_timestamp *authtime)
{
static const gss_OID_desc const req_oid = {
- GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH,
- GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID };
+ GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH,
+ GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID };
OM_uint32 major_status;
gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;
if (authtime == NULL)
- return GSS_S_CALL_INACCESSIBLE_WRITE;
+ return GSS_S_CALL_INACCESSIBLE_WRITE;
major_status = gss_inquire_sec_context_by_oid(minor_status,
- context_handle,
- (const gss_OID)&req_oid,
- &data_set);
+ context_handle,
+ (const gss_OID)&req_oid,
+ &data_set);
if (major_status != GSS_S_COMPLETE)
- return major_status;
+ return major_status;
if (data_set == GSS_C_NO_BUFFER_SET ||
data_set->count != 1 ||
- data_set->elements[0].length != sizeof(*authtime)) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ data_set->elements[0].length != sizeof(*authtime)) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
*authtime = *((krb5_timestamp *)data_set->elements[0].value);
gss_krb5int_export_lucid_sec_context(
OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
- const gss_OID desired_object,
- gss_buffer_set_t *data_set)
+ const gss_OID desired_object,
+ gss_buffer_set_t *data_set)
{
krb5_error_code kret = 0;
OM_uint32 retval;
krb5_gss_ctx_id_t ctx = (krb5_gss_ctx_id_t)context_handle;
void *lctx = NULL;
- int version = 0;
- gss_buffer_desc rep;
+ int version = 0;
+ gss_buffer_desc rep;
/* Assume failure */
retval = GSS_S_FAILURE;
*data_set = GSS_C_NO_BUFFER_SET;
retval = generic_gss_oid_decompose(minor_status,
- GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID,
- GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH,
- desired_object,
- &version);
+ GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID,
+ GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH,
+ desired_object,
+ &version);
if (GSS_ERROR(retval))
- return retval;
+ return retval;
/* Externalize a structure of the right version */
switch (version) {
retval = generic_gss_add_buffer_set_member(minor_status, &rep, data_set);
if (GSS_ERROR(retval))
- goto error_out;
+ goto error_out;
error_out:
if (*minor_status == 0)
OM_uint32 retval;
krb5_error_code kret = 0;
int version;
- void *kctx;
+ void *kctx;
/* Assume failure */
retval = GSS_S_FAILURE;
/* AEAD interfaces */
OM_uint32
krb5_gss_wrap_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_seal_iov(minor_status, context_handle, conf_req_flag,
- qop_req, conf_state,
- iov, iov_count, KG_TOK_WRAP_MSG);
+ qop_req, conf_state,
+ iov, iov_count, KG_TOK_WRAP_MSG);
return major_status;
}
OM_uint32
krb5_gss_wrap_iov_length(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_seal_iov_length(minor_status, context_handle, conf_req_flag,
- qop_req, conf_state, iov, iov_count);
+ qop_req, conf_state, iov, iov_count);
return major_status;
}
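/*
 * Illustrative usage sketch, not from the krb5 sources: the dispatchers above
 * implement the IOV extensions declared in <gssapi/gssapi_ext.h>.  The usual
 * pattern is two passes: gss_wrap_iov_length() to learn the HEADER, PADDING
 * and TRAILER sizes, then gss_wrap_iov() to seal the DATA buffer in place.
 * (Setting GSS_IOV_BUFFER_FLAG_ALLOCATE instead lets the mechanism allocate
 * those buffers itself, via kg_allocate_iov() as seen earlier.)
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <gssapi/gssapi_ext.h>

static OM_uint32
example_wrap_in_place(gss_ctx_id_t ctx, void *data, size_t len,
                      OM_uint32 *minor)
{
    OM_uint32 major;
    int conf_state = 0;
    gss_iov_buffer_desc iov[4];

    memset(iov, 0, sizeof(iov));
    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
    iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
    iov[1].buffer.value = data;
    iov[1].buffer.length = len;
    iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
    iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER;

    /* Pass 1: have the mechanism report the required buffer sizes. */
    major = gss_wrap_iov_length(minor, ctx, 1, GSS_C_QOP_DEFAULT,
                                &conf_state, iov, 4);
    if (GSS_ERROR(major))
        return major;

    /* Allocate the non-data buffers at the sizes reported above. */
    iov[0].buffer.value = malloc(iov[0].buffer.length);
    iov[2].buffer.value = iov[2].buffer.length ? malloc(iov[2].buffer.length) : NULL;
    iov[3].buffer.value = iov[3].buffer.length ? malloc(iov[3].buffer.length) : NULL;
    if (iov[0].buffer.value == NULL ||
        (iov[2].buffer.length != 0 && iov[2].buffer.value == NULL) ||
        (iov[3].buffer.length != 0 && iov[3].buffer.value == NULL)) {
        free(iov[0].buffer.value);
        free(iov[2].buffer.value);
        free(iov[3].buffer.value);
        *minor = ENOMEM;
        return GSS_S_FAILURE;
    }

    /* Pass 2: encrypt and sign; the DATA buffer is transformed in place.
     * The caller transmits header|data|padding|trailer and later frees the
     * three buffers allocated above. */
    return gss_wrap_iov(minor, ctx, 1, GSS_C_QOP_DEFAULT, &conf_state, iov, 4);
}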
KV5M_KEYBLOCK,
(krb5_pointer) ctx->acceptor_subkey,
&required);
- if (!kret && ctx->authdata) {
- krb5_int32 i;
-
- for (i = 0; !kret && ctx->authdata[i]; i++) {
- kret = krb5_size_opaque(kcontext,
- KV5M_AUTHDATA,
- (krb5_pointer)ctx->authdata[i],
- &required);
- }
- }
+ if (!kret && ctx->authdata) {
+ krb5_int32 i;
+
+ for (i = 0; !kret && ctx->authdata[i]; i++) {
+ kret = krb5_size_opaque(kcontext,
+ KV5M_AUTHDATA,
+ (krb5_pointer)ctx->authdata[i],
+ &required);
+ }
+ }
if (!kret)
*sizep += required;
}
if (!kret)
kret = krb5_ser_pack_int32((krb5_int32) ctx->cred_rcache,
&bp, &remain);
- if (!kret) {
- krb5_int32 i = 0;
-
- if (ctx->authdata) {
- for (; ctx->authdata[i]; i++)
- ;
- }
- /* authdata count */
- kret = krb5_ser_pack_int32(i, &bp, &remain);
- if (!kret && ctx->authdata) {
- /* authdata */
- for (i = 0; !kret && ctx->authdata[i]; i++)
- kret = krb5_externalize_opaque(kcontext,
- KV5M_AUTHDATA,
- ctx->authdata[i],
- &bp,
- &remain);
- }
- }
+ if (!kret) {
+ krb5_int32 i = 0;
+
+ if (ctx->authdata) {
+ for (; ctx->authdata[i]; i++)
+ ;
+ }
+ /* authdata count */
+ kret = krb5_ser_pack_int32(i, &bp, &remain);
+ if (!kret && ctx->authdata) {
+ /* authdata */
+ for (i = 0; !kret && ctx->authdata[i]; i++)
+ kret = krb5_externalize_opaque(kcontext,
+ KV5M_AUTHDATA,
+ ctx->authdata[i],
+ &bp,
+ &remain);
+ }
+ }
/* trailer */
if (!kret)
kret = krb5_ser_pack_int32(KG_CONTEXT, &bp, &remain);
if (!kret)
kret = krb5_ser_unpack_int32(&ibuf, &bp, &remain);
ctx->cred_rcache = ibuf;
- /* authdata */
+ /* authdata */
if (!kret)
kret = krb5_ser_unpack_int32(&ibuf, &bp, &remain);
- if (!kret) {
- krb5_int32 nadata = ibuf, i;
-
- if (nadata > 0) {
- ctx->authdata = (krb5_authdata **)calloc((size_t)nadata + 1,
- sizeof(krb5_authdata *));
- if (ctx->authdata == NULL) {
- kret = ENOMEM;
- } else {
- for (i = 0; !kret && i < nadata; i++)
- kret = krb5_internalize_opaque(kcontext,
- KV5M_AUTHDATA,
- (krb5_pointer *)&ctx->authdata[i],
- &bp,
- &remain);
- }
- }
- }
+ if (!kret) {
+ krb5_int32 nadata = ibuf, i;
+
+ if (nadata > 0) {
+ ctx->authdata = (krb5_authdata **)calloc((size_t)nadata + 1,
+ sizeof(krb5_authdata *));
+ if (ctx->authdata == NULL) {
+ kret = ENOMEM;
+ } else {
+ for (i = 0; !kret && i < nadata; i++)
+ kret = krb5_internalize_opaque(kcontext,
+ KV5M_AUTHDATA,
+ (krb5_pointer *)&ctx->authdata[i],
+ &bp,
+ &remain);
+ }
+ }
+ }
/* Get trailer */
if (!kret)
kret = krb5_ser_unpack_int32(&ibuf, &bp, &remain);
OM_uint32 KRB5_CALLCONV
gss_krb5int_ccache_name(OM_uint32 *minor_status,
- const gss_OID desired_mech,
- const gss_OID desired_object,
- gss_buffer_t value)
+ const gss_OID desired_mech,
+ const gss_OID desired_object,
+ gss_buffer_t value)
{
char *old_name = NULL;
OM_uint32 err = 0;
assert(value->length == sizeof(*req));
if (value->length != sizeof(*req))
- return GSS_S_FAILURE;
+ return GSS_S_FAILURE;
req = (struct krb5_gss_ccache_name_req *)value->value;
#if 0
OM_uint32
krb5_gss_get_mic_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- gss_qop_t qop_req,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ gss_qop_t qop_req,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_seal_iov(minor_status, context_handle, FALSE,
- qop_req, NULL,
- iov, iov_count, KG_TOK_MIC_MSG);
+ qop_req, NULL,
+ iov, iov_count, KG_TOK_MIC_MSG);
return major_status;
}
OM_uint32
krb5_gss_get_mic_iov_length(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int conf_req_flag,
- gss_qop_t qop_req,
- int *conf_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_seal_iov_length(minor_status, context_handle, conf_req_flag,
- qop_req, conf_state, iov, iov_count);
+ qop_req, conf_state, iov, iov_count);
return major_status;
}
#endif
/* AEAD interface */
OM_uint32
krb5_gss_unwrap_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- int *conf_state,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_unseal_iov(minor_status, context_handle,
- conf_state, qop_state,
- iov, iov_count, KG_TOK_WRAP_MSG);
+ conf_state, qop_state,
+ iov, iov_count, KG_TOK_WRAP_MSG);
return major_status;
}
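Correspondingly, a minimal sketch of unwrapping a complete received token through the IOV interface, assuming the token sits in token/token_len (placeholder names): the STREAM buffer carries the whole wrap token, and the ALLOCATE flag asks the mechanism to return the payload in a buffer it owns, released afterwards with gss_release_iov_buffer.

#include <stddef.h>
#include <gssapi/gssapi_ext.h>

static OM_uint32
unwrap_stream(OM_uint32 *minor, gss_ctx_id_t ctx,
              unsigned char *token, size_t token_len)
{
    OM_uint32 major, tmp;
    int conf_state = 0;
    gss_qop_t qop_state = GSS_C_QOP_DEFAULT;
    gss_iov_buffer_desc iov[2];

    /* STREAM carries the complete wrap token as received. */
    iov[0].type = GSS_IOV_BUFFER_TYPE_STREAM;
    iov[0].buffer.value = token;
    iov[0].buffer.length = token_len;

    /* DATA receives the payload; ALLOCATE lets the mechanism size it. */
    iov[1].type = GSS_IOV_BUFFER_TYPE_DATA | GSS_IOV_BUFFER_FLAG_ALLOCATE;
    iov[1].buffer.value = NULL;
    iov[1].buffer.length = 0;

    major = gss_unwrap_iov(minor, ctx, &conf_state, &qop_state, iov, 2);
    if (major == GSS_S_COMPLETE) {
        /* ... consume iov[1].buffer here ... */
        gss_release_iov_buffer(&tmp, iov, 2);
    }
    return major;
}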
krb5_error_code
kg_make_checksum_iov_v1(krb5_context context,
- krb5_cksumtype type,
- size_t cksum_len,
- krb5_keyblock *seq,
- krb5_keyblock *enc,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count,
- int toktype,
- krb5_checksum *checksum)
+ krb5_cksumtype type,
+ size_t cksum_len,
+ krb5_keyblock *seq,
+ krb5_keyblock *enc,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ int toktype,
+ krb5_checksum *checksum)
{
krb5_error_code code;
gss_iov_buffer_desc *header;
kiov_count = 3 + iov_count;
kiov = (krb5_crypto_iov *)xmalloc(kiov_count * sizeof(krb5_crypto_iov));
if (kiov == NULL)
- return ENOMEM;
+ return ENOMEM;
/* Checksum over ( Header | Confounder | Data | Pad ) */
if (toktype == KG_TOK_WRAP_MSG)
- conf_len = kg_confounder_size(context, (krb5_keyblock *)enc);
+ conf_len = kg_confounder_size(context, (krb5_keyblock *)enc);
/* Checksum output */
kiov[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
kiov[i].data.length = checksum->length;
kiov[i].data.data = xmalloc(checksum->length);
if (kiov[i].data.data == NULL) {
- xfree(kiov);
- return ENOMEM;
+ xfree(kiov);
+ return ENOMEM;
}
i++;
/* Confounder */
if (toktype == KG_TOK_WRAP_MSG) {
- kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
- kiov[i].data.length = conf_len;
- kiov[i].data.data = (char *)header->buffer.value + header->buffer.length - conf_len;
- i++;
+ kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ kiov[i].data.length = conf_len;
+ kiov[i].data.data = (char *)header->buffer.value + header->buffer.length - conf_len;
+ i++;
}
for (j = 0; j < iov_count; j++) {
- kiov[i].flags = kg_translate_flag_iov(iov[j].type);
- kiov[i].data.length = iov[j].buffer.length;
- kiov[i].data.data = (char *)iov[j].buffer.value;
- i++;
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
}
code = krb5_c_make_checksum_iov(context, type, seq, sign_usage, kiov, kiov_count);
if (code == 0) {
- checksum->length = kiov[0].data.length;
- checksum->contents = (unsigned char *)kiov[0].data.data;
+ checksum->length = kiov[0].data.length;
+ checksum->contents = (unsigned char *)kiov[0].data.data;
} else
- free(kiov[0].data.data);
+ free(kiov[0].data.data);
xfree(kiov);
static krb5_error_code
checksum_iov_v3(krb5_context context,
- krb5_cksumtype type,
- size_t rrc,
- krb5_keyblock *key,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count,
- krb5_boolean verify,
- krb5_boolean *valid)
+ krb5_cksumtype type,
+ size_t rrc,
+ krb5_keyblock *key,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ krb5_boolean verify,
+ krb5_boolean *valid)
{
krb5_error_code code;
gss_iov_buffer_desc *header;
unsigned int k5_checksumlen;
if (verify)
- *valid = FALSE;
+ *valid = FALSE;
code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_CHECKSUM, &k5_checksumlen);
if (code != 0)
- return code;
+ return code;
header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
assert(header != NULL);
assert(rrc != 0 || trailer != NULL);
if (trailer == NULL) {
- if (rrc != k5_checksumlen)
- return KRB5_BAD_MSIZE;
- if (header->buffer.length != 16 + k5_checksumlen)
- return KRB5_BAD_MSIZE;
+ if (rrc != k5_checksumlen)
+ return KRB5_BAD_MSIZE;
+ if (header->buffer.length != 16 + k5_checksumlen)
+ return KRB5_BAD_MSIZE;
} else if (trailer->buffer.length != k5_checksumlen)
- return KRB5_BAD_MSIZE;
+ return KRB5_BAD_MSIZE;
kiov_count = 2 + iov_count;
kiov = (krb5_crypto_iov *)xmalloc(kiov_count * sizeof(krb5_crypto_iov));
if (kiov == NULL)
- return ENOMEM;
+ return ENOMEM;
/* Checksum over ( Data | Header ) */
/* Data */
for (j = 0; j < iov_count; j++) {
- kiov[i].flags = kg_translate_flag_iov(iov[j].type);
- kiov[i].data.length = iov[j].buffer.length;
- kiov[i].data.data = (char *)iov[j].buffer.value;
- i++;
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
}
/* Header */
/* Checksum */
kiov[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
if (trailer == NULL) {
- kiov[i].data.length = header->buffer.length - 16;
- kiov[i].data.data = (char *)header->buffer.value + 16;
+ kiov[i].data.length = header->buffer.length - 16;
+ kiov[i].data.data = (char *)header->buffer.value + 16;
} else {
- kiov[i].data.length = trailer->buffer.length;
- kiov[i].data.data = (char *)trailer->buffer.value;
+ kiov[i].data.length = trailer->buffer.length;
+ kiov[i].data.data = (char *)trailer->buffer.value;
}
i++;
if (verify)
- code = krb5_c_verify_checksum_iov(context, type, key, sign_usage, kiov, kiov_count, valid);
+ code = krb5_c_verify_checksum_iov(context, type, key, sign_usage, kiov, kiov_count, valid);
else
- code = krb5_c_make_checksum_iov(context, type, key, sign_usage, kiov, kiov_count);
+ code = krb5_c_make_checksum_iov(context, type, key, sign_usage, kiov, kiov_count);
xfree(kiov);
krb5_error_code
kg_make_checksum_iov_v3(krb5_context context,
- krb5_cksumtype type,
- size_t rrc,
- krb5_keyblock *key,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ krb5_cksumtype type,
+ size_t rrc,
+ krb5_keyblock *key,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
return checksum_iov_v3(context, type, rrc, key,
- sign_usage, iov, iov_count, 0, NULL);
+ sign_usage, iov, iov_count, 0, NULL);
}
krb5_error_code
kg_verify_checksum_iov_v3(krb5_context context,
- krb5_cksumtype type,
- size_t rrc,
- krb5_keyblock *key,
- krb5_keyusage sign_usage,
- gss_iov_buffer_desc *iov,
- int iov_count,
- krb5_boolean *valid)
+ krb5_cksumtype type,
+ size_t rrc,
+ krb5_keyblock *key,
+ krb5_keyusage sign_usage,
+ gss_iov_buffer_desc *iov,
+ int iov_count,
+ krb5_boolean *valid)
{
return checksum_iov_v3(context, type, rrc, key,
- sign_usage, iov, iov_count, 1, valid);
+ sign_usage, iov, iov_count, 1, valid);
}
static krb5_error_code
kg_copy_keys(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- krb5_keyblock *subkey)
+ krb5_gss_ctx_id_rec *ctx,
+ krb5_keyblock *subkey)
{
krb5_error_code code;
if (ctx->enc != NULL) {
- krb5_free_keyblock(context, ctx->enc);
- ctx->enc = NULL;
+ krb5_free_keyblock(context, ctx->enc);
+ ctx->enc = NULL;
}
code = krb5_copy_keyblock(context, subkey, &ctx->enc);
if (code != 0)
- return code;
+ return code;
if (ctx->seq != NULL) {
- krb5_free_keyblock(context, ctx->seq);
- ctx->seq = NULL;
+ krb5_free_keyblock(context, ctx->seq);
+ ctx->seq = NULL;
}
code = krb5_copy_keyblock(context, subkey, &ctx->seq);
if (code != 0)
- return code;
+ return code;
return 0;
}
krb5_error_code
kg_setup_keys(krb5_context context,
- krb5_gss_ctx_id_rec *ctx,
- krb5_keyblock *subkey,
- krb5_cksumtype *cksumtype)
+ krb5_gss_ctx_id_rec *ctx,
+ krb5_keyblock *subkey,
+ krb5_cksumtype *cksumtype)
{
krb5_error_code code;
unsigned int i;
ctx->proto = 0;
if (ctx->enc == NULL) {
- ctx->signalg = -1;
- ctx->sealalg = -1;
+ ctx->signalg = -1;
+ ctx->sealalg = -1;
}
code = krb5int_accessor(&kaccess, KRB5INT_ACCESS_VERSION);
if (code != 0)
- return code;
+ return code;
code = (*kaccess.krb5int_c_mandatory_cksumtype)(context, subkey->enctype,
- cksumtype);
+ cksumtype);
if (code != 0)
- return code;
+ return code;
switch (subkey->enctype) {
case ENCTYPE_DES_CBC_MD5:
case ENCTYPE_DES_CBC_MD4:
case ENCTYPE_DES_CBC_CRC:
- code = kg_copy_keys(context, ctx, subkey);
- if (code != 0)
- return code;
-
- ctx->enc->enctype = ENCTYPE_DES_CBC_RAW;
- ctx->seq->enctype = ENCTYPE_DES_CBC_RAW;
- ctx->signalg = SGN_ALG_DES_MAC_MD5;
- ctx->cksum_size = 8;
- ctx->sealalg = SEAL_ALG_DES;
-
- for (i = 0; i < ctx->enc->length; i++)
- /*SUPPRESS 113*/
- ctx->enc->contents[i] ^= 0xF0;
- break;
+ code = kg_copy_keys(context, ctx, subkey);
+ if (code != 0)
+ return code;
+
+ ctx->enc->enctype = ENCTYPE_DES_CBC_RAW;
+ ctx->seq->enctype = ENCTYPE_DES_CBC_RAW;
+ ctx->signalg = SGN_ALG_DES_MAC_MD5;
+ ctx->cksum_size = 8;
+ ctx->sealalg = SEAL_ALG_DES;
+
+ for (i = 0; i < ctx->enc->length; i++)
+ /*SUPPRESS 113*/
+ ctx->enc->contents[i] ^= 0xF0;
+ break;
case ENCTYPE_DES3_CBC_SHA1:
- code = kg_copy_keys(context, ctx, subkey);
- if (code != 0)
- return code;
-
- ctx->enc->enctype = ENCTYPE_DES3_CBC_RAW;
- ctx->seq->enctype = ENCTYPE_DES3_CBC_RAW;
- ctx->signalg = SGN_ALG_HMAC_SHA1_DES3_KD;
- ctx->cksum_size = 20;
- ctx->sealalg = SEAL_ALG_DES3KD;
- break;
+ code = kg_copy_keys(context, ctx, subkey);
+ if (code != 0)
+ return code;
+
+ ctx->enc->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->seq->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->signalg = SGN_ALG_HMAC_SHA1_DES3_KD;
+ ctx->cksum_size = 20;
+ ctx->sealalg = SEAL_ALG_DES3KD;
+ break;
case ENCTYPE_ARCFOUR_HMAC:
case ENCTYPE_ARCFOUR_HMAC_EXP:
- code = kg_copy_keys(context, ctx, subkey);
- if (code != 0)
- return code;
-
- ctx->signalg = SGN_ALG_HMAC_MD5;
- ctx->cksum_size = 8;
- ctx->sealalg = SEAL_ALG_MICROSOFT_RC4;
- break;
+ code = kg_copy_keys(context, ctx, subkey);
+ if (code != 0)
+ return code;
+
+ ctx->signalg = SGN_ALG_HMAC_MD5;
+ ctx->cksum_size = 8;
+ ctx->sealalg = SEAL_ALG_MICROSOFT_RC4;
+ break;
default:
- ctx->proto = 1;
- break;
+ ctx->proto = 1;
+ break;
}
return 0;
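/*
 * In short, the switch above selects the pre-CFX algorithm identifiers for
 * the legacy enctypes and falls through to CFX (ctx->proto = 1) for
 * everything else:
 *
 *   DES-CBC-{CRC,MD4,MD5}  -> SGN_ALG_DES_MAC_MD5,       SEAL_ALG_DES,           8-byte checksum
 *   DES3-CBC-SHA1          -> SGN_ALG_HMAC_SHA1_DES3_KD, SEAL_ALG_DES3KD,        20-byte checksum
 *   ARCFOUR-HMAC(-EXP)     -> SGN_ALG_HMAC_MD5,          SEAL_ALG_MICROSOFT_RC4, 8-byte checksum
 *   anything else          -> RFC 4121 (CFX) tokens
 */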
size_t blocksize;
/* We special-case RC4. */
if (key->enctype == ENCTYPE_ARCFOUR_HMAC ||
- key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP)
+ key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP)
return 8;
code = krb5_c_block_size(context, key->enctype, &blocksize);
if (code)
confsize = kg_confounder_size(context, key);
if (confsize < 0)
- return KRB5_BAD_MSIZE;
+ return KRB5_BAD_MSIZE;
lrandom.length = confsize;
lrandom.data = (char *)buf;
goto cleanup_arcfour;
if (exportable) {
- memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
- i += sizeof(kg_arcfour_l40);
+ memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
+ i += sizeof(kg_arcfour_l40);
}
t[i++] = ms_usage &0xff;
t[i++] = (ms_usage>>8) & 0xff;
if (code)
goto cleanup_arcfour;
if (exportable)
- memset(usage_key.contents + 7, 0xab, 9);
+ memset(usage_key.contents + 7, 0xab, 9);
input.data = ( void *) kd_data;
input.length = kd_data_len;
assert(header != NULL);
if (header->buffer.length < conf_len)
- return KRB5_BAD_MSIZE;
+ return KRB5_BAD_MSIZE;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
assert(trailer == NULL || trailer->buffer.length == 0);
kiov_count = 3 + iov_count;
kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
if (kiov == NULL)
- return ENOMEM;
+ return ENOMEM;
/* For pre-CFX (raw enctypes) there is no krb5 header */
kiov[i].flags = KRB5_CRYPTO_TYPE_HEADER;
i++;
for (j = 0; j < iov_count; j++) {
- kiov[i].flags = kg_translate_flag_iov(iov[j].type);
- if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
- continue;
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
+ continue;
- kiov[i].data.length = iov[j].buffer.length;
- kiov[i].data.data = (char *)iov[j].buffer.value;
- i++;
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
}
kiov[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
static krb5_error_code
kg_translate_iov_v3(context, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count)
krb5_context context;
- int dce_style; /* DCE_STYLE indicates actual RRC is EC + RRC */
- size_t ec; /* Extra rotate count for DCE_STYLE, pad length otherwise */
- size_t rrc; /* Rotate count */
+ int dce_style; /* DCE_STYLE indicates actual RRC is EC + RRC */
+ size_t ec; /* Extra rotate count for DCE_STYLE, pad length otherwise */
+ size_t rrc; /* Rotate count */
const krb5_keyblock *key;
gss_iov_buffer_desc *iov;
int iov_count;
code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_HEADER, &k5_headerlen);
if (code != 0)
- return code;
+ return code;
code = krb5_c_crypto_length(context, key->enctype, KRB5_CRYPTO_TYPE_TRAILER, &k5_trailerlen);
if (code != 0)
- return code;
+ return code;
/* Check header and trailer sizes */
gss_headerlen = 16 /* GSS-Header */ + k5_headerlen; /* Kerb-Header */
/* If we're called without a trailer, we must rotate by trailer length */
if (trailer == NULL) {
- size_t actual_rrc = rrc;
+ size_t actual_rrc = rrc;
- if (dce_style)
- actual_rrc += ec; /* compensate for Windows bug */
+ if (dce_style)
+ actual_rrc += ec; /* compensate for Windows bug */
- if (actual_rrc != gss_trailerlen)
- return KRB5_BAD_MSIZE;
+ if (actual_rrc != gss_trailerlen)
+ return KRB5_BAD_MSIZE;
- gss_headerlen += gss_trailerlen;
- gss_trailerlen = 0;
+ gss_headerlen += gss_trailerlen;
+ gss_trailerlen = 0;
} else {
- if (trailer->buffer.length != gss_trailerlen)
- return KRB5_BAD_MSIZE;
+ if (trailer->buffer.length != gss_trailerlen)
+ return KRB5_BAD_MSIZE;
}
if (header->buffer.length != gss_headerlen)
- return KRB5_BAD_MSIZE;
+ return KRB5_BAD_MSIZE;
kiov_count = 3 + iov_count;
kiov = (krb5_crypto_iov *)malloc(kiov_count * sizeof(krb5_crypto_iov));
if (kiov == NULL)
- return ENOMEM;
+ return ENOMEM;
/*
* The krb5 header is located at the end of the GSS header.
i++;
for (j = 0; j < iov_count; j++) {
- kiov[i].flags = kg_translate_flag_iov(iov[j].type);
- if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
- continue;
+ kiov[i].flags = kg_translate_flag_iov(iov[j].type);
+ if (kiov[i].flags == KRB5_CRYPTO_TYPE_EMPTY)
+ continue;
- kiov[i].data.length = iov[j].buffer.length;
- kiov[i].data.data = (char *)iov[j].buffer.value;
- i++;
+ kiov[i].data.length = iov[j].buffer.length;
+ kiov[i].data.data = (char *)iov[j].buffer.value;
+ i++;
}
/*
kiov[i].flags = KRB5_CRYPTO_TYPE_DATA;
kiov[i].data.length = ec + 16; /* E(Header) */
if (trailer == NULL)
- kiov[i].data.data = (char *)header->buffer.value + 16;
+ kiov[i].data.data = (char *)header->buffer.value + 16;
else
- kiov[i].data.data = (char *)trailer->buffer.value;
+ kiov[i].data.data = (char *)trailer->buffer.value;
i++;
/*
static krb5_error_code
kg_translate_iov(context, proto, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count)
krb5_context context;
- int proto; /* 1 if CFX, 0 for pre-CFX */
+ int proto; /* 1 if CFX, 0 for pre-CFX */
int dce_style;
size_t ec;
size_t rrc;
size_t *pkiov_count;
{
return proto ?
- kg_translate_iov_v3(context, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count) :
- kg_translate_iov_v1(context, key, iov, iov_count, pkiov, pkiov_count);
+ kg_translate_iov_v3(context, dce_style, ec, rrc, key, iov, iov_count, pkiov, pkiov_count) :
+ kg_translate_iov_v1(context, key, iov, iov_count, pkiov, pkiov_count);
}
krb5_error_code
}
code = kg_translate_iov(context, proto, dce_style, ec, rrc, key,
- iov, iov_count, &kiov, &kiov_count);
+ iov, iov_count, &kiov, &kiov_count);
if (code == 0) {
- code = krb5_c_encrypt_iov(context, key, usage, pivd, kiov, kiov_count);
- free(kiov);
+ code = krb5_c_encrypt_iov(context, key, usage, pivd, kiov, kiov_count);
+ free(kiov);
}
if (pivd != NULL)
}
code = kg_translate_iov(context, proto, dce_style, ec, rrc, key,
- iov, iov_count, &kiov, &kiov_count);
+ iov, iov_count, &kiov, &kiov_count);
if (code == 0) {
- code = krb5_c_decrypt_iov(context, key, usage, pivd, kiov, kiov_count);
- free(kiov);
+ code = krb5_c_decrypt_iov(context, key, usage, pivd, kiov, kiov_count);
+ free(kiov);
}
if (pivd != NULL)
krb5_error_code
kg_arcfour_docrypt_iov (krb5_context context,
- const krb5_keyblock *longterm_key , int ms_usage,
+ const krb5_keyblock *longterm_key , int ms_usage,
const unsigned char *kd_data, size_t kd_data_len,
gss_iov_buffer_desc *iov, int iov_count)
{
goto cleanup_arcfour;
if (exportable) {
- memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
- i += sizeof(kg_arcfour_l40);
+ memcpy(t, kg_arcfour_l40, sizeof(kg_arcfour_l40));
+ i += sizeof(kg_arcfour_l40);
}
t[i++] = ms_usage &0xff;
t[i++] = (ms_usage>>8) & 0xff;
if (code)
goto cleanup_arcfour;
if (exportable)
- memset(usage_key.contents + 7, 0xab, 9);
+ memset(usage_key.contents + 7, 0xab, 9);
input.data = ( void *) kd_data;
input.length = kd_data_len;
goto cleanup_arcfour;
code = kg_translate_iov(context, 0 /* proto */, 0 /* dce_style */,
- 0 /* ec */, 0 /* rrc */, longterm_key,
- iov, iov_count, &kiov, &kiov_count);
+ 0 /* ec */, 0 /* rrc */, longterm_key,
+ iov, iov_count, &kiov, &kiov_count);
if (code)
- goto cleanup_arcfour;
+ goto cleanup_arcfour;
code = ((*kaccess.arcfour_enc_provider->encrypt_iov)(
&seq_enc_key, 0,
free ((void *) usage_key.contents);
free ((void *) seq_enc_key.contents);
if (kiov != NULL)
- free(kiov);
+ free(kiov);
return (code);
}
switch (GSS_IOV_BUFFER_TYPE(type)) {
case GSS_IOV_BUFFER_TYPE_DATA:
case GSS_IOV_BUFFER_TYPE_PADDING:
- ktype = KRB5_CRYPTO_TYPE_DATA;
- break;
+ ktype = KRB5_CRYPTO_TYPE_DATA;
+ break;
case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
- ktype = KRB5_CRYPTO_TYPE_SIGN_ONLY;
- break;
+ ktype = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
default:
- ktype = KRB5_CRYPTO_TYPE_EMPTY;
- break;
+ ktype = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
}
return ktype;
gss_iov_buffer_t
kg_locate_iov(gss_iov_buffer_desc *iov,
- int iov_count,
- OM_uint32 type)
+ int iov_count,
+ OM_uint32 type)
{
int i;
gss_iov_buffer_t p = GSS_C_NO_IOV_BUFFER;
if (iov == GSS_C_NO_IOV_BUFFER)
- return GSS_C_NO_IOV_BUFFER;
+ return GSS_C_NO_IOV_BUFFER;
for (i = iov_count - 1; i >= 0; i--) {
- if (GSS_IOV_BUFFER_TYPE(iov[i].type) == type) {
- if (p == GSS_C_NO_IOV_BUFFER)
- p = &iov[i];
- else
- return GSS_C_NO_IOV_BUFFER;
- }
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == type) {
+ if (p == GSS_C_NO_IOV_BUFFER)
+ p = &iov[i];
+ else
+ return GSS_C_NO_IOV_BUFFER;
+ }
}
return p;
void
kg_iov_msglen(gss_iov_buffer_desc *iov,
- int iov_count,
- size_t *data_length_p,
- size_t *assoc_data_length_p)
+ int iov_count,
+ size_t *data_length_p,
+ size_t *assoc_data_length_p)
{
int i;
size_t data_length = 0, assoc_data_length = 0;
*data_length_p = *assoc_data_length_p = 0;
for (i = 0; i < iov_count; i++) {
- OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[i].type);
+ OM_uint32 type = GSS_IOV_BUFFER_TYPE(iov[i].type);
- if (type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
- assoc_data_length += iov[i].buffer.length;
+ if (type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
+ assoc_data_length += iov[i].buffer.length;
- if (type == GSS_IOV_BUFFER_TYPE_DATA ||
- type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
- data_length += iov[i].buffer.length;
+ if (type == GSS_IOV_BUFFER_TYPE_DATA ||
+ type == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
+ data_length += iov[i].buffer.length;
}
*data_length_p = data_length;
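/*
 * For example, with one DATA buffer of 1024 bytes and one SIGN_ONLY buffer
 * of 64 bytes, data_length comes out as 1088 and assoc_data_length as 64,
 * since sign-only bytes count toward both totals.
 */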
assert(iov != GSS_C_NO_IOV_BUFFER);
for (i = 0; i < iov_count; i++) {
- if (iov[i].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
- gss_release_buffer(&min_stat, &iov[i].buffer);
- iov[i].type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
- }
+ if (iov[i].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
+ gss_release_buffer(&min_stat, &iov[i].buffer);
+ iov[i].type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
+ }
}
}
OM_uint32
kg_fixup_padding_iov(OM_uint32 *minor_status,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
gss_iov_buffer_t padding = NULL;
gss_iov_buffer_t data = NULL;
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (data == NULL) {
- *minor_status = 0;
- return GSS_S_COMPLETE;
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
}
if (padding == NULL || padding->buffer.length == 0) {
- *minor_status = EINVAL;
- return GSS_S_FAILURE;
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
}
p = (unsigned char *)padding->buffer.value;
if (data->buffer.length + padding->buffer.length < padlength ||
padlength == 0) {
- *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
- return GSS_S_DEFECTIVE_TOKEN;
+ *minor_status = (OM_uint32)KRB5_BAD_MSIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
}
/*
*
* e.g. if the buffers are structured as follows:
*
- * +---DATA---+-PAD-+
- * | ABCDE444 | 4 |
- * +----------+-----+
+ * +---DATA---+-PAD-+
+ * | ABCDE444 | 4 |
+ * +----------+-----+
*
* after compensation they would look like:
*
- * +-DATA--+-PAD--+
- * | ABCDE | NULL |
- * +-------+------+
+ * +-DATA--+-PAD--+
+ * | ABCDE | NULL |
+ * +-------+------+
*/
relative_padlength = padlength - padding->buffer.length;
data->buffer.length -= relative_padlength;
if (padding->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
- gss_release_buffer(&minor, &padding->buffer);
- padding->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
+ gss_release_buffer(&minor, &padding->buffer);
+ padding->type &= ~(GSS_IOV_BUFFER_FLAG_ALLOCATED);
}
padding->buffer.length = 0;
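/*
 * Following the diagram above: padlength = 4 and the caller-supplied
 * padding buffer holds 1 byte, so relative_padlength = 4 - 1 = 3 and the
 * data buffer shrinks from 8 bytes ("ABCDE444") to 5 ("ABCDE"), while the
 * padding buffer itself ends up empty.
 */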
case KG_TOK_SIGN_MSG:
toktype2 = KG2_TOK_MIC_MSG;
break;
- case KG_TOK_WRAP_MSG:
+ case KG_TOK_WRAP_MSG:
toktype2 = KG2_TOK_WRAP_MSG;
break;
case KG_TOK_DEL_CTX:
assert(iov != GSS_C_NO_IOV_BUFFER);
for (i = 0; i < iov_count; i++) {
- if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA) {
- has_conf_data = TRUE;
- break;
- }
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA) {
+ has_conf_data = TRUE;
+ break;
+ }
}
return (has_conf_data == FALSE);
iov->buffer.length = size;
iov->buffer.value = xmalloc(size);
if (iov->buffer.value == NULL) {
- iov->buffer.length = 0;
- return ENOMEM;
+ iov->buffer.length = 0;
+ return ENOMEM;
}
iov->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
code = krb5_copy_keyblock(context, key, &tmpkey);
if (code)
- return(code);
+ return(code);
/* reverse the key bytes, as per spec */
for (i=0; i<tmpkey->length; i++)
- tmpkey->contents[i] = key->contents[key->length - 1 - i];
+ tmpkey->contents[i] = key->contents[key->length - 1 - i];
code = kg_encrypt(context, tmpkey, KG_USAGE_SEAL, NULL, zeros, seed, 16);
plain[6] = direction;
plain[7] = direction;
if (key->enctype == ENCTYPE_ARCFOUR_HMAC ||
- key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
+ key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
/* Yes, Microsoft used a big-endian sequence number. */
plain[0] = (seqnum>>24) & 0xff;
plain[1] = (seqnum>>16) & 0xff;
unsigned char plain[8];
if (key->enctype == ENCTYPE_ARCFOUR_HMAC ||
- key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
+ key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
code = kg_arcfour_docrypt (key, 0,
cksum, 8,
buf, 8,
*direction = plain[4];
if (key->enctype == ENCTYPE_ARCFOUR_HMAC ||
- key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
+ key->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
*seqnum = (plain[3]|(plain[2]<<8) | (plain[1]<<16)| (plain[0]<<24));
} else {
*seqnum = ((plain[0]) |
#if 0
OM_uint32
krb5_gss_verify_mic_iov(OM_uint32 *minor_status,
- gss_ctx_id_t context_handle,
- gss_qop_t *qop_state,
- gss_iov_buffer_desc *iov,
- int iov_count)
+ gss_ctx_id_t context_handle,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
{
OM_uint32 major_status;
major_status = kg_unseal_iov(minor_status, context_handle,
- NULL, qop_state,
- iov, iov_count, KG_TOK_WRAP_MSG);
+ NULL, qop_state,
+ iov, iov_count, KG_TOK_WRAP_MSG);
return major_status;
}
/* Token header: 16 octets. */
if (conf_req_flag) {
- krb5_enctype enctype;
+ krb5_enctype enctype;
- enctype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey->enctype
- : ctx->subkey->enctype;
+ enctype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey->enctype
+ : ctx->subkey->enctype;
while (sz > 0 && krb5_encrypt_size(sz, enctype) + 16 > req_output_size)
sz--;
sz = 0;
#endif
} else {
- krb5_cksumtype cksumtype;
- krb5_error_code err;
- size_t cksumsize;
-
- cksumtype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey_cksumtype
- : ctx->cksumtype;
-
- err = krb5_c_checksum_length(ctx->k5_context, cksumtype, &cksumsize);
- if (err) {
- *minor_status = err;
- return GSS_S_FAILURE;
- }
+ krb5_cksumtype cksumtype;
+ krb5_error_code err;
+ size_t cksumsize;
+
+ cksumtype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey_cksumtype
+ : ctx->cksumtype;
+
+ err = krb5_c_checksum_length(ctx->k5_context, cksumtype, &cksumsize);
+ if (err) {
+ *minor_status = err;
+ return GSS_S_FAILURE;
+ }
/* Allow for token header and checksum. */
if (sz < 16 + cksumsize)
asn1_error_code asn1_decode_boolean
- (asn1buf *buf, unsigned int *val);
+ (asn1buf *buf, unsigned int *val);
asn1_error_code asn1_decode_integer
(asn1buf *buf, long *val);
asn1_error_code asn1_decode_unsigned_integer
#include "asn1_make.h"
asn1_error_code asn1_encode_boolean(asn1buf *buf, asn1_intmax val,
- unsigned int *retlen)
+ unsigned int *retlen)
{
asn1_error_code retval;
unsigned int length = 0;
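/*
 * For reference, DER allows exactly one content octet for BOOLEAN, 0xFF
 * for TRUE and 0x00 for FALSE, so once the tag and length are prepended
 * the full encoding is 01 01 FF or 01 01 00.
 */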
correct byte order, in an allocated krb5_data. */
#ifdef POINTERS_ARE_ALL_THE_SAME
-#define LOADPTR(PTR,TYPE) \
+#define LOADPTR(PTR,TYPE) \
(assert((TYPE)->loadptr != NULL), (TYPE)->loadptr(PTR))
#else
-#define LOADPTR(PTR,TYPE) \
+#define LOADPTR(PTR,TYPE) \
(*(const void *const *)(PTR))
#endif
*/
asn1_error_code asn1_encode_boolean
- (asn1buf *buf, asn1_intmax val, unsigned int *retlen);
+ (asn1buf *buf, asn1_intmax val, unsigned int *retlen);
asn1_error_code asn1_encode_integer
(asn1buf *buf, asn1_intmax val, unsigned int *retlen);
/* requires *buf is allocated
*principal = NULL;
{ begin_structure();
- get_lenfield(newpasswd->length, newpasswd->data, 0, asn1_decode_charstring);
- if (tagnum == 1) {
- alloc_field(*principal, krb5_principal_data);
- opt_field(*principal, 1, asn1_decode_principal_name, 0);
- opt_field(*principal, 2, asn1_decode_realm, 0);
- }
- end_structure();
+ get_lenfield(newpasswd->length, newpasswd->data, 0, asn1_decode_charstring);
+ if (tagnum == 1) {
+ alloc_field(*principal, krb5_principal_data);
+ opt_field(*principal, 1, asn1_decode_principal_name, 0);
+ opt_field(*principal, 2, asn1_decode_realm, 0);
+ }
+ end_structure();
}
cleanup();
}
{
setup();
{ begin_structure();
- get_field(val->user,0,asn1_decode_principal_name);
- get_field(val->user,1,asn1_decode_realm);
- get_field(val->cksum,2,asn1_decode_checksum);
- get_lenfield(val->auth_package.length,val->auth_package.data,3,asn1_decode_generalstring);
- end_structure();
+ get_field(val->user,0,asn1_decode_principal_name);
+ get_field(val->user,1,asn1_decode_realm);
+ get_field(val->cksum,2,asn1_decode_checksum);
+ get_lenfield(val->auth_package.length,val->auth_package.data,3,asn1_decode_generalstring);
+ end_structure();
}
cleanup();
}
{
setup();
{ begin_structure();
- get_field(val->include_pac,0,asn1_decode_boolean);
- end_structure();
+ get_field(val->include_pac,0,asn1_decode_boolean);
+ end_structure();
}
cleanup();
}
(asn1buf *buf, krb5_algorithm_identifier ***val);
asn1_error_code asn1_decode_setpw_req
- (asn1buf *buf, krb5_data *rep, krb5_principal *principal);
+ (asn1buf *buf, krb5_data *rep, krb5_principal *principal);
asn1_error_code asn1_decode_pa_for_user
- (asn1buf *buf, krb5_pa_for_user *val);
+ (asn1buf *buf, krb5_pa_for_user *val);
asn1_error_code asn1_decode_pa_pac_req
- (asn1buf *buf, krb5_pa_pac_req *val);
+ (asn1buf *buf, krb5_pa_pac_req *val);
#endif
/* caddr[11] HostAddresses OPTIONAL */
FIELDOF_OPT(krb5_enc_kdc_rep_part, ptr_seqof_host_addresses, caddrs,
11, 11),
- /* encrypted-pa-data[12] SEQUENCE OF PA-DATA OPTIONAL */
+ /* encrypted-pa-data[12] SEQUENCE OF PA-DATA OPTIONAL */
FIELDOF_OPT(krb5_enc_kdc_rep_part, ptr_seqof_pa_data, enc_padata, 12, 12),
};
static unsigned int optional_enc_kdc_rep_part(const void *p)
}
krb5_error_code decode_krb5_setpw_req(const krb5_data *code,
- krb5_data **rep,
- krb5_principal *principal)
+ krb5_data **rep,
+ krb5_principal *principal)
{
setup_buf_only();
alloc_field(*rep, krb5_data);
#define KVNO 5
/* Universal Tag Numbers */
-#define ASN1_BOOLEAN 1
+#define ASN1_BOOLEAN 1
#define ASN1_INTEGER 2
#define ASN1_BITSTRING 3
#define ASN1_OCTETSTRING 4
#define ASN1_NULL 5
#define ASN1_OBJECTIDENTIFIER 6
-#define ASN1_ENUMERATED 10
+#define ASN1_ENUMERATED 10
#define ASN1_SEQUENCE 16
#define ASN1_SET 17
#define ASN1_PRINTABLESTRING 19
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
- * permission. Furthermore if you modify this software you must label
+ * permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
static void fail_if(int condition, const char *name)
{
if (condition) {
- fprintf(stderr, "%s failed\n", name);
- exit(1);
+ fprintf(stderr, "%s failed\n", name);
+ exit(1);
}
}
static void check_buf(struct k5buf *buf, const char *name)
{
fail_if(buf->buftype != FIXED && buf->buftype != DYNAMIC
- && buf->buftype != ERROR, name);
+ && buf->buftype != ERROR, name);
if (buf->buftype == ERROR)
- return;
+ return;
fail_if(buf->space == 0, name);
fail_if(buf->space > SPACE_MAX, name);
fail_if(buf->len >= buf->space, name);
size_t i;
for (i = 0; i < sizeof(data); i++)
- data[i] = 'a';
+ data[i] = 'a';
/* Cause the buffer size to double from 128 to 256 bytes. */
krb5int_buf_init_dynamic(&buf);
size_t i;
for (i = 0; i < sizeof(data) - 1; i++)
- data[i] = 'a';
+ data[i] = 'a';
data[i] = '\0';
/* Format some text into a non-empty fixed buffer. */