module: icp: spi: crypto_ops_t: remove unused op types

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12901

Author: наб, 2021-12-22 23:29:25 +01:00; committed by Brian Behlendorf
commit 464700ae02 (parent f5896e2bdf)
17 changed files with 74 additions and 3862 deletions
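
Beyond the SPI header cleanup, the hunks below repeat one mechanical change at every call site: kcf_submit_request() loses its trailing boolean_t argument, which previously flagged emulated dual operations. A minimal before/after sketch follows; the placeholder typedefs are stand-ins added only to make the sketch self-contained and are not the real ICP definitions.

/*
 * Placeholder forward declarations only -- the real types come from the
 * ICP headers; they appear here just so the prototype below is readable.
 */
typedef struct kcf_provider_desc kcf_provider_desc_t;
typedef struct crypto_ctx crypto_ctx_t;
typedef struct crypto_call_req crypto_call_req_t;
typedef struct kcf_req_params kcf_req_params_t;

/*
 * Before this commit, callers passed a fifth argument:
 *     error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
 * After it, the flag is gone, matching every call site in the hunks below:
 */
extern int kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params);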


@ -109,62 +109,6 @@ extern int crypto_mac_update(crypto_context_t ctx, crypto_data_t *data,
extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
/*
* Single and multi-part sign with private key operations.
*/
extern int crypto_sign(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl,
crypto_data_t *signature, crypto_call_req_t *cr);
extern int crypto_sign_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_sign_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_sign_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_sign_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_sign_final(crypto_context_t ctx, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_sign_recover_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t tmpl, crypto_context_t *, crypto_call_req_t *);
extern int crypto_sign_recover(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_sign_recover_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
/*
* Single and multi-part verify with public key operations.
*/
extern int crypto_verify(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_verify_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
extern int crypto_verify_init(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
extern int crypto_verify_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *);
extern int crypto_verify_update(crypto_context_t ctx, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_verify_final(crypto_context_t ctx, crypto_data_t *signature,
crypto_call_req_t *cr);
extern int crypto_verify_recover_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t tmpl, crypto_context_t *, crypto_call_req_t *);
extern int crypto_verify_recover(crypto_mechanism_t *mech, crypto_key_t *key,
crypto_data_t *signature, crypto_ctx_template_t tmpl, crypto_data_t *data,
crypto_call_req_t *cr);
extern int crypto_verify_recover_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_ctx_template_t, crypto_data_t *, crypto_call_req_t *);
/*
* Single and multi-part encryption operations.
*/
@ -206,149 +150,6 @@ extern int crypto_decrypt_update(crypto_context_t ctx,
extern int crypto_decrypt_final(crypto_context_t ctx, crypto_data_t *plaintext,
crypto_call_req_t *cr);
/*
* Single and multi-part encrypt/MAC dual operations.
*/
extern int crypto_encrypt_mac(crypto_mechanism_t *encr_mech,
crypto_mechanism_t *mac_mech, crypto_data_t *pt,
crypto_key_t *encr_key, crypto_key_t *mac_key,
crypto_ctx_template_t encr_tmpl, crypto_ctx_template_t mac_tmpl,
crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *cr);
extern int crypto_encrypt_mac_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_mechanism_t *, crypto_data_t *,
crypto_key_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_ctx_template_t, crypto_dual_data_t *, crypto_data_t *,
crypto_call_req_t *);
extern int crypto_encrypt_mac_init(crypto_mechanism_t *encr_mech,
crypto_mechanism_t *mac_mech, crypto_key_t *encr_key,
crypto_key_t *mac_key, crypto_ctx_template_t encr_tmpl,
crypto_ctx_template_t mac_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_encrypt_mac_init_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_mechanism_t *, crypto_key_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_ctx_template_t, crypto_context_t *,
crypto_call_req_t *);
extern int crypto_encrypt_mac_update(crypto_context_t ctx,
crypto_data_t *pt, crypto_dual_data_t *ct, crypto_call_req_t *cr);
extern int crypto_encrypt_mac_final(crypto_context_t ctx,
crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *cr);
/*
* Single and multi-part MAC/decrypt dual operations.
*/
extern int crypto_mac_decrypt(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_prov(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *mac_mech, crypto_mechanism_t *decr_mech,
crypto_dual_data_t *ct, crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_verify_decrypt(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_verify_decrypt_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
crypto_key_t *mac_key, crypto_key_t *decr_key,
crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_init(crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_mac_decrypt_init_prov(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *mac_mech,
crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
crypto_call_req_t *cr);
extern int crypto_mac_decrypt_update(crypto_context_t ctx,
crypto_dual_data_t *ct, crypto_data_t *pt, crypto_call_req_t *cr);
extern int crypto_mac_decrypt_final(crypto_context_t ctx, crypto_data_t *mac,
crypto_data_t *pt, crypto_call_req_t *cr);
/* Session Management */
extern int crypto_session_open(crypto_provider_t, crypto_session_id_t *,
crypto_call_req_t *);
extern int crypto_session_close(crypto_provider_t, crypto_session_id_t,
crypto_call_req_t *);
extern int crypto_session_login(crypto_provider_t, crypto_session_id_t,
crypto_user_type_t, char *, size_t, crypto_call_req_t *);
extern int crypto_session_logout(crypto_provider_t, crypto_session_id_t,
crypto_call_req_t *);
/* Object Management */
extern int crypto_object_copy(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_object_create(crypto_provider_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_call_req_t *);
extern int crypto_object_destroy(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, crypto_call_req_t *);
extern int crypto_object_get_attribute_value(crypto_provider_t,
crypto_session_id_t, crypto_object_id_t, crypto_object_attribute_t *,
uint_t, crypto_call_req_t *);
extern int crypto_object_get_size(crypto_provider_t, crypto_session_id_t,
crypto_object_id_t, size_t *, crypto_call_req_t *);
extern int crypto_object_find_final(crypto_provider_t, void *,
crypto_call_req_t *);
extern int crypto_object_find_init(crypto_provider_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, void **, crypto_call_req_t *);
extern int crypto_object_find(crypto_provider_t, void *, crypto_object_id_t *,
uint_t *, uint_t, crypto_call_req_t *);
extern int crypto_object_set_attribute_value(crypto_provider_t,
crypto_session_id_t, crypto_object_id_t, crypto_object_attribute_t *,
uint_t, crypto_call_req_t *);
/* Key Management */
extern int crypto_key_derive(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
uint_t, crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_generate(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_generate_pair(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_object_id_t *, crypto_call_req_t *);
extern int crypto_key_unwrap(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, uchar_t *, size_t *,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_call_req_t *);
extern int crypto_key_wrap(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_id_t *, uchar_t *,
size_t *, crypto_call_req_t *);
extern int crypto_key_check_prov(crypto_provider_t, crypto_mechanism_t *mech,
crypto_key_t *key);
extern int crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key);
/*
* Routines to cancel a single asynchronous request or all asynchronous
* requests associated with a particular context.
*/
extern void crypto_cancel_req(crypto_req_id_t req);
extern void crypto_cancel_ctx(crypto_context_t ctx);
/*
* crypto_get_mech_list(9F) allocates and returns the list of currently
* supported cryptographic mechanisms.
*/
extern crypto_mech_name_t *crypto_get_mech_list(uint_t *count, int kmflag);
extern void crypto_free_mech_list(crypto_mech_name_t *mech_names,
uint_t count);
extern crypto_provider_t crypto_get_provider(char *, char *, char *);
extern int crypto_get_provinfo(crypto_provider_t, crypto_provider_ext_info_t *);
extern void crypto_release_provider(crypto_provider_t);
/*
* A kernel consumer can request to be notified when some particular event
* occurs. The valid events, callback function type, and functions to


@ -253,20 +253,6 @@ typedef struct crypto_data {
#define cd_uio cdu.cdu_uio
#define cd_mp cdu.cdu_mp
typedef struct crypto_dual_data {
crypto_data_t dd_data; /* The data */
off_t dd_offset2; /* Used by dual operation */
size_t dd_len2; /* # of bytes to take */
} crypto_dual_data_t;
#define dd_format dd_data.cd_format
#define dd_offset1 dd_data.cd_offset
#define dd_len1 dd_data.cd_length
#define dd_miscdata dd_data.cd_miscdata
#define dd_raw dd_data.cd_raw
#define dd_uio dd_data.cd_uio
#define dd_mp dd_data.cd_mp
/* The keys, and their contents */
typedef enum {
@ -430,26 +416,6 @@ typedef void *crypto_provider_t;
#define CRYPTO_EXT_SIZE_SERIAL 16
#define CRYPTO_EXT_SIZE_TIME 16
typedef struct crypto_provider_ext_info {
uchar_t ei_label[CRYPTO_EXT_SIZE_LABEL];
uchar_t ei_manufacturerID[CRYPTO_EXT_SIZE_MANUF];
uchar_t ei_model[CRYPTO_EXT_SIZE_MODEL];
uchar_t ei_serial_number[CRYPTO_EXT_SIZE_SERIAL];
ulong_t ei_flags;
ulong_t ei_max_session_count;
ulong_t ei_max_pin_len;
ulong_t ei_min_pin_len;
ulong_t ei_total_public_memory;
ulong_t ei_free_public_memory;
ulong_t ei_total_private_memory;
ulong_t ei_free_private_memory;
crypto_version_t ei_hardware_version;
crypto_version_t ei_firmware_version;
uchar_t ei_time[CRYPTO_EXT_SIZE_TIME];
int ei_hash_max_input_len;
int ei_hmac_max_input_len;
} crypto_provider_ext_info_t;
typedef uint_t crypto_session_id_t;
typedef enum cmd_type {
@ -566,16 +532,6 @@ typedef enum cmd_type {
*/
#define CRYPTO_LAST_ERROR 0x00000053
/*
* Special values that can be used to indicate that information is unavailable
* or that there is no practical limit. These values can be used
* by fields of the SPI crypto_provider_ext_info(9S) structure.
* The value of CRYPTO_UNAVAILABLE_INFO should be the same as
* CK_UNAVAILABLE_INFO in the PKCS#11 spec.
*/
#define CRYPTO_UNAVAILABLE_INFO ((ulong_t)(-1))
#define CRYPTO_EFFECTIVELY_INFINITE 0x0
#ifdef __cplusplus
}
#endif


@ -198,8 +198,7 @@ crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
mech, key, NULL, NULL, tmpl);
}
error = kcf_submit_request(real_provider, ctx, crq, &params,
B_FALSE);
error = kcf_submit_request(real_provider, ctx, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -334,7 +333,7 @@ crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
plaintext, ciphertext, tmpl);
error = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
error = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -400,7 +399,7 @@ retry:
} else {
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
mech, key, plaintext, ciphertext, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
error = kcf_submit_request(pd, NULL, crq, &params);
}
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
@ -502,7 +501,7 @@ crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
ctx->cc_session, NULL, NULL, plaintext, ciphertext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
return (error);
}
@ -550,7 +549,7 @@ crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
} else {
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
ctx->cc_session, NULL, NULL, NULL, ciphertext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
}
/* Release the hold done in kcf_new_ctx() during init step. */
@ -616,7 +615,7 @@ crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
ciphertext, plaintext, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -683,7 +682,7 @@ retry:
} else {
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
mech, key, ciphertext, plaintext, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
error = kcf_submit_request(pd, NULL, crq, &params);
}
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
@ -785,7 +784,7 @@ crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
ctx->cc_session, NULL, NULL, ciphertext, plaintext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
return (error);
}
@ -834,77 +833,7 @@ crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext,
} else {
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
ctx->cc_session, NULL, NULL, NULL, plaintext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
/* Release the hold done in kcf_new_ctx() during init step. */
KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
return (error);
}
/*
* See comments for crypto_encrypt_update().
*/
int
crypto_encrypt_single(crypto_context_t context, crypto_data_t *plaintext,
crypto_data_t *ciphertext, crypto_call_req_t *cr)
{
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
int error;
kcf_req_params_t params;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
((pd = kcf_ctx->kc_prov_desc) == NULL)) {
return (CRYPTO_INVALID_CONTEXT);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_ENCRYPT(pd, ctx, plaintext,
ciphertext, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
NULL, NULL, plaintext, ciphertext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
/* Release the hold done in kcf_new_ctx() during init step. */
KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
return (error);
}
/*
* See comments for crypto_decrypt_update().
*/
int
crypto_decrypt_single(crypto_context_t context, crypto_data_t *ciphertext,
crypto_data_t *plaintext, crypto_call_req_t *cr)
{
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
int error;
kcf_req_params_t params;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
((pd = kcf_ctx->kc_prov_desc) == NULL)) {
return (CRYPTO_INVALID_CONTEXT);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DECRYPT(pd, ctx, ciphertext,
plaintext, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
NULL, NULL, ciphertext, plaintext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
}
/* Release the hold done in kcf_new_ctx() during init step. */
@ -925,6 +854,4 @@ EXPORT_SYMBOL(crypto_decrypt_init_prov);
EXPORT_SYMBOL(crypto_decrypt_init);
EXPORT_SYMBOL(crypto_decrypt_update);
EXPORT_SYMBOL(crypto_decrypt_final);
EXPORT_SYMBOL(crypto_encrypt_single);
EXPORT_SYMBOL(crypto_decrypt_single);
#endif


@ -109,7 +109,7 @@ crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid,
data, digest);
/* no crypto context to carry between multiple parts. */
rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -159,8 +159,7 @@ retry:
pd->pd_sid, mech, NULL, data, digest);
/* no crypto context to carry between multiple parts. */
error = kcf_submit_request(pd, NULL, crq, &params,
B_FALSE);
error = kcf_submit_request(pd, NULL, crq, &params);
}
}
@ -241,8 +240,7 @@ crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_INIT, sid,
mech, NULL, NULL, NULL);
error = kcf_submit_request(real_provider, ctx, crq, &params,
B_FALSE);
error = kcf_submit_request(real_provider, ctx, crq, &params);
}
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
@ -352,7 +350,7 @@ crypto_digest_update(crypto_context_t context, crypto_data_t *data,
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_UPDATE,
ctx->cc_session, NULL, NULL, data, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
}
return (error);
@ -401,77 +399,7 @@ crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_FINAL,
ctx->cc_session, NULL, NULL, NULL, digest);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
/* Release the hold done in kcf_new_ctx() during init step. */
KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
return (error);
}
/*
* Performs a digest update on the specified key. Note that there is
* no k-API crypto_digest_key() equivalent of this function.
*/
int
crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
crypto_call_req_t *cr)
{
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
int error;
kcf_req_params_t params;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
((pd = kcf_ctx->kc_prov_desc) == NULL)) {
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DIGEST_KEY(pd, ctx, key, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_DIGEST_KEY,
ctx->cc_session, NULL, key, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
return (error);
}
/*
* See comments for crypto_digest_update() and crypto_digest_final().
*/
int
crypto_digest_single(crypto_context_t context, crypto_data_t *data,
crypto_data_t *digest, crypto_call_req_t *cr)
{
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
int error;
kcf_req_params_t params;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
((pd = kcf_ctx->kc_prov_desc) == NULL)) {
return (CRYPTO_INVALID_CONTEXT);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DIGEST(pd, ctx, data, digest, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
NULL, NULL, data, digest);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
error = kcf_submit_request(pd, ctx, cr, &params);
}
/* Release the hold done in kcf_new_ctx() during init step. */
@ -486,6 +414,4 @@ EXPORT_SYMBOL(crypto_digest_init_prov);
EXPORT_SYMBOL(crypto_digest_init);
EXPORT_SYMBOL(crypto_digest_update);
EXPORT_SYMBOL(crypto_digest_final);
EXPORT_SYMBOL(crypto_digest_key_prov);
EXPORT_SYMBOL(crypto_digest_single);
#endif


@ -109,7 +109,7 @@ crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
data, mac, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -187,8 +187,7 @@ retry:
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params,
KCF_ISDUALREQ(crq));
error = kcf_submit_request(pd, NULL, crq, &params);
}
}
@ -234,7 +233,7 @@ crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech,
key, data, mac, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
@ -308,8 +307,7 @@ retry:
KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
key, data, mac, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params,
KCF_ISDUALREQ(crq));
error = kcf_submit_request(pd, NULL, crq, &params);
}
}
@ -404,8 +402,7 @@ crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech, key,
NULL, NULL, tmpl);
rv = kcf_submit_request(real_provider, ctx, crq, &params,
B_FALSE);
rv = kcf_submit_request(real_provider, ctx, crq, &params);
}
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
@ -539,7 +536,7 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
ctx->cc_session, NULL, NULL, data, NULL, NULL);
rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
rv = kcf_submit_request(pd, ctx, cr, &params);
}
return (rv);
@ -588,7 +585,7 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
ctx->cc_session, NULL, NULL, NULL, mac, NULL);
rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
rv = kcf_submit_request(pd, ctx, cr, &params);
}
/* Release the hold done in kcf_new_ctx() during init step. */
@ -596,42 +593,6 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
return (rv);
}
/*
* See comments for crypto_mac_update() and crypto_mac_final().
*/
int
crypto_mac_single(crypto_context_t context, crypto_data_t *data,
crypto_data_t *mac, crypto_call_req_t *cr)
{
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
int error;
kcf_req_params_t params;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
((pd = kcf_ctx->kc_prov_desc) == NULL)) {
return (CRYPTO_INVALID_CONTEXT);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_MAC(pd, ctx, data, mac, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
NULL, NULL, data, mac, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
/* Release the hold done in kcf_new_ctx() during init step. */
KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
return (error);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(crypto_mac_prov);
EXPORT_SYMBOL(crypto_mac);
@ -641,5 +602,4 @@ EXPORT_SYMBOL(crypto_mac_init_prov);
EXPORT_SYMBOL(crypto_mac_init);
EXPORT_SYMBOL(crypto_mac_update);
EXPORT_SYMBOL(crypto_mac_final);
EXPORT_SYMBOL(crypto_mac_single);
#endif

File diff suppressed because it is too large.


@ -85,18 +85,12 @@
static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
static kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
static kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
static kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];
const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
{0, NULL}, /* No class zero */
{KCF_MAXDIGEST, kcf_digest_mechs_tab},
{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
{KCF_MAXMAC, kcf_mac_mechs_tab},
{KCF_MAXSIGN, kcf_sign_mechs_tab},
{KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
{KCF_MAXMISC, kcf_misc_mechs_tab}
};
/*
@ -240,10 +234,6 @@ kcf_init_mech_tabs(void)
kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;
/* 1 random number generation pseudo mechanism */
(void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
CRYPTO_MAX_MECH_NAME);
kcf_mech_hash = mod_hash_create_strhash_nodtr("kcf mech2id hash",
kcf_mech_hash_size, mod_hash_null_valdtor);
@ -376,13 +366,8 @@ kcf_add_mech_provider(short mech_indx,
int error;
kcf_mech_entry_t *mech_entry = NULL;
crypto_mech_info_t *mech_info;
crypto_mech_type_t kcf_mech_type, mt;
kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
crypto_func_group_t simple_fg_mask, dual_fg_mask;
crypto_mech_info_t *dmi;
crypto_mech_info_list_t *mil, *mil2;
kcf_mech_entry_t *me;
int i;
crypto_mech_type_t kcf_mech_type;
kcf_prov_mech_desc_t *prov_mech;
ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
@ -406,19 +391,8 @@ kcf_add_mech_provider(short mech_indx,
class = KCF_CIPHER_CLASS;
else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
class = KCF_MAC_CLASS;
else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
fg & CRYPTO_FG_SIGN_ATOMIC ||
fg & CRYPTO_FG_VERIFY_ATOMIC ||
fg & CRYPTO_FG_SIGN_RECOVER ||
fg & CRYPTO_FG_VERIFY_RECOVER)
class = KCF_SIGN_CLASS;
else if (fg & CRYPTO_FG_GENERATE ||
fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
fg & CRYPTO_FG_DERIVE)
class = KCF_KEYOPS_CLASS;
else
class = KCF_MISC_CLASS;
__builtin_unreachable();
/*
* Attempt to create a new mech_entry for the specified
@ -447,95 +421,6 @@ kcf_add_mech_provider(short mech_indx,
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;
if (dual_fg_mask == ((crypto_func_group_t)0))
goto add_entry;
simple_fg_mask = (mech_info->cm_func_group_mask &
CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;
for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
dmi = &prov_desc->pd_mechanisms[i];
/* skip self */
if (dmi->cm_mech_number == mech_info->cm_mech_number)
continue;
/* skip if not a dual operation mechanism */
if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
(dmi->cm_func_group_mask & simple_fg_mask))
continue;
mt = kcf_mech_hash_find(dmi->cm_mech_name);
if (mt == CRYPTO_MECH_INVALID)
continue;
if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
continue;
mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);
/*
* Ignore hard-coded entries in the mech table
* if the provider hasn't registered.
*/
mutex_enter(&me->me_mutex);
if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
mutex_exit(&me->me_mutex);
kmem_free(mil, sizeof (*mil));
kmem_free(mil2, sizeof (*mil2));
continue;
}
/*
* Add other dual mechanisms that have registered
* with the framework to this mechanism's
* cross-reference list.
*/
mil->ml_mech_info = *dmi; /* struct assignment */
mil->ml_kcf_mechid = mt;
/* add to head of list */
mil->ml_next = prov_mech->pm_mi_list;
prov_mech->pm_mi_list = mil;
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
prov_mech2 = me->me_hw_prov_chain;
else
prov_mech2 = me->me_sw_prov;
if (prov_mech2 == NULL) {
kmem_free(mil2, sizeof (*mil2));
mutex_exit(&me->me_mutex);
continue;
}
/*
* Update all other cross-reference lists by
* adding this new mechanism.
*/
while (prov_mech2 != NULL) {
if (prov_mech2->pm_prov_desc == prov_desc) {
/* struct assignment */
mil2->ml_mech_info = *mech_info;
mil2->ml_kcf_mechid = kcf_mech_type;
/* add to head of list */
mil2->ml_next = prov_mech2->pm_mi_list;
prov_mech2->pm_mi_list = mil2;
break;
}
prov_mech2 = prov_mech2->pm_next;
}
if (prov_mech2 == NULL)
kmem_free(mil2, sizeof (*mil2));
mutex_exit(&me->me_mutex);
}
add_entry:
/*
* Add new kcf_prov_mech_desc at the front of HW providers
* chain.


@ -205,8 +205,7 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
}
static void
allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst,
uint_t *mech_list_count)
allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_digest_ops != NULL)
dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
@ -220,62 +219,9 @@ allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst,
dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
KM_SLEEP);
if (src->co_sign_ops != NULL)
dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
KM_SLEEP);
if (src->co_verify_ops != NULL)
dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
KM_SLEEP);
if (src->co_dual_ops != NULL)
dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
KM_SLEEP);
if (src->co_dual_cipher_mac_ops != NULL)
dst->co_dual_cipher_mac_ops = kmem_alloc(
sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
if (src->co_random_ops != NULL) {
dst->co_random_ops = kmem_alloc(
sizeof (crypto_random_number_ops_t), KM_SLEEP);
/*
* Allocate storage to store the array of supported mechanisms
* specified by provider. We allocate extra mechanism storage
* if the provider has random_ops since we keep an internal
* mechanism, SUN_RANDOM, in this case.
*/
(*mech_list_count)++;
}
if (src->co_session_ops != NULL)
dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
KM_SLEEP);
if (src->co_object_ops != NULL)
dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
KM_SLEEP);
if (src->co_key_ops != NULL)
dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
KM_SLEEP);
if (src->co_provider_ops != NULL)
dst->co_provider_ops = kmem_alloc(
sizeof (crypto_provider_management_ops_t), KM_SLEEP);
if (src->co_ctx_ops != NULL)
dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
KM_SLEEP);
if (src->co_mech_ops != NULL)
dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
KM_SLEEP);
if (src->co_nostore_key_ops != NULL)
dst->co_nostore_key_ops =
kmem_alloc(sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
}
/*
@ -289,7 +235,6 @@ kcf_provider_desc_t *
kcf_alloc_provider_desc(const crypto_provider_info_t *info)
{
kcf_provider_desc_t *desc;
uint_t mech_list_count = info->pi_mech_list_count;
const crypto_ops_t *src_ops = info->pi_ops_vector;
desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
@ -319,15 +264,13 @@ kcf_alloc_provider_desc(const crypto_provider_info_t *info)
* vectors are copied.
*/
crypto_ops_t *opvec = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
allocate_ops(src_ops, opvec, &mech_list_count);
}
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
allocate_ops(src_ops, opvec);
desc->pd_ops_vector = opvec;
desc->pd_mech_list_count = mech_list_count;
desc->pd_mech_list_count = info->pi_mech_list_count;
desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
mech_list_count, KM_SLEEP);
info->pi_mech_list_count, KM_SLEEP);
for (int i = 0; i < KCF_OPS_CLASSSIZE; i++)
for (int j = 0; j < KCF_MAXMECHTAB; j++)
desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
@ -408,54 +351,10 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
kmem_free(desc->pd_ops_vector->co_mac_ops,
sizeof (crypto_mac_ops_t));
if (desc->pd_ops_vector->co_sign_ops != NULL)
kmem_free(desc->pd_ops_vector->co_sign_ops,
sizeof (crypto_sign_ops_t));
if (desc->pd_ops_vector->co_verify_ops != NULL)
kmem_free(desc->pd_ops_vector->co_verify_ops,
sizeof (crypto_verify_ops_t));
if (desc->pd_ops_vector->co_dual_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_ops,
sizeof (crypto_dual_ops_t));
if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
sizeof (crypto_dual_cipher_mac_ops_t));
if (desc->pd_ops_vector->co_random_ops != NULL)
kmem_free(desc->pd_ops_vector->co_random_ops,
sizeof (crypto_random_number_ops_t));
if (desc->pd_ops_vector->co_session_ops != NULL)
kmem_free(desc->pd_ops_vector->co_session_ops,
sizeof (crypto_session_ops_t));
if (desc->pd_ops_vector->co_object_ops != NULL)
kmem_free(desc->pd_ops_vector->co_object_ops,
sizeof (crypto_object_ops_t));
if (desc->pd_ops_vector->co_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_key_ops,
sizeof (crypto_key_ops_t));
if (desc->pd_ops_vector->co_provider_ops != NULL)
kmem_free(desc->pd_ops_vector->co_provider_ops,
sizeof (crypto_provider_management_ops_t));
if (desc->pd_ops_vector->co_ctx_ops != NULL)
kmem_free(desc->pd_ops_vector->co_ctx_ops,
sizeof (crypto_ctx_ops_t));
if (desc->pd_ops_vector->co_mech_ops != NULL)
kmem_free(desc->pd_ops_vector->co_mech_ops,
sizeof (crypto_mech_ops_t));
if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
sizeof (crypto_nostore_key_ops_t));
kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
}
@ -474,111 +373,6 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
kmem_free(desc, sizeof (kcf_provider_desc_t));
}
/*
* Returns an array of hardware and logical provider descriptors,
* a.k.a the PKCS#11 slot list. A REFHOLD is done on each descriptor
* before the array is returned. The entire table can be freed by
* calling kcf_free_provider_tab().
*/
int
kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
boolean_t unverified)
{
kcf_provider_desc_t *prov_desc;
kcf_provider_desc_t **p = NULL;
char *last;
uint_t cnt = 0;
uint_t i, j;
int rval = CRYPTO_SUCCESS;
size_t n, final_size;
/* count the providers */
mutex_enter(&prov_tab_mutex);
for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
cnt++;
}
}
}
mutex_exit(&prov_tab_mutex);
if (cnt == 0)
goto out;
n = cnt * sizeof (kcf_provider_desc_t *);
again:
p = kmem_zalloc(n, KM_SLEEP);
/* pointer to last entry in the array */
last = (char *)&p[cnt-1];
mutex_enter(&prov_tab_mutex);
/* fill the slot list */
for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
if ((char *)&p[j] > last) {
mutex_exit(&prov_tab_mutex);
kcf_free_provider_tab(cnt, p);
n = n << 1;
cnt = cnt << 1;
goto again;
}
p[j++] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
}
}
}
mutex_exit(&prov_tab_mutex);
final_size = j * sizeof (kcf_provider_desc_t *);
cnt = j;
ASSERT(final_size <= n);
/* check if buffer we allocated is too large */
if (final_size < n) {
char *final_buffer = NULL;
if (final_size > 0) {
final_buffer = kmem_alloc(final_size, KM_SLEEP);
bcopy(p, final_buffer, final_size);
}
kmem_free(p, n);
p = (kcf_provider_desc_t **)final_buffer;
}
out:
*count = cnt;
*array = p;
return (rval);
}
/*
* Free an array of hardware provider descriptors. A REFRELE
* is done on each descriptor before the table is freed.
*/
void
kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
{
kcf_provider_desc_t *prov_desc;
int i;
for (i = 0; i < count; i++) {
if ((prov_desc = array[i]) != NULL) {
KCF_PROV_REFRELE(prov_desc);
}
}
kmem_free(array, count * sizeof (kcf_provider_desc_t *));
}
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.


@ -66,8 +66,6 @@ static kcf_stats_t kcf_ksdata = {
static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;
static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static int kcf_enqueue(kcf_areq_node_t *);
@ -121,7 +119,7 @@ kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
*/
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
crypto_call_req_t *crq, kcf_req_params_t *req)
{
kcf_areq_node_t *arptr, *areq;
@ -134,7 +132,6 @@ kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
arptr->an_reqarg = *crq;
arptr->an_params = *req;
arptr->an_context = ictx;
arptr->an_isdual = isdual;
arptr->an_next = arptr->an_prev = NULL;
KCF_PROV_REFHOLD(pd);
@ -342,17 +339,16 @@ bail:
/*
* This routine checks if a request can be retried on another
* provider. If true, mech1 is initialized to point to the mechanism
* structure. mech2 is also initialized in case of a dual operation. fg
* is initialized to the correct crypto_func_group_t bit flag. They are
* initialized by this routine, so that the caller can pass them to a
* kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
* structure. fg is initialized to the correct crypto_func_group_t bit flag.
* They are initialized by this routine, so that the caller can pass them to
* kcf_get_mech_provider() with no further change.
*
* We check that the request is for a init or atomic routine and that
* it is for one of the operation groups used from k-api .
*/
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
crypto_mechanism_t **mech2, crypto_func_group_t *fg)
crypto_func_group_t *fg)
{
kcf_req_params_t *params;
kcf_op_type_t optype;
@ -384,44 +380,6 @@ can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
break;
}
case KCF_OG_SIGN: {
kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
sops->so_mech.cm_type = sops->so_framework_mechtype;
*mech1 = &sops->so_mech;
switch (optype) {
case KCF_OP_INIT:
*fg = CRYPTO_FG_SIGN;
break;
case KCF_OP_ATOMIC:
*fg = CRYPTO_FG_SIGN_ATOMIC;
break;
default:
ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
}
break;
}
case KCF_OG_VERIFY: {
kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
vops->vo_mech.cm_type = vops->vo_framework_mechtype;
*mech1 = &vops->vo_mech;
switch (optype) {
case KCF_OP_INIT:
*fg = CRYPTO_FG_VERIFY;
break;
case KCF_OP_ATOMIC:
*fg = CRYPTO_FG_VERIFY_ATOMIC;
break;
default:
ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
}
break;
}
case KCF_OG_ENCRYPT: {
kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
@ -442,32 +400,6 @@ can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
break;
}
case KCF_OG_ENCRYPT_MAC: {
kcf_encrypt_mac_ops_params_t *eops =
&params->rp_u.encrypt_mac_params;
eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
*mech1 = &eops->em_encr_mech;
eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
*mech2 = &eops->em_mac_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
break;
}
case KCF_OG_MAC_DECRYPT: {
kcf_mac_decrypt_ops_params_t *dops =
&params->rp_u.mac_decrypt_params;
dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
*mech1 = &dops->md_mac_mech;
dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
*mech2 = &dops->md_decr_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
CRYPTO_FG_MAC_DECRYPT_ATOMIC;
break;
}
default:
return (B_FALSE);
}
@ -491,11 +423,10 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
kcf_context_t *ictx;
kcf_provider_desc_t *old_pd;
kcf_provider_desc_t *new_pd;
crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
crypto_mech_type_t prov_mt1, prov_mt2;
crypto_mechanism_t *mech1 = NULL;
crypto_func_group_t fg = 0;
if (!can_resubmit(areq, &mech1, &mech2, &fg))
if (!can_resubmit(areq, &mech1, &fg))
return (error);
old_pd = areq->an_provider;
@ -508,17 +439,9 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
KM_NOSLEEP) == NULL)
return (error);
if (mech1 && !mech2) {
new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
areq->an_tried_plist, fg,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
} else {
ASSERT(mech1 != NULL && mech2 != NULL);
new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
&prov_mt2, &error, areq->an_tried_plist, fg, fg,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
}
new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
areq->an_tried_plist, fg,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
if (new_pd == NULL)
return (error);
@ -588,7 +511,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
*/
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
crypto_call_req_t *crq, kcf_req_params_t *params)
{
int error = CRYPTO_SUCCESS;
kcf_areq_node_t *areq;
@ -703,16 +626,14 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
* queue the request and return.
*/
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
params, cont);
params);
if (areq == NULL)
error = CRYPTO_HOST_MEMORY;
else {
if (!(crq->cr_flag
& CRYPTO_SKIP_REQID)) {
/*
* Set the request handle. This handle
* is used for any crypto_cancel_req(9f)
* calls from the consumer. We have to
* Set the request handle. We have to
* do this before dispatching the
* request.
*/
@ -739,8 +660,7 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
/*
* We need to queue the request and return.
*/
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
cont);
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params);
if (areq == NULL) {
error = CRYPTO_HOST_MEMORY;
goto done;
@ -760,10 +680,8 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
/*
* Set the request handle. This handle is used
* for any crypto_cancel_req(9f) calls from the
* consumer. We have to do this before dispatching
* the request.
* Set the request handle. We have to do this
* before dispatching the request.
*/
crq->cr_reqid = kcf_reqid_insert(areq);
}
@ -856,66 +774,6 @@ kcf_free_req(kcf_areq_node_t *areq)
kmem_cache_free(kcf_areq_cache, areq);
}
/*
* Utility routine to remove a request from the chain of requests
* hanging off a context.
*/
static void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
kcf_areq_node_t *cur, *prev;
/*
* Get context lock, search for areq in the chain and remove it.
*/
ASSERT(ictx != NULL);
mutex_enter(&ictx->kc_in_use_lock);
prev = cur = ictx->kc_req_chain_first;
while (cur != NULL) {
if (cur == areq) {
if (prev == cur) {
if ((ictx->kc_req_chain_first =
cur->an_ctxchain_next) == NULL)
ictx->kc_req_chain_last = NULL;
} else {
if (cur == ictx->kc_req_chain_last)
ictx->kc_req_chain_last = prev;
prev->an_ctxchain_next = cur->an_ctxchain_next;
}
break;
}
prev = cur;
cur = cur->an_ctxchain_next;
}
mutex_exit(&ictx->kc_in_use_lock);
}
/*
* Remove the specified node from the global software queue.
*
* The caller must hold the queue lock and request lock (an_lock).
*/
static void
kcf_remove_node(kcf_areq_node_t *node)
{
kcf_areq_node_t *nextp = node->an_next;
kcf_areq_node_t *prevp = node->an_prev;
if (nextp != NULL)
nextp->an_prev = prevp;
else
gswq->gs_last = prevp;
if (prevp != NULL)
prevp->an_next = nextp;
else
gswq->gs_first = nextp;
node->an_state = REQ_CANCELED;
}
/*
* Add the request node to the end of the global software queue.
*
@ -1224,19 +1082,6 @@ kcf_aop_done(kcf_areq_node_t *areq, int error)
}
}
/* Deal with the internal continuation to this request first */
if (areq->an_isdual) {
kcf_dual_req_t *next_arg;
next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
next_arg->kr_areq = areq;
KCF_AREQ_REFHOLD(areq);
areq->an_isdual = B_FALSE;
NOTIFY_CLIENT(areq, error);
return;
}
/*
* If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
* always. If this flag is clear, we skip the notification
@ -1344,146 +1189,6 @@ kcf_reqid_delete(kcf_areq_node_t *areq)
mutex_exit(&rt->rt_lock);
}
/*
* Cancel a single asynchronous request.
*
* We guarantee that no problems will result from calling
* crypto_cancel_req() for a request which is either running, or
* has already completed. We remove the request from any queues
* if it is possible. We wait for request completion if the
* request is dispatched to a provider.
*
* Calling context:
* Can be called from user context only.
*
* NOTE: We acquire the following locks in this routine (in order):
* - rt_lock (kcf_reqid_table_t)
* - gswq->gs_lock
* - areq->an_lock
* - ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
*
* This locking order MUST be maintained in code every where else.
*/
void
crypto_cancel_req(crypto_req_id_t id)
{
int indx;
kcf_areq_node_t *areq;
kcf_provider_desc_t *pd;
kcf_context_t *ictx;
kcf_reqid_table_t *rt;
rt = kcf_reqid_table[id & REQID_TABLE_MASK];
indx = REQID_HASH(id);
mutex_enter(&rt->rt_lock);
for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
if (GET_REQID(areq) == id) {
/*
* We found the request. It is either still waiting
* in the framework queues or running at the provider.
*/
pd = areq->an_provider;
ASSERT(pd != NULL);
switch (pd->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
mutex_enter(&gswq->gs_lock);
mutex_enter(&areq->an_lock);
/* This request can be safely canceled. */
if (areq->an_state <= REQ_WAITING) {
/* Remove from gswq, global software queue. */
kcf_remove_node(areq);
if ((ictx = areq->an_context) != NULL)
kcf_removereq_in_ctxchain(ictx, areq);
mutex_exit(&areq->an_lock);
mutex_exit(&gswq->gs_lock);
mutex_exit(&rt->rt_lock);
/* Remove areq from hash table and free it. */
kcf_reqid_delete(areq);
KCF_AREQ_REFRELE(areq);
return;
}
mutex_exit(&areq->an_lock);
mutex_exit(&gswq->gs_lock);
break;
case CRYPTO_HW_PROVIDER:
/*
* There is no interface to remove an entry
* once it is on the taskq. So, we do not do
* anything for a hardware provider.
*/
break;
default:
break;
}
/*
* The request is running. Wait for the request completion
* to notify us.
*/
KCF_AREQ_REFHOLD(areq);
while (GET_REQID(areq) == id)
cv_wait(&areq->an_done, &rt->rt_lock);
KCF_AREQ_REFRELE(areq);
break;
}
}
mutex_exit(&rt->rt_lock);
}
/*
* Cancel all asynchronous requests associated with the
* passed in crypto context and free it.
*
* A client SHOULD NOT call this routine after calling a crypto_*_final
* routine. This routine is called only during intermediate operations.
* The client should not use the crypto context after this function returns
* since we destroy it.
*
* Calling context:
* Can be called from user context only.
*/
void
crypto_cancel_ctx(crypto_context_t ctx)
{
kcf_context_t *ictx;
kcf_areq_node_t *areq;
if (ctx == NULL)
return;
ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
mutex_enter(&ictx->kc_in_use_lock);
/* Walk the chain and cancel each request */
while ((areq = ictx->kc_req_chain_first) != NULL) {
/*
* We have to drop the lock here as we may have
* to wait for request completion. We hold the
* request before dropping the lock though, so that it
* won't be freed underneath us.
*/
KCF_AREQ_REFHOLD(areq);
mutex_exit(&ictx->kc_in_use_lock);
crypto_cancel_req(GET_REQID(areq));
KCF_AREQ_REFRELE(areq);
mutex_enter(&ictx->kc_in_use_lock);
}
mutex_exit(&ictx->kc_in_use_lock);
KCF_CONTEXT_REFRELE(ictx);
}
/*
* Update kstats.
*/
@ -1517,250 +1222,3 @@ kcf_misc_kstat_update(kstat_t *ksp, int rw)
return (0);
}
/*
* Allocate and initialize a kcf_dual_req, used for saving the arguments of
* a dual operation or an atomic operation that has to be internally
* simulated with multiple single steps.
* crq determines the memory allocation flags.
*/
kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
kcf_dual_req_t *kcr;
kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
if (kcr == NULL)
return (NULL);
/* Copy the whole crypto_call_req struct, as it isn't persistent */
if (crq != NULL)
kcr->kr_callreq = *crq;
else
bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
kcr->kr_areq = NULL;
kcr->kr_saveoffset = 0;
kcr->kr_savelen = 0;
return (kcr);
}
/*
* Callback routine for the next part of a simulated dual part.
* Schedules the next step.
*
* This routine can be called from interrupt context.
*/
void
kcf_next_req(void *next_req_arg, int status)
{
kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
kcf_req_params_t *params = &(next_req->kr_params);
kcf_areq_node_t *areq = next_req->kr_areq;
int error = status;
kcf_provider_desc_t *pd = NULL;
crypto_dual_data_t *ct = NULL;
/* Stop the processing if an error occurred at this step */
if (error != CRYPTO_SUCCESS) {
out:
areq->an_reqarg = next_req->kr_callreq;
KCF_AREQ_REFRELE(areq);
kmem_free(next_req, sizeof (kcf_dual_req_t));
areq->an_isdual = B_FALSE;
kcf_aop_done(areq, error);
return;
}
switch (params->rp_opgrp) {
case KCF_OG_MAC: {
/*
* The next req is submitted with the same reqid as the
* first part. The consumer only got back that reqid, and
* should still be able to cancel the operation during its
* second step.
*/
kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
crypto_ctx_template_t mac_tmpl;
kcf_mech_entry_t *me;
ct = (crypto_dual_data_t *)mops->mo_data;
mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
/* No expected recoverable failures, so no retry list */
pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
&me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
if (pd == NULL) {
error = CRYPTO_MECH_NOT_SUPPORTED;
goto out;
}
/* Validate the MAC context template here */
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
(mac_tmpl != NULL)) {
kcf_ctx_template_t *ctx_mac_tmpl;
ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
KCF_PROV_REFRELE(pd);
error = CRYPTO_OLD_CTX_TEMPLATE;
goto out;
}
mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
}
break;
}
case KCF_OG_DECRYPT: {
kcf_decrypt_ops_params_t *dcrops =
&(params->rp_u.decrypt_params);
ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
/* No expected recoverable failures, so no retry list */
pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
if (pd == NULL) {
error = CRYPTO_MECH_NOT_SUPPORTED;
goto out;
}
break;
}
default:
break;
}
/* The second step uses len2 and offset2 of the dual_data */
next_req->kr_saveoffset = ct->dd_offset1;
next_req->kr_savelen = ct->dd_len1;
ct->dd_offset1 = ct->dd_offset2;
ct->dd_len1 = ct->dd_len2;
/* preserve if the caller is restricted */
if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
} else {
areq->an_reqarg.cr_flag = 0;
}
areq->an_reqarg.cr_callback_func = kcf_last_req;
areq->an_reqarg.cr_callback_arg = next_req;
areq->an_isdual = B_TRUE;
/*
* We would like to call kcf_submit_request() here. But,
* that is not possible as that routine allocates a new
* kcf_areq_node_t request structure, while we need to
* reuse the existing request structure.
*/
switch (pd->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
error = common_submit_request(pd, NULL, params,
KCF_RHNDL(KM_NOSLEEP));
break;
case CRYPTO_HW_PROVIDER: {
kcf_provider_desc_t *old_pd;
taskq_t *taskq = pd->pd_sched_info.ks_taskq;
/*
* Set the params for the second step in the
* dual-ops.
*/
areq->an_params = *params;
old_pd = areq->an_provider;
KCF_PROV_REFRELE(old_pd);
KCF_PROV_REFHOLD(pd);
areq->an_provider = pd;
/*
* Note that we have to do a taskq_dispatch()
* here as we may be in interrupt context.
*/
if (taskq_dispatch(taskq, process_req_hwp, areq,
TQ_NOSLEEP) == (taskqid_t)0) {
error = CRYPTO_HOST_MEMORY;
} else {
error = CRYPTO_QUEUED;
}
break;
}
default:
break;
}
/*
* We have to release the holds on the request and the provider
* in all cases.
*/
KCF_AREQ_REFRELE(areq);
KCF_PROV_REFRELE(pd);
if (error != CRYPTO_QUEUED) {
/* restore, clean up, and invoke the client's callback */
ct->dd_offset1 = next_req->kr_saveoffset;
ct->dd_len1 = next_req->kr_savelen;
areq->an_reqarg = next_req->kr_callreq;
kmem_free(next_req, sizeof (kcf_dual_req_t));
areq->an_isdual = B_FALSE;
kcf_aop_done(areq, error);
}
}
/*
* Last part of an emulated dual operation.
* Clean up and restore ...
*/
void
kcf_last_req(void *last_req_arg, int status)
{
kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
kcf_req_params_t *params = &(last_req->kr_params);
kcf_areq_node_t *areq = last_req->kr_areq;
crypto_dual_data_t *ct = NULL;
switch (params->rp_opgrp) {
case KCF_OG_MAC: {
kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
ct = (crypto_dual_data_t *)mops->mo_data;
break;
}
case KCF_OG_DECRYPT: {
kcf_decrypt_ops_params_t *dcrops =
&(params->rp_u.decrypt_params);
ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
break;
}
default: {
panic("invalid kcf_op_group_t %d", (int)params->rp_opgrp);
return;
}
}
ct->dd_offset1 = last_req->kr_saveoffset;
ct->dd_len1 = last_req->kr_savelen;
/* The submitter used kcf_last_req as its callback */
if (areq == NULL) {
crypto_call_req_t *cr = &last_req->kr_callreq;
(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
kmem_free(last_req, sizeof (kcf_dual_req_t));
return;
}
areq->an_reqarg = last_req->kr_callreq;
KCF_AREQ_REFRELE(areq);
kmem_free(last_req, sizeof (kcf_dual_req_t));
areq->an_isdual = B_FALSE;
kcf_aop_done(areq, status);
}


@ -117,7 +117,7 @@ typedef struct kcf_sched_info {
* When impl.h is broken up (bug# 4703218), this will be done. For now,
* we hardcode these values.
*/
#define KCF_OPS_CLASSSIZE 8
#define KCF_OPS_CLASSSIZE 4
#define KCF_MAXMECHTAB 32
/*
@ -393,21 +393,15 @@ extern kcf_soft_conf_entry_t *soft_config_list;
#define KCF_MAXDIGEST 16 /* Digests */
#define KCF_MAXCIPHER 64 /* Ciphers */
#define KCF_MAXMAC 40 /* Message authentication codes */
#define KCF_MAXSIGN 24 /* Sign/Verify */
#define KCF_MAXKEYOPS 116 /* Key generation and derivation */
#define KCF_MAXMISC 16 /* Others ... */
typedef enum {
KCF_DIGEST_CLASS = 1,
KCF_CIPHER_CLASS,
KCF_MAC_CLASS,
KCF_SIGN_CLASS,
KCF_KEYOPS_CLASS,
KCF_MISC_CLASS
} kcf_ops_class_t;
#define KCF_FIRST_OPSCLASS KCF_DIGEST_CLASS
#define KCF_LAST_OPSCLASS KCF_MISC_CLASS
#define KCF_LAST_OPSCLASS KCF_MAC_CLASS
/* The table of all the kcf_xxx_mech_tab[]s, indexed by kcf_ops_class */
@ -497,66 +491,16 @@ typedef struct crypto_minor {
#define KCF_MECH_TAB_FULL 0x4 /* Need more room in the mech tabs. */
#define KCF_INVALID_INDX ((ushort_t)-1)
/*
* kCF internal mechanism and function group for tracking RNG providers.
*/
#define SUN_RANDOM "random"
#define CRYPTO_FG_RANDOM 0x80000000 /* generate_random() */
/*
* Wrappers for ops vectors. In the wrapper definitions below, the pd
* argument always corresponds to a pointer to a provider descriptor
* of type kcf_prov_desc_t.
*/
#define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops)
#define KCF_PROV_DIGEST_OPS(pd) ((pd)->pd_ops_vector->co_digest_ops)
#define KCF_PROV_CIPHER_OPS(pd) ((pd)->pd_ops_vector->co_cipher_ops)
#define KCF_PROV_MAC_OPS(pd) ((pd)->pd_ops_vector->co_mac_ops)
#define KCF_PROV_SIGN_OPS(pd) ((pd)->pd_ops_vector->co_sign_ops)
#define KCF_PROV_VERIFY_OPS(pd) ((pd)->pd_ops_vector->co_verify_ops)
#define KCF_PROV_DUAL_OPS(pd) ((pd)->pd_ops_vector->co_dual_ops)
#define KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) \
((pd)->pd_ops_vector->co_dual_cipher_mac_ops)
#define KCF_PROV_RANDOM_OPS(pd) ((pd)->pd_ops_vector->co_random_ops)
#define KCF_PROV_SESSION_OPS(pd) ((pd)->pd_ops_vector->co_session_ops)
#define KCF_PROV_OBJECT_OPS(pd) ((pd)->pd_ops_vector->co_object_ops)
#define KCF_PROV_KEY_OPS(pd) ((pd)->pd_ops_vector->co_key_ops)
#define KCF_PROV_PROVIDER_OPS(pd) ((pd)->pd_ops_vector->co_provider_ops)
#define KCF_PROV_MECH_OPS(pd) ((pd)->pd_ops_vector->co_mech_ops)
#define KCF_PROV_NOSTORE_KEY_OPS(pd) \
((pd)->pd_ops_vector->co_nostore_key_ops)
/*
* Wrappers for crypto_ctx_ops(9S) entry points.
*/
#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size, req) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \
KCF_PROV_CTX_OPS(pd)->create_ctx_template( \
(pd)->pd_prov_handle, mech, key, template, size, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->free_context) ? \
KCF_PROV_CTX_OPS(pd)->free_context(ctx) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_COPYIN_MECH(pd, umech, kmech, errorp, mode) ( \
(KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->copyin_mechanism) ? \
KCF_PROV_MECH_OPS(pd)->copyin_mechanism( \
(pd)->pd_prov_handle, umech, kmech, errorp, mode) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_COPYOUT_MECH(pd, kmech, umech, errorp, mode) ( \
(KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->copyout_mechanism) ? \
KCF_PROV_MECH_OPS(pd)->copyout_mechanism( \
(pd)->pd_prov_handle, kmech, umech, errorp, mode) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_FREE_MECH(pd, prov_mech) ( \
(KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->free_mechanism) ? \
KCF_PROV_MECH_OPS(pd)->free_mechanism( \
(pd)->pd_prov_handle, prov_mech) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops)
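Editor's sketch (not from the patch): the wrapper convention above means callers never dereference pd_ops_vector directly; a missing vector or entry point simply reports CRYPTO_NOT_SUPPORTED. KCF_PROV_FREE_CONTEXT() is defined further down in this diff, and pd/ctx are illustrative names.
	/* pd: kcf_provider_desc_t *, ctx: crypto_ctx_t * (illustrative) */
	int rv = KCF_PROV_FREE_CONTEXT(pd, ctx);
	if (rv == CRYPTO_NOT_SUPPORTED) {
		/* the provider registered no crypto_ctx_ops(9S) vector */
	}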
/*
* Wrappers for crypto_digest_ops(9S) entry points.
@ -706,552 +650,21 @@ typedef struct crypto_minor {
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_sign_ops(9S) entry points.
* Wrappers for crypto_ctx_ops(9S) entry points.
*/
#define KCF_PROV_SIGN_INIT(pd, ctx, mech, key, template, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_init) ? \
KCF_PROV_SIGN_OPS(pd)->sign_init( \
ctx, mech, key, template, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN(pd, ctx, data, sig, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign) ? \
KCF_PROV_SIGN_OPS(pd)->sign(ctx, data, sig, req) : \
#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size, req) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \
KCF_PROV_CTX_OPS(pd)->create_ctx_template( \
(pd)->pd_prov_handle, mech, key, template, size, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_UPDATE(pd, ctx, data, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_update) ? \
KCF_PROV_SIGN_OPS(pd)->sign_update(ctx, data, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->free_context) ? \
KCF_PROV_CTX_OPS(pd)->free_context(ctx) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_FINAL(pd, ctx, sig, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_final) ? \
KCF_PROV_SIGN_OPS(pd)->sign_final(ctx, sig, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_ATOMIC(pd, session, mech, key, data, template, \
sig, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_atomic) ? \
KCF_PROV_SIGN_OPS(pd)->sign_atomic( \
(pd)->pd_prov_handle, session, mech, key, data, sig, template, \
req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_RECOVER_INIT(pd, ctx, mech, key, template, \
req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_recover_init) ? \
KCF_PROV_SIGN_OPS(pd)->sign_recover_init(ctx, mech, key, template, \
req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_RECOVER(pd, ctx, data, sig, req) ( \
(KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_recover) ? \
KCF_PROV_SIGN_OPS(pd)->sign_recover(ctx, data, sig, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_RECOVER_ATOMIC(pd, session, mech, key, data, template, \
sig, req) ( \
(KCF_PROV_SIGN_OPS(pd) && \
KCF_PROV_SIGN_OPS(pd)->sign_recover_atomic) ? \
KCF_PROV_SIGN_OPS(pd)->sign_recover_atomic( \
(pd)->pd_prov_handle, session, mech, key, data, sig, template, \
req) : CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_verify_ops(9S) entry points.
*/
#define KCF_PROV_VERIFY_INIT(pd, ctx, mech, key, template, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_init) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_init(ctx, mech, key, template, \
req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_VERIFY(pd, ctx, data, sig, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->do_verify) ? \
KCF_PROV_VERIFY_OPS(pd)->do_verify(ctx, data, sig, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_VERIFY_UPDATE(pd, ctx, data, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_update) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_update(ctx, data, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_VERIFY_FINAL(pd, ctx, sig, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_final) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_final(ctx, sig, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_VERIFY_ATOMIC(pd, session, mech, key, data, template, sig, \
req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_atomic) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_atomic( \
(pd)->pd_prov_handle, session, mech, key, data, sig, template, \
req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx, mech, key, template, \
req) ( \
(KCF_PROV_VERIFY_OPS(pd) && \
KCF_PROV_VERIFY_OPS(pd)->verify_recover_init) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_recover_init(ctx, mech, key, \
template, req) : CRYPTO_NOT_SUPPORTED)
/* verify_recover() CSPI routine has different argument order than verify() */
#define KCF_PROV_VERIFY_RECOVER(pd, ctx, sig, data, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_recover) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_recover(ctx, sig, data, req) : \
CRYPTO_NOT_SUPPORTED)
/*
* verify_recover_atomic() CSPI routine has different argument order
* than verify_atomic().
*/
#define KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, session, mech, key, sig, \
template, data, req) ( \
(KCF_PROV_VERIFY_OPS(pd) && \
KCF_PROV_VERIFY_OPS(pd)->verify_recover_atomic) ? \
KCF_PROV_VERIFY_OPS(pd)->verify_recover_atomic( \
(pd)->pd_prov_handle, session, mech, key, sig, data, template, \
req) : CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_dual_ops(9S) entry points.
*/
#define KCF_PROV_DIGEST_ENCRYPT_UPDATE(digest_ctx, encrypt_ctx, plaintext, \
ciphertext, req) ( \
(KCF_PROV_DUAL_OPS(pd) && \
KCF_PROV_DUAL_OPS(pd)->digest_encrypt_update) ? \
KCF_PROV_DUAL_OPS(pd)->digest_encrypt_update( \
digest_ctx, encrypt_ctx, plaintext, ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_DIGEST_UPDATE(decrypt_ctx, digest_ctx, ciphertext, \
plaintext, req) ( \
(KCF_PROV_DUAL_OPS(pd) && \
KCF_PROV_DUAL_OPS(pd)->decrypt_digest_update) ? \
KCF_PROV_DUAL_OPS(pd)->decrypt_digest_update( \
decrypt_ctx, digest_ctx, ciphertext, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SIGN_ENCRYPT_UPDATE(sign_ctx, encrypt_ctx, plaintext, \
ciphertext, req) ( \
(KCF_PROV_DUAL_OPS(pd) && \
KCF_PROV_DUAL_OPS(pd)->sign_encrypt_update) ? \
KCF_PROV_DUAL_OPS(pd)->sign_encrypt_update( \
sign_ctx, encrypt_ctx, plaintext, ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_VERIFY_UPDATE(decrypt_ctx, verify_ctx, ciphertext, \
plaintext, req) ( \
(KCF_PROV_DUAL_OPS(pd) && \
KCF_PROV_DUAL_OPS(pd)->decrypt_verify_update) ? \
KCF_PROV_DUAL_OPS(pd)->decrypt_verify_update( \
decrypt_ctx, verify_ctx, ciphertext, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_dual_cipher_mac_ops(9S) entry points.
*/
#define KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, encr_mech, encr_key, mac_mech, \
mac_key, encr_ctx_template, mac_ctx_template, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_init) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_init( \
ctx, encr_mech, encr_key, mac_mech, mac_key, encr_ctx_template, \
mac_ctx_template, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_MAC(pd, ctx, plaintext, ciphertext, mac, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac( \
ctx, plaintext, ciphertext, mac, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, plaintext, ciphertext, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_update) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_update( \
ctx, plaintext, ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, ciphertext, mac, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_final) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_final( \
ctx, ciphertext, mac, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, session, encr_mech, encr_key, \
mac_mech, mac_key, plaintext, ciphertext, mac, \
encr_ctx_template, mac_ctx_template, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_atomic) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_atomic( \
(pd)->pd_prov_handle, session, encr_mech, encr_key, \
mac_mech, mac_key, plaintext, ciphertext, mac, \
encr_ctx_template, mac_ctx_template, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, mac_mech, mac_key, decr_mech, \
decr_key, mac_ctx_template, decr_ctx_template, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_init) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_init( \
ctx, mac_mech, mac_key, decr_mech, decr_key, mac_ctx_template, \
decr_ctx_template, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_DECRYPT(pd, ctx, ciphertext, mac, plaintext, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt( \
ctx, ciphertext, mac, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, ciphertext, plaintext, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_update) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_update( \
ctx, ciphertext, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, mac, plaintext, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_final) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_final( \
ctx, mac, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_DECRYPT_ATOMIC(pd, session, mac_mech, mac_key, \
decr_mech, decr_key, ciphertext, mac, plaintext, \
mac_ctx_template, decr_ctx_template, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_atomic) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_atomic( \
(pd)->pd_prov_handle, session, mac_mech, mac_key, \
decr_mech, decr_key, ciphertext, mac, plaintext, \
mac_ctx_template, decr_ctx_template, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd, session, mac_mech, mac_key, \
decr_mech, decr_key, ciphertext, mac, plaintext, \
mac_ctx_template, decr_ctx_template, req) ( \
(KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_verify_decrypt_atomic \
!= NULL) ? \
KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_verify_decrypt_atomic( \
(pd)->pd_prov_handle, session, mac_mech, mac_key, \
decr_mech, decr_key, ciphertext, mac, plaintext, \
mac_ctx_template, decr_ctx_template, req) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_random_number_ops(9S) entry points.
*/
#define KCF_PROV_SEED_RANDOM(pd, session, buf, len, est, flags, req) ( \
(KCF_PROV_RANDOM_OPS(pd) && KCF_PROV_RANDOM_OPS(pd)->seed_random) ? \
KCF_PROV_RANDOM_OPS(pd)->seed_random((pd)->pd_prov_handle, \
session, buf, len, est, flags, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_GENERATE_RANDOM(pd, session, buf, len, req) ( \
(KCF_PROV_RANDOM_OPS(pd) && \
KCF_PROV_RANDOM_OPS(pd)->generate_random) ? \
KCF_PROV_RANDOM_OPS(pd)->generate_random((pd)->pd_prov_handle, \
session, buf, len, req) : CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_session_ops(9S) entry points.
*
* ops_pd is the provider descriptor that supplies the ops_vector.
* pd is the descriptor that supplies the provider handle.
* Only session open/close needs two handles.
*/
#define KCF_PROV_SESSION_OPEN(ops_pd, session, req, pd) ( \
(KCF_PROV_SESSION_OPS(ops_pd) && \
KCF_PROV_SESSION_OPS(ops_pd)->session_open) ? \
KCF_PROV_SESSION_OPS(ops_pd)->session_open((pd)->pd_prov_handle, \
session, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SESSION_CLOSE(ops_pd, session, req, pd) ( \
(KCF_PROV_SESSION_OPS(ops_pd) && \
KCF_PROV_SESSION_OPS(ops_pd)->session_close) ? \
KCF_PROV_SESSION_OPS(ops_pd)->session_close((pd)->pd_prov_handle, \
session, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SESSION_LOGIN(pd, session, user_type, pin, len, req) ( \
(KCF_PROV_SESSION_OPS(pd) && \
KCF_PROV_SESSION_OPS(pd)->session_login) ? \
KCF_PROV_SESSION_OPS(pd)->session_login((pd)->pd_prov_handle, \
session, user_type, pin, len, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SESSION_LOGOUT(pd, session, req) ( \
(KCF_PROV_SESSION_OPS(pd) && \
KCF_PROV_SESSION_OPS(pd)->session_logout) ? \
KCF_PROV_SESSION_OPS(pd)->session_logout((pd)->pd_prov_handle, \
session, req) : CRYPTO_NOT_SUPPORTED)
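Editor's sketch (not from the patch) of the two-descriptor convention described above: when a session is opened on a logical provider, the ops vector is taken from the real member provider while the provider handle comes from the logical one. real_pd, logical_pd and req are illustrative names.
	crypto_session_id_t sid;
	int rv;
	/* real_pd supplies the ops vector, logical_pd the provider handle */
	rv = KCF_PROV_SESSION_OPEN(real_pd, &sid, req, logical_pd);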
/*
* Wrappers for crypto_object_ops(9S) entry points.
*/
#define KCF_PROV_OBJECT_CREATE(pd, session, template, count, object, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_create) ? \
KCF_PROV_OBJECT_OPS(pd)->object_create((pd)->pd_prov_handle, \
session, template, count, object, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_COPY(pd, session, object, template, count, \
new_object, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_copy) ? \
KCF_PROV_OBJECT_OPS(pd)->object_copy((pd)->pd_prov_handle, \
session, object, template, count, new_object, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_DESTROY(pd, session, object, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_destroy) ? \
KCF_PROV_OBJECT_OPS(pd)->object_destroy((pd)->pd_prov_handle, \
session, object, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_GET_SIZE(pd, session, object, size, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && \
KCF_PROV_OBJECT_OPS(pd)->object_get_size) ? \
KCF_PROV_OBJECT_OPS(pd)->object_get_size((pd)->pd_prov_handle, \
session, object, size, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd, session, object, template, \
count, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && \
KCF_PROV_OBJECT_OPS(pd)->object_get_attribute_value) ? \
KCF_PROV_OBJECT_OPS(pd)->object_get_attribute_value( \
(pd)->pd_prov_handle, session, object, template, count, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd, session, object, template, \
count, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && \
KCF_PROV_OBJECT_OPS(pd)->object_set_attribute_value) ? \
KCF_PROV_OBJECT_OPS(pd)->object_set_attribute_value( \
(pd)->pd_prov_handle, session, object, template, count, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_FIND_INIT(pd, session, template, count, ppriv, \
req) ( \
(KCF_PROV_OBJECT_OPS(pd) && \
KCF_PROV_OBJECT_OPS(pd)->object_find_init) ? \
KCF_PROV_OBJECT_OPS(pd)->object_find_init((pd)->pd_prov_handle, \
session, template, count, ppriv, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_FIND(pd, ppriv, objects, max_objects, object_count, \
req) ( \
(KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_find) ? \
KCF_PROV_OBJECT_OPS(pd)->object_find( \
(pd)->pd_prov_handle, ppriv, objects, max_objects, object_count, \
req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_OBJECT_FIND_FINAL(pd, ppriv, req) ( \
(KCF_PROV_OBJECT_OPS(pd) && \
KCF_PROV_OBJECT_OPS(pd)->object_find_final) ? \
KCF_PROV_OBJECT_OPS(pd)->object_find_final( \
(pd)->pd_prov_handle, ppriv, req) : CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_key_ops(9S) entry points.
*/
#define KCF_PROV_KEY_GENERATE(pd, session, mech, template, count, object, \
req) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_generate) ? \
KCF_PROV_KEY_OPS(pd)->key_generate((pd)->pd_prov_handle, \
session, mech, template, count, object, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_KEY_GENERATE_PAIR(pd, session, mech, pub_template, \
pub_count, priv_template, priv_count, pub_key, priv_key, req) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_generate_pair) ? \
KCF_PROV_KEY_OPS(pd)->key_generate_pair((pd)->pd_prov_handle, \
session, mech, pub_template, pub_count, priv_template, \
priv_count, pub_key, priv_key, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_KEY_WRAP(pd, session, mech, wrapping_key, key, wrapped_key, \
wrapped_key_len, req) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_wrap) ? \
KCF_PROV_KEY_OPS(pd)->key_wrap((pd)->pd_prov_handle, \
session, mech, wrapping_key, key, wrapped_key, wrapped_key_len, \
req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_KEY_UNWRAP(pd, session, mech, unwrapping_key, wrapped_key, \
wrapped_key_len, template, count, key, req) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_unwrap) ? \
KCF_PROV_KEY_OPS(pd)->key_unwrap((pd)->pd_prov_handle, \
session, mech, unwrapping_key, wrapped_key, wrapped_key_len, \
template, count, key, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_KEY_DERIVE(pd, session, mech, base_key, template, count, \
key, req) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_derive) ? \
KCF_PROV_KEY_OPS(pd)->key_derive((pd)->pd_prov_handle, \
session, mech, base_key, template, count, key, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_KEY_CHECK(pd, mech, key) ( \
(KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_check) ? \
KCF_PROV_KEY_OPS(pd)->key_check((pd)->pd_prov_handle, mech, key) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_provider_management_ops(9S) entry points.
*
* ops_pd is the provider descriptor that supplies the ops_vector.
* pd is the descriptor that supplies the provider handle.
* Only ext_info needs two handles.
*/
#define KCF_PROV_EXT_INFO(ops_pd, provext_info, req, pd) ( \
(KCF_PROV_PROVIDER_OPS(ops_pd) && \
KCF_PROV_PROVIDER_OPS(ops_pd)->ext_info) ? \
KCF_PROV_PROVIDER_OPS(ops_pd)->ext_info((pd)->pd_prov_handle, \
provext_info, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_INIT_TOKEN(pd, pin, pin_len, label, req) ( \
(KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->init_token) ? \
KCF_PROV_PROVIDER_OPS(pd)->init_token((pd)->pd_prov_handle, \
pin, pin_len, label, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_INIT_PIN(pd, session, pin, pin_len, req) ( \
(KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->init_pin) ? \
KCF_PROV_PROVIDER_OPS(pd)->init_pin((pd)->pd_prov_handle, \
session, pin, pin_len, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_SET_PIN(pd, session, old_pin, old_len, new_pin, new_len, \
req) ( \
(KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->set_pin) ? \
KCF_PROV_PROVIDER_OPS(pd)->set_pin((pd)->pd_prov_handle, \
session, old_pin, old_len, new_pin, new_len, req) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_nostore_key_ops(9S) entry points.
*/
#define KCF_PROV_NOSTORE_KEY_GENERATE(pd, session, mech, template, count, \
out_template, out_count, req) ( \
(KCF_PROV_NOSTORE_KEY_OPS(pd) && \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate) ? \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate( \
(pd)->pd_prov_handle, session, mech, template, count, \
out_template, out_count, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd, session, mech, pub_template, \
pub_count, priv_template, priv_count, out_pub_template, \
out_pub_count, out_priv_template, out_priv_count, req) ( \
(KCF_PROV_NOSTORE_KEY_OPS(pd) && \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate_pair) ? \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate_pair( \
(pd)->pd_prov_handle, session, mech, pub_template, pub_count, \
priv_template, priv_count, out_pub_template, out_pub_count, \
out_priv_template, out_priv_count, req) : CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_NOSTORE_KEY_DERIVE(pd, session, mech, base_key, template, \
count, out_template, out_count, req) ( \
(KCF_PROV_NOSTORE_KEY_OPS(pd) && \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_derive) ? \
KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_derive( \
(pd)->pd_prov_handle, session, mech, base_key, template, count, \
out_template, out_count, req) : CRYPTO_NOT_SUPPORTED)
/*
* The following routines are exported by the kcf module (/kernel/misc/kcf)
* to the crypto and cryptoadmin modules.
*/
/* Digest/mac/cipher entry points that take a provider descriptor and session */
extern int crypto_digest_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
extern int crypto_mac_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
extern int crypto_encrypt_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
extern int crypto_decrypt_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
/* Other private digest/mac/cipher entry points not exported through k-API */
extern int crypto_digest_key_prov(crypto_context_t, crypto_key_t *,
crypto_call_req_t *);
/* Private sign entry points exported by KCF */
extern int crypto_sign_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
extern int crypto_sign_recover_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
/* Private verify entry points exported by KCF */
extern int crypto_verify_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
extern int crypto_verify_recover_single(crypto_context_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *);
/* Private dual operations entry points exported by KCF */
extern int crypto_digest_encrypt_update(crypto_context_t, crypto_context_t,
crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
extern int crypto_decrypt_digest_update(crypto_context_t, crypto_context_t,
crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
extern int crypto_sign_encrypt_update(crypto_context_t, crypto_context_t,
crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
extern int crypto_decrypt_verify_update(crypto_context_t, crypto_context_t,
crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
/* Random Number Generation */
int crypto_seed_random(crypto_provider_handle_t provider, uchar_t *buf,
size_t len, crypto_call_req_t *req);
int crypto_generate_random(crypto_provider_handle_t provider, uchar_t *buf,
size_t len, crypto_call_req_t *req);
/* Provider Management */
int crypto_get_provider_info(crypto_provider_id_t id,
crypto_provider_info_t **info, crypto_call_req_t *req);
int crypto_get_provider_mechanisms(crypto_minor_t *, crypto_provider_id_t id,
uint_t *count, crypto_mech_name_t **list);
int crypto_init_token(crypto_provider_handle_t provider, char *pin,
size_t pin_len, char *label, crypto_call_req_t *);
int crypto_init_pin(crypto_provider_handle_t provider, char *pin,
size_t pin_len, crypto_call_req_t *req);
int crypto_set_pin(crypto_provider_handle_t provider, char *old_pin,
size_t old_len, char *new_pin, size_t new_len, crypto_call_req_t *req);
void crypto_free_provider_list(crypto_provider_entry_t *list, uint_t count);
void crypto_free_provider_info(crypto_provider_info_t *info);
/* Administrative */
int crypto_get_dev_list(uint_t *count, crypto_dev_list_entry_t **list);
int crypto_get_soft_list(uint_t *count, char **list, size_t *len);
int crypto_get_dev_info(char *name, uint_t instance, uint_t *count,
crypto_mech_name_t **list);
int crypto_get_soft_info(caddr_t name, uint_t *count,
crypto_mech_name_t **list);
int crypto_load_dev_disabled(char *name, uint_t instance, uint_t count,
crypto_mech_name_t *list);
int crypto_load_soft_disabled(caddr_t name, uint_t count,
crypto_mech_name_t *list);
int crypto_unload_soft_module(caddr_t path);
int crypto_load_soft_config(caddr_t name, uint_t count,
crypto_mech_name_t *list);
int crypto_load_door(uint_t did);
void crypto_free_mech_list(crypto_mech_name_t *list, uint_t count);
void crypto_free_dev_list(crypto_dev_list_entry_t *list, uint_t count);
/* Miscellaneous */
int crypto_get_mechanism_number(caddr_t name, crypto_mech_type_t *number);
int crypto_build_permitted_mech_names(kcf_provider_desc_t *,
crypto_mech_name_t **, uint_t *, int);
extern void kcf_destroy_mech_tabs(void);
extern void kcf_init_mech_tabs(void);
extern int kcf_add_mech_provider(short, kcf_provider_desc_t *,
@ -1262,71 +675,27 @@ extern kcf_provider_desc_t *kcf_alloc_provider_desc(
const crypto_provider_info_t *);
extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *);
extern void kcf_free_provider_desc(kcf_provider_desc_t *);
extern void kcf_soft_config_init(void);
extern int get_sw_provider_for_mech(crypto_mech_name_t, char **);
extern crypto_mech_type_t crypto_mech2id_common(const char *, boolean_t);
extern void undo_register_provider(kcf_provider_desc_t *, boolean_t);
extern void redo_register_provider(kcf_provider_desc_t *);
extern void kcf_rnd_init(void);
extern boolean_t kcf_rngprov_check(void);
extern int kcf_rnd_get_pseudo_bytes(uint8_t *, size_t);
extern int kcf_rnd_get_bytes(uint8_t *, size_t, boolean_t, boolean_t);
extern int random_add_pseudo_entropy(uint8_t *, size_t, uint_t);
extern void kcf_rnd_schedule_timeout(boolean_t);
extern int crypto_uio_data(crypto_data_t *, uchar_t *, int, cmd_type_t,
void *, void (*update)(void));
extern int crypto_mblk_data(crypto_data_t *, uchar_t *, int, cmd_type_t,
void *, void (*update)(void));
extern int crypto_put_output_data(uchar_t *, crypto_data_t *, int);
extern int crypto_get_input_data(crypto_data_t *, uchar_t **, uchar_t *);
extern int crypto_copy_key_to_ctx(crypto_key_t *, crypto_key_t **, size_t *,
int kmflag);
extern int crypto_digest_data(crypto_data_t *, void *, uchar_t *,
void (*update)(void), void (*final)(void), uchar_t);
extern int crypto_update_iov(void *, crypto_data_t *, crypto_data_t *,
int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
void (*copy_block)(uint8_t *, uint64_t *));
extern int crypto_update_uio(void *, crypto_data_t *, crypto_data_t *,
int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
void (*copy_block)(uint8_t *, uint64_t *));
extern int crypto_update_mp(void *, crypto_data_t *, crypto_data_t *,
int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
void (*copy_block)(uint8_t *, uint64_t *));
extern int crypto_get_key_attr(crypto_key_t *, crypto_attr_type_t, uchar_t **,
ssize_t *);
/* Access to the provider's table */
extern void kcf_prov_tab_destroy(void);
extern void kcf_prov_tab_init(void);
extern int kcf_prov_tab_add_provider(kcf_provider_desc_t *);
extern int kcf_prov_tab_rem_provider(crypto_provider_id_t);
extern kcf_provider_desc_t *kcf_prov_tab_lookup_by_name(char *);
extern kcf_provider_desc_t *kcf_prov_tab_lookup_by_dev(char *, uint_t);
extern int kcf_get_hw_prov_tab(uint_t *, kcf_provider_desc_t ***, int,
char *, uint_t, boolean_t);
extern int kcf_get_slot_list(uint_t *, kcf_provider_desc_t ***, boolean_t);
extern void kcf_free_provider_tab(uint_t, kcf_provider_desc_t **);
extern kcf_provider_desc_t *kcf_prov_tab_lookup(crypto_provider_id_t);
extern int kcf_get_sw_prov(crypto_mech_type_t, kcf_provider_desc_t **,
kcf_mech_entry_t **, boolean_t);
/* Access to the policy table */
extern boolean_t is_mech_disabled(kcf_provider_desc_t *, crypto_mech_name_t);
extern boolean_t is_mech_disabled_byname(crypto_provider_type_t, char *,
uint_t, crypto_mech_name_t);
extern void kcf_policy_tab_init(void);
extern void kcf_policy_free_desc(kcf_policy_desc_t *);
extern void kcf_policy_remove_by_name(char *, uint_t *, crypto_mech_name_t **);
extern void kcf_policy_remove_by_dev(char *, uint_t, uint_t *,
crypto_mech_name_t **);
extern kcf_policy_desc_t *kcf_policy_lookup_by_name(char *);
extern kcf_policy_desc_t *kcf_policy_lookup_by_dev(char *, uint_t);
extern int kcf_policy_load_soft_disabled(char *, uint_t, crypto_mech_name_t *,
uint_t *, crypto_mech_name_t **);
extern int kcf_policy_load_dev_disabled(char *, uint_t, uint_t,
crypto_mech_name_t *, uint_t *, crypto_mech_name_t **);
extern boolean_t in_soft_config_list(char *);
#ifdef __cplusplus
}


@ -87,141 +87,6 @@ typedef struct kcf_decrypt_ops_params {
crypto_spi_ctx_template_t dop_templ;
} kcf_decrypt_ops_params_t;
typedef struct kcf_sign_ops_params {
crypto_session_id_t so_sid;
crypto_mech_type_t so_framework_mechtype;
crypto_mechanism_t so_mech;
crypto_key_t *so_key;
crypto_data_t *so_data;
crypto_data_t *so_signature;
crypto_spi_ctx_template_t so_templ;
} kcf_sign_ops_params_t;
typedef struct kcf_verify_ops_params {
crypto_session_id_t vo_sid;
crypto_mech_type_t vo_framework_mechtype;
crypto_mechanism_t vo_mech;
crypto_key_t *vo_key;
crypto_data_t *vo_data;
crypto_data_t *vo_signature;
crypto_spi_ctx_template_t vo_templ;
} kcf_verify_ops_params_t;
typedef struct kcf_encrypt_mac_ops_params {
crypto_session_id_t em_sid;
crypto_mech_type_t em_framework_encr_mechtype;
crypto_mechanism_t em_encr_mech;
crypto_key_t *em_encr_key;
crypto_mech_type_t em_framework_mac_mechtype;
crypto_mechanism_t em_mac_mech;
crypto_key_t *em_mac_key;
crypto_data_t *em_plaintext;
crypto_dual_data_t *em_ciphertext;
crypto_data_t *em_mac;
crypto_spi_ctx_template_t em_encr_templ;
crypto_spi_ctx_template_t em_mac_templ;
} kcf_encrypt_mac_ops_params_t;
typedef struct kcf_mac_decrypt_ops_params {
crypto_session_id_t md_sid;
crypto_mech_type_t md_framework_mac_mechtype;
crypto_mechanism_t md_mac_mech;
crypto_key_t *md_mac_key;
crypto_mech_type_t md_framework_decr_mechtype;
crypto_mechanism_t md_decr_mech;
crypto_key_t *md_decr_key;
crypto_dual_data_t *md_ciphertext;
crypto_data_t *md_mac;
crypto_data_t *md_plaintext;
crypto_spi_ctx_template_t md_mac_templ;
crypto_spi_ctx_template_t md_decr_templ;
} kcf_mac_decrypt_ops_params_t;
typedef struct kcf_random_number_ops_params {
crypto_session_id_t rn_sid;
uchar_t *rn_buf;
size_t rn_buflen;
uint_t rn_entropy_est;
uint32_t rn_flags;
} kcf_random_number_ops_params_t;
/*
* so_pd is useful when the provider descriptor (pd) supplying the
* provider handle is different from the pd supplying the ops vector.
* This is the case for session open/close where so_pd can be the pd
* of a logical provider. The pd supplying the ops vector is passed
* as an argument to kcf_submit_request().
*/
typedef struct kcf_session_ops_params {
crypto_session_id_t *so_sid_ptr;
crypto_session_id_t so_sid;
crypto_user_type_t so_user_type;
char *so_pin;
size_t so_pin_len;
kcf_provider_desc_t *so_pd;
} kcf_session_ops_params_t;
typedef struct kcf_object_ops_params {
crypto_session_id_t oo_sid;
crypto_object_id_t oo_object_id;
crypto_object_attribute_t *oo_template;
uint_t oo_attribute_count;
crypto_object_id_t *oo_object_id_ptr;
size_t *oo_object_size;
void **oo_find_init_pp_ptr;
void *oo_find_pp;
uint_t oo_max_object_count;
uint_t *oo_object_count_ptr;
} kcf_object_ops_params_t;
/*
* ko_key is used to encode wrapping key in key_wrap() and
* unwrapping key in key_unwrap(). ko_key_template and
* ko_key_attribute_count are used to encode public template
* and public template attr count in key_generate_pair().
* kops->ko_key_object_id_ptr is used to encode public key
* in key_generate_pair().
*/
typedef struct kcf_key_ops_params {
crypto_session_id_t ko_sid;
crypto_mech_type_t ko_framework_mechtype;
crypto_mechanism_t ko_mech;
crypto_object_attribute_t *ko_key_template;
uint_t ko_key_attribute_count;
crypto_object_id_t *ko_key_object_id_ptr;
crypto_object_attribute_t *ko_private_key_template;
uint_t ko_private_key_attribute_count;
crypto_object_id_t *ko_private_key_object_id_ptr;
crypto_key_t *ko_key;
uchar_t *ko_wrapped_key;
size_t *ko_wrapped_key_len_ptr;
crypto_object_attribute_t *ko_out_template1;
crypto_object_attribute_t *ko_out_template2;
uint_t ko_out_attribute_count1;
uint_t ko_out_attribute_count2;
} kcf_key_ops_params_t;
/*
* po_pin and po_pin_len are used to encode new_pin and new_pin_len
* when wrapping set_pin() function parameters.
*
* po_pd is useful when the provider descriptor (pd) supplying the
* provider handle is different from the pd supplying the ops vector.
* This is true for the ext_info provider entry point where po_pd
* can be the pd of a logical provider. The pd supplying the ops vector
* is passed as an argument to kcf_submit_request().
*/
typedef struct kcf_provmgmt_ops_params {
crypto_session_id_t po_sid;
char *po_pin;
size_t po_pin_len;
char *po_old_pin;
size_t po_old_pin_len;
char *po_label;
crypto_provider_ext_info_t *po_ext_info;
kcf_provider_desc_t *po_pd;
} kcf_provmgmt_ops_params_t;
/*
* The operation type within a function group.
*/
@ -241,51 +106,6 @@ typedef enum kcf_op_type {
/* mac/cipher specific op */
KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC,
/* sign_recover ops */
KCF_OP_SIGN_RECOVER_INIT,
KCF_OP_SIGN_RECOVER,
KCF_OP_SIGN_RECOVER_ATOMIC,
/* verify_recover ops */
KCF_OP_VERIFY_RECOVER_INIT,
KCF_OP_VERIFY_RECOVER,
KCF_OP_VERIFY_RECOVER_ATOMIC,
/* random number ops */
KCF_OP_RANDOM_SEED,
KCF_OP_RANDOM_GENERATE,
/* session management ops */
KCF_OP_SESSION_OPEN,
KCF_OP_SESSION_CLOSE,
KCF_OP_SESSION_LOGIN,
KCF_OP_SESSION_LOGOUT,
/* object management ops */
KCF_OP_OBJECT_CREATE,
KCF_OP_OBJECT_COPY,
KCF_OP_OBJECT_DESTROY,
KCF_OP_OBJECT_GET_SIZE,
KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE,
KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE,
KCF_OP_OBJECT_FIND_INIT,
KCF_OP_OBJECT_FIND,
KCF_OP_OBJECT_FIND_FINAL,
/* key management ops */
KCF_OP_KEY_GENERATE,
KCF_OP_KEY_GENERATE_PAIR,
KCF_OP_KEY_WRAP,
KCF_OP_KEY_UNWRAP,
KCF_OP_KEY_DERIVE,
KCF_OP_KEY_CHECK,
/* provider management ops */
KCF_OP_MGMT_EXTINFO,
KCF_OP_MGMT_INITTOKEN,
KCF_OP_MGMT_INITPIN,
KCF_OP_MGMT_SETPIN
} kcf_op_type_t;
/*
@ -302,16 +122,6 @@ typedef enum kcf_op_group {
KCF_OG_MAC,
KCF_OG_ENCRYPT,
KCF_OG_DECRYPT,
KCF_OG_SIGN,
KCF_OG_VERIFY,
KCF_OG_ENCRYPT_MAC,
KCF_OG_MAC_DECRYPT,
KCF_OG_RANDOM,
KCF_OG_SESSION,
KCF_OG_OBJECT,
KCF_OG_KEY,
KCF_OG_PROVMGMT,
KCF_OG_NOSTORE_KEY
} kcf_op_group_t;
/*
@ -323,10 +133,7 @@ typedef enum kcf_op_group {
#define IS_UPDATE_OP(ftype) ((ftype) == KCF_OP_UPDATE)
#define IS_FINAL_OP(ftype) ((ftype) == KCF_OP_FINAL)
#define IS_ATOMIC_OP(ftype) ( \
(ftype) == KCF_OP_ATOMIC || (ftype) == KCF_OP_MAC_VERIFY_ATOMIC || \
(ftype) == KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC || \
(ftype) == KCF_OP_SIGN_RECOVER_ATOMIC || \
(ftype) == KCF_OP_VERIFY_RECOVER_ATOMIC)
(ftype) == KCF_OP_ATOMIC || (ftype) == KCF_OP_MAC_VERIFY_ATOMIC)
/*
* Keep the parameters associated with a request around.
@ -341,15 +148,6 @@ typedef struct kcf_req_params {
kcf_mac_ops_params_t mac_params;
kcf_encrypt_ops_params_t encrypt_params;
kcf_decrypt_ops_params_t decrypt_params;
kcf_sign_ops_params_t sign_params;
kcf_verify_ops_params_t verify_params;
kcf_encrypt_mac_ops_params_t encrypt_mac_params;
kcf_mac_decrypt_ops_params_t mac_decrypt_params;
kcf_random_number_ops_params_t random_number_params;
kcf_session_ops_params_t session_params;
kcf_object_ops_params_t object_params;
kcf_key_ops_params_t key_params;
kcf_provmgmt_ops_params_t provmgmt_params;
} rp_u;
} kcf_req_params_t;
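Editor's sketch (not patch text): a request for one of the surviving groups is still described by filling the matching union member, normally via a KCF_WRAP_*_OPS_PARAMS() macro of the kind shown below; here it is expanded by hand for a multi-part MAC update, with data standing in for a crypto_data_t *.
	kcf_req_params_t params;
	kcf_mac_ops_params_t *mops = &params.rp_u.mac_params;
	params.rp_opgrp = KCF_OG_MAC;		/* surviving op group */
	params.rp_optype = KCF_OP_UPDATE;	/* multi-part update */
	mops->mo_data = data;			/* later read back by kcf_last_req() */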
@ -434,191 +232,6 @@ typedef struct kcf_req_params {
cops->dop_templ = _templ; \
}
#define KCF_WRAP_SIGN_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _signature, _templ) { \
kcf_sign_ops_params_t *sops = &(req)->rp_u.sign_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_SIGN; \
(req)->rp_optype = ftype; \
sops->so_sid = _sid; \
if (mechp != NULL) { \
sops->so_mech = *mechp; \
sops->so_framework_mechtype = mechp->cm_type; \
} \
sops->so_key = _key; \
sops->so_data = _data; \
sops->so_signature = _signature; \
sops->so_templ = _templ; \
}
#define KCF_WRAP_VERIFY_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _signature, _templ) { \
kcf_verify_ops_params_t *vops = &(req)->rp_u.verify_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_VERIFY; \
(req)->rp_optype = ftype; \
vops->vo_sid = _sid; \
if (mechp != NULL) { \
vops->vo_mech = *mechp; \
vops->vo_framework_mechtype = mechp->cm_type; \
} \
vops->vo_key = _key; \
vops->vo_data = _data; \
vops->vo_signature = _signature; \
vops->vo_templ = _templ; \
}
#define KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(req, ftype, _sid, _encr_key, \
_mac_key, _plaintext, _ciphertext, _mac, _encr_templ, _mac_templ) { \
kcf_encrypt_mac_ops_params_t *cmops = &(req)->rp_u.encrypt_mac_params; \
\
(req)->rp_opgrp = KCF_OG_ENCRYPT_MAC; \
(req)->rp_optype = ftype; \
cmops->em_sid = _sid; \
cmops->em_encr_key = _encr_key; \
cmops->em_mac_key = _mac_key; \
cmops->em_plaintext = _plaintext; \
cmops->em_ciphertext = _ciphertext; \
cmops->em_mac = _mac; \
cmops->em_encr_templ = _encr_templ; \
cmops->em_mac_templ = _mac_templ; \
}
#define KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mac_key, \
_decr_key, _ciphertext, _mac, _plaintext, _mac_templ, _decr_templ) { \
kcf_mac_decrypt_ops_params_t *cmops = &(req)->rp_u.mac_decrypt_params; \
\
(req)->rp_opgrp = KCF_OG_MAC_DECRYPT; \
(req)->rp_optype = ftype; \
cmops->md_sid = _sid; \
cmops->md_mac_key = _mac_key; \
cmops->md_decr_key = _decr_key; \
cmops->md_ciphertext = _ciphertext; \
cmops->md_mac = _mac; \
cmops->md_plaintext = _plaintext; \
cmops->md_mac_templ = _mac_templ; \
cmops->md_decr_templ = _decr_templ; \
}
#define KCF_WRAP_RANDOM_OPS_PARAMS(req, ftype, _sid, _buf, _buflen, \
_est, _flags) { \
kcf_random_number_ops_params_t *rops = \
&(req)->rp_u.random_number_params; \
\
(req)->rp_opgrp = KCF_OG_RANDOM; \
(req)->rp_optype = ftype; \
rops->rn_sid = _sid; \
rops->rn_buf = _buf; \
rops->rn_buflen = _buflen; \
rops->rn_entropy_est = _est; \
rops->rn_flags = _flags; \
}
#define KCF_WRAP_SESSION_OPS_PARAMS(req, ftype, _sid_ptr, _sid, \
_user_type, _pin, _pin_len, _pd) { \
kcf_session_ops_params_t *sops = &(req)->rp_u.session_params; \
\
(req)->rp_opgrp = KCF_OG_SESSION; \
(req)->rp_optype = ftype; \
sops->so_sid_ptr = _sid_ptr; \
sops->so_sid = _sid; \
sops->so_user_type = _user_type; \
sops->so_pin = _pin; \
sops->so_pin_len = _pin_len; \
sops->so_pd = _pd; \
}
#define KCF_WRAP_OBJECT_OPS_PARAMS(req, ftype, _sid, _object_id, \
_template, _attribute_count, _object_id_ptr, _object_size, \
_find_init_pp_ptr, _find_pp, _max_object_count, _object_count_ptr) { \
kcf_object_ops_params_t *jops = &(req)->rp_u.object_params; \
\
(req)->rp_opgrp = KCF_OG_OBJECT; \
(req)->rp_optype = ftype; \
jops->oo_sid = _sid; \
jops->oo_object_id = _object_id; \
jops->oo_template = _template; \
jops->oo_attribute_count = _attribute_count; \
jops->oo_object_id_ptr = _object_id_ptr; \
jops->oo_object_size = _object_size; \
jops->oo_find_init_pp_ptr = _find_init_pp_ptr; \
jops->oo_find_pp = _find_pp; \
jops->oo_max_object_count = _max_object_count; \
jops->oo_object_count_ptr = _object_count_ptr; \
}
#define KCF_WRAP_KEY_OPS_PARAMS(req, ftype, _sid, _mech, _key_template, \
_key_attribute_count, _key_object_id_ptr, _private_key_template, \
_private_key_attribute_count, _private_key_object_id_ptr, \
_key, _wrapped_key, _wrapped_key_len_ptr) { \
kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_KEY; \
(req)->rp_optype = ftype; \
kops->ko_sid = _sid; \
if (mechp != NULL) { \
kops->ko_mech = *mechp; \
kops->ko_framework_mechtype = mechp->cm_type; \
} \
kops->ko_key_template = _key_template; \
kops->ko_key_attribute_count = _key_attribute_count; \
kops->ko_key_object_id_ptr = _key_object_id_ptr; \
kops->ko_private_key_template = _private_key_template; \
kops->ko_private_key_attribute_count = _private_key_attribute_count; \
kops->ko_private_key_object_id_ptr = _private_key_object_id_ptr; \
kops->ko_key = _key; \
kops->ko_wrapped_key = _wrapped_key; \
kops->ko_wrapped_key_len_ptr = _wrapped_key_len_ptr; \
}
#define KCF_WRAP_PROVMGMT_OPS_PARAMS(req, ftype, _sid, _old_pin, \
_old_pin_len, _pin, _pin_len, _label, _ext_info, _pd) { \
kcf_provmgmt_ops_params_t *pops = &(req)->rp_u.provmgmt_params; \
\
(req)->rp_opgrp = KCF_OG_PROVMGMT; \
(req)->rp_optype = ftype; \
pops->po_sid = _sid; \
pops->po_pin = _pin; \
pops->po_pin_len = _pin_len; \
pops->po_old_pin = _old_pin; \
pops->po_old_pin_len = _old_pin_len; \
pops->po_label = _label; \
pops->po_ext_info = _ext_info; \
pops->po_pd = _pd; \
}
#define KCF_WRAP_NOSTORE_KEY_OPS_PARAMS(req, ftype, _sid, _mech, \
_key_template, _key_attribute_count, _private_key_template, \
_private_key_attribute_count, _key, _out_template1, \
_out_attribute_count1, _out_template2, _out_attribute_count2) { \
kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_NOSTORE_KEY; \
(req)->rp_optype = ftype; \
kops->ko_sid = _sid; \
if (mechp != NULL) { \
kops->ko_mech = *mechp; \
kops->ko_framework_mechtype = mechp->cm_type; \
} \
kops->ko_key_template = _key_template; \
kops->ko_key_attribute_count = _key_attribute_count; \
kops->ko_key_object_id_ptr = NULL; \
kops->ko_private_key_template = _private_key_template; \
kops->ko_private_key_attribute_count = _private_key_attribute_count; \
kops->ko_private_key_object_id_ptr = NULL; \
kops->ko_key = _key; \
kops->ko_wrapped_key = NULL; \
kops->ko_wrapped_key_len_ptr = 0; \
kops->ko_out_template1 = _out_template1; \
kops->ko_out_template2 = _out_template2; \
kops->ko_out_attribute_count1 = _out_attribute_count1; \
kops->ko_out_attribute_count2 = _out_attribute_count2; \
}
#define KCF_SET_PROVIDER_MECHNUM(fmtype, pd, mechp) \
(mechp)->cm_type = \
KCF_TO_PROV_MECHNUM(pd, fmtype);


@ -87,13 +87,6 @@ extern ulong_t kcf_swprov_hndl;
#define REQHNDL2_KMFLAG(rhndl) \
((rhndl == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
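Editor's note (illustrative, not from the patch): the macro above picks the kmem allocation policy for a request, e.g.:
	/* rhndl is the request handle; buf and len are illustrative */
	buf = kmem_alloc(len, REQHNDL2_KMFLAG(rhndl));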
/* Internal call_req flags. They start after the public ones in api.h */
#define CRYPTO_SETDUAL 0x00001000 /* Set the 'cont' boolean before */
/* submitting the request */
#define KCF_ISDUALREQ(crq) \
(((crq) == NULL) ? B_FALSE : (crq->cr_flag & CRYPTO_SETDUAL))
typedef struct kcf_prov_tried {
kcf_provider_desc_t *pt_pd;
struct kcf_prov_tried *pt_next;
@ -182,7 +175,6 @@ typedef struct kcf_areq_node {
kcondvar_t an_turn_cv;
boolean_t an_is_my_turn;
boolean_t an_isdual; /* for internal reuse */
/*
* Next and previous nodes in the global software
@ -219,15 +211,6 @@ typedef struct kcf_areq_node {
#define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
(areq)->an_reqarg.cr_callback_arg, err);
/* For internally generated call requests for dual operations */
typedef struct kcf_call_req {
crypto_call_req_t kr_callreq; /* external client call req */
kcf_req_params_t kr_params; /* Params saved for next call */
kcf_areq_node_t *kr_areq; /* Use this areq */
off_t kr_saveoffset;
size_t kr_savelen;
} kcf_dual_req_t;
/*
* The following are somewhat similar to macros in callo.h, which implement
* callout tables.
@ -488,14 +471,10 @@ extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t,
boolean_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
crypto_mechanism_t *, kcf_mech_entry_t **, crypto_mech_type_t *,
crypto_mech_type_t *, int *, kcf_prov_tried_t *,
crypto_func_group_t, crypto_func_group_t, boolean_t, size_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
crypto_call_req_t *, kcf_req_params_t *, boolean_t);
crypto_call_req_t *, kcf_req_params_t *);
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
@ -517,10 +496,6 @@ extern void crypto_bufcall_service(void);
extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);
extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);
#ifdef __cplusplus
}
#endif


@ -94,44 +94,6 @@ typedef struct crypto_ctx {
void *cc_opstate; /* state */
} crypto_ctx_t;
/*
* Extended provider information.
*/
/*
* valid values for ei_flags field of extended info structure
* They match the RSA Security, Inc PKCS#11 tokenInfo flags.
*/
#define CRYPTO_EXTF_RNG 0x00000001
#define CRYPTO_EXTF_WRITE_PROTECTED 0x00000002
#define CRYPTO_EXTF_LOGIN_REQUIRED 0x00000004
#define CRYPTO_EXTF_USER_PIN_INITIALIZED 0x00000008
#define CRYPTO_EXTF_CLOCK_ON_TOKEN 0x00000040
#define CRYPTO_EXTF_PROTECTED_AUTHENTICATION_PATH 0x00000100
#define CRYPTO_EXTF_DUAL_CRYPTO_OPERATIONS 0x00000200
#define CRYPTO_EXTF_TOKEN_INITIALIZED 0x00000400
#define CRYPTO_EXTF_USER_PIN_COUNT_LOW 0x00010000
#define CRYPTO_EXTF_USER_PIN_FINAL_TRY 0x00020000
#define CRYPTO_EXTF_USER_PIN_LOCKED 0x00040000
#define CRYPTO_EXTF_USER_PIN_TO_BE_CHANGED 0x00080000
#define CRYPTO_EXTF_SO_PIN_COUNT_LOW 0x00100000
#define CRYPTO_EXTF_SO_PIN_FINAL_TRY 0x00200000
#define CRYPTO_EXTF_SO_PIN_LOCKED 0x00400000
#define CRYPTO_EXTF_SO_PIN_TO_BE_CHANGED 0x00800000
/*
* The crypto_ctx_ops structure contains pointers to context and
* context-template management operations for cryptographic providers. It is
* passed through the crypto_ops(9S) structure when providers register
* with the kernel using crypto_register_provider(9F).
*/
typedef struct crypto_ctx_ops {
int (*create_ctx_template)(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t *, size_t *, crypto_req_handle_t);
int (*free_context)(crypto_ctx_t *);
} __no_const crypto_ctx_ops_t;
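Editor's sketch (assumption, not patch text): on the provider side, the comment above translates to a small vector handed over at crypto_register_provider(9F) time. my_free_context() and my_ctx_ops are invented names; leaving create_ctx_template NULL is tolerated by the KCF_PROV_CREATE_CTX_TEMPLATE() wrapper, which then reports CRYPTO_NOT_SUPPORTED.
static int
my_free_context(crypto_ctx_t *ctx)
{
	/* nothing was allocated by this sketch provider */
	return (CRYPTO_SUCCESS);
}
static const crypto_ctx_ops_t my_ctx_ops = {
	.create_ctx_template = NULL,	/* optional; wrapper handles NULL */
	.free_context = my_free_context
};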
/*
* The crypto_digest_ops structure contains pointers to digest
* operations for cryptographic providers. It is passed through
@ -214,271 +176,17 @@ typedef struct crypto_mac_ops {
} __no_const crypto_mac_ops_t;
/*
* The crypto_sign_ops structure contains pointers to signing
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
* The crypto_ctx_ops structure contains pointers to context and
* context-template management operations for cryptographic providers. It is
* passed through the crypto_ops(9S) structure when providers register
* with the kernel using crypto_register_provider(9F).
*/
typedef struct crypto_sign_ops {
int (*sign_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*sign)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*sign_update)(crypto_ctx_t *,
crypto_data_t *, crypto_req_handle_t);
int (*sign_final)(crypto_ctx_t *,
crypto_data_t *, crypto_req_handle_t);
int (*sign_atomic)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*sign_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*sign_recover)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*sign_recover_atomic)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
} __no_const crypto_sign_ops_t;
/*
* The crypto_verify_ops structure contains pointers to verify
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_verify_ops {
int (*verify_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*do_verify)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*verify_update)(crypto_ctx_t *,
crypto_data_t *, crypto_req_handle_t);
int (*verify_final)(crypto_ctx_t *,
crypto_data_t *, crypto_req_handle_t);
int (*verify_atomic)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*verify_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
int (*verify_recover)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*verify_recover_atomic)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
crypto_req_handle_t);
} __no_const crypto_verify_ops_t;
/*
* The crypto_dual_ops structure contains pointers to dual
* cipher and sign/verify operations for cryptographic providers.
* It is passed through the crypto_ops(9S) structure when
* providers register with the kernel using
* crypto_register_provider(9F).
*/
typedef struct crypto_dual_ops {
int (*digest_encrypt_update)(
crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
int (*decrypt_digest_update)(
crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
int (*sign_encrypt_update)(
crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
int (*decrypt_verify_update)(
crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
} __no_const crypto_dual_ops_t;
/*
* The crypto_dual_cipher_mac_ops structure contains pointers to dual
* cipher and MAC operations for cryptographic providers.
* It is passed through the crypto_ops(9S) structure when
* providers register with the kernel using
* crypto_register_provider(9F).
*/
typedef struct crypto_dual_cipher_mac_ops {
int (*encrypt_mac_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t,
crypto_spi_ctx_template_t, crypto_req_handle_t);
int (*encrypt_mac)(crypto_ctx_t *,
crypto_data_t *, crypto_dual_data_t *, crypto_data_t *,
crypto_req_handle_t);
int (*encrypt_mac_update)(crypto_ctx_t *,
crypto_data_t *, crypto_dual_data_t *, crypto_req_handle_t);
int (*encrypt_mac_final)(crypto_ctx_t *,
crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*encrypt_mac_atomic)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_data_t *, crypto_dual_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t,
crypto_spi_ctx_template_t, crypto_req_handle_t);
int (*mac_decrypt_init)(crypto_ctx_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t,
crypto_spi_ctx_template_t, crypto_req_handle_t);
int (*mac_decrypt)(crypto_ctx_t *,
crypto_dual_data_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
int (*mac_decrypt_update)(crypto_ctx_t *,
crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*mac_decrypt_final)(crypto_ctx_t *,
crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
int (*mac_decrypt_atomic)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
crypto_spi_ctx_template_t, crypto_req_handle_t);
int (*mac_verify_decrypt_atomic)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
crypto_spi_ctx_template_t, crypto_req_handle_t);
} __no_const crypto_dual_cipher_mac_ops_t;
/*
* The crypto_random_number_ops structure contains pointers to random
* number operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_random_number_ops {
int (*seed_random)(crypto_provider_handle_t, crypto_session_id_t,
uchar_t *, size_t, uint_t, uint32_t, crypto_req_handle_t);
int (*generate_random)(crypto_provider_handle_t, crypto_session_id_t,
uchar_t *, size_t, crypto_req_handle_t);
} __no_const crypto_random_number_ops_t;
/*
* Flag values for seed_random.
*/
#define CRYPTO_SEED_NOW 0x00000001
/*
* The crypto_session_ops structure contains pointers to session
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_session_ops {
int (*session_open)(crypto_provider_handle_t, crypto_session_id_t *,
crypto_req_handle_t);
int (*session_close)(crypto_provider_handle_t, crypto_session_id_t,
crypto_req_handle_t);
int (*session_login)(crypto_provider_handle_t, crypto_session_id_t,
crypto_user_type_t, char *, size_t, crypto_req_handle_t);
int (*session_logout)(crypto_provider_handle_t, crypto_session_id_t,
crypto_req_handle_t);
} __no_const crypto_session_ops_t;
/*
* The crypto_object_ops structure contains pointers to object
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_object_ops {
int (*object_create)(crypto_provider_handle_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_req_handle_t);
int (*object_copy)(crypto_provider_handle_t, crypto_session_id_t,
crypto_object_id_t, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_req_handle_t);
int (*object_destroy)(crypto_provider_handle_t, crypto_session_id_t,
crypto_object_id_t, crypto_req_handle_t);
int (*object_get_size)(crypto_provider_handle_t, crypto_session_id_t,
crypto_object_id_t, size_t *, crypto_req_handle_t);
int (*object_get_attribute_value)(crypto_provider_handle_t,
crypto_session_id_t, crypto_object_id_t,
crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
int (*object_set_attribute_value)(crypto_provider_handle_t,
crypto_session_id_t, crypto_object_id_t,
crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
int (*object_find_init)(crypto_provider_handle_t, crypto_session_id_t,
crypto_object_attribute_t *, uint_t, void **,
crypto_req_handle_t);
int (*object_find)(crypto_provider_handle_t, void *,
crypto_object_id_t *, uint_t, uint_t *, crypto_req_handle_t);
int (*object_find_final)(crypto_provider_handle_t, void *,
crypto_req_handle_t);
} __no_const crypto_object_ops_t;
/*
* The crypto_key_ops structure contains pointers to key
* operations for cryptographic providers. It is passed through
* the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_key_ops {
int (*key_generate)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_req_handle_t);
int (*key_generate_pair)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
crypto_object_id_t *, crypto_req_handle_t);
int (*key_wrap)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_id_t *,
uchar_t *, size_t *, crypto_req_handle_t);
int (*key_unwrap)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, uchar_t *, size_t *,
crypto_object_attribute_t *, uint_t,
crypto_object_id_t *, crypto_req_handle_t);
int (*key_derive)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
uint_t, crypto_object_id_t *, crypto_req_handle_t);
int (*key_check)(crypto_provider_handle_t, crypto_mechanism_t *,
crypto_key_t *);
} __no_const crypto_key_ops_t;
/*
* The crypto_provider_management_ops structure contains pointers
* to management operations for cryptographic providers. It is passed
* through the crypto_ops(9S) structure when providers register with the
* kernel using crypto_register_provider(9F).
*/
typedef struct crypto_provider_management_ops {
int (*ext_info)(crypto_provider_handle_t,
crypto_provider_ext_info_t *, crypto_req_handle_t);
int (*init_token)(crypto_provider_handle_t, char *, size_t,
char *, crypto_req_handle_t);
int (*init_pin)(crypto_provider_handle_t, crypto_session_id_t,
char *, size_t, crypto_req_handle_t);
int (*set_pin)(crypto_provider_handle_t, crypto_session_id_t,
char *, size_t, char *, size_t, crypto_req_handle_t);
} __no_const crypto_provider_management_ops_t;
typedef struct crypto_mech_ops {
int (*copyin_mechanism)(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
int (*copyout_mechanism)(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
int (*free_mechanism)(crypto_provider_handle_t, crypto_mechanism_t *);
} __no_const crypto_mech_ops_t;
typedef struct crypto_nostore_key_ops {
int (*nostore_key_generate)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *,
crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
uint_t, crypto_req_handle_t);
int (*nostore_key_generate_pair)(crypto_provider_handle_t,
crypto_session_id_t, crypto_mechanism_t *,
crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
uint_t, crypto_object_attribute_t *, uint_t,
crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
int (*nostore_key_derive)(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
uint_t, crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
} __no_const crypto_nostore_key_ops_t;
typedef struct crypto_ctx_ops {
int (*create_ctx_template)(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t *, size_t *, crypto_req_handle_t);
int (*free_context)(crypto_ctx_t *);
} __no_const crypto_ctx_ops_t;
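As an illustrative sketch (names hypothetical, not taken from the patch): with only create_ctx_template and free_context left in crypto_ctx_ops_t, a provider's ctx-ops table stays tiny, mirroring the aes_ctx_ops, sha2_ctx_ops and skein_ctx_ops tables referenced in the hunks below.
/*
 * Hypothetical provider ctx-ops table.  foo_create_ctx_template() and
 * foo_free_context() are assumed entry points, not real ICP symbols;
 * they would implement the two function-pointer slots declared above.
 */
static const crypto_ctx_ops_t foo_ctx_ops = {
	.create_ctx_template = foo_create_ctx_template,
	.free_context = foo_free_context,
};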
/*
* The crypto_ops(9S) structure contains the structures containing
@ -491,18 +199,7 @@ typedef struct crypto_ops {
const crypto_digest_ops_t *co_digest_ops;
const crypto_cipher_ops_t *co_cipher_ops;
const crypto_mac_ops_t *co_mac_ops;
crypto_sign_ops_t *co_sign_ops;
crypto_verify_ops_t *co_verify_ops;
crypto_dual_ops_t *co_dual_ops;
crypto_dual_cipher_mac_ops_t *co_dual_cipher_mac_ops;
crypto_random_number_ops_t *co_random_ops;
crypto_session_ops_t *co_session_ops;
crypto_object_ops_t *co_object_ops;
crypto_key_ops_t *co_key_ops;
crypto_provider_management_ops_t *co_provider_ops;
const crypto_ctx_ops_t *co_ctx_ops;
crypto_mech_ops_t *co_mech_ops;
crypto_nostore_key_ops_t *co_nostore_key_ops;
} crypto_ops_t;
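Taken together, the trimmed crypto_ops_t carries only the digest, cipher, MAC and ctx groups. As an illustrative sketch (names hypothetical, not taken from the patch), a cipher-and-MAC-only provider would now fill just those slots; the real AES, SHA2 and Skein vectors appear verbatim in the hunks below.
/*
 * Hypothetical ops vector after the trim.  Designated initializers are
 * used only so the sketch does not depend on member order; the ICP
 * modules below use positional initializers.
 */
static const crypto_ops_t foo_crypto_ops = {
	.co_digest_ops = NULL,		/* no digest support */
	.co_cipher_ops = &foo_cipher_ops,
	.co_mac_ops = &foo_mac_ops,
	.co_ctx_ops = &foo_ctx_ops,
};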
/*
@ -518,29 +215,11 @@ typedef uint32_t crypto_func_group_t;
#define CRYPTO_FG_ENCRYPT 0x00000001 /* encrypt_init() */
#define CRYPTO_FG_DECRYPT 0x00000002 /* decrypt_init() */
#define CRYPTO_FG_DIGEST 0x00000004 /* digest_init() */
#define CRYPTO_FG_SIGN 0x00000008 /* sign_init() */
#define CRYPTO_FG_SIGN_RECOVER 0x00000010 /* sign_recover_init() */
#define CRYPTO_FG_VERIFY 0x00000020 /* verify_init() */
#define CRYPTO_FG_VERIFY_RECOVER 0x00000040 /* verify_recover_init() */
#define CRYPTO_FG_GENERATE 0x00000080 /* key_generate() */
#define CRYPTO_FG_GENERATE_KEY_PAIR 0x00000100 /* key_generate_pair() */
#define CRYPTO_FG_WRAP 0x00000200 /* key_wrap() */
#define CRYPTO_FG_UNWRAP 0x00000400 /* key_unwrap() */
#define CRYPTO_FG_DERIVE 0x00000800 /* key_derive() */
#define CRYPTO_FG_MAC 0x00001000 /* mac_init() */
#define CRYPTO_FG_ENCRYPT_MAC 0x00002000 /* encrypt_mac_init() */
#define CRYPTO_FG_MAC_DECRYPT 0x00004000 /* decrypt_mac_init() */
#define CRYPTO_FG_ENCRYPT_ATOMIC 0x00008000 /* encrypt_atomic() */
#define CRYPTO_FG_DECRYPT_ATOMIC 0x00010000 /* decrypt_atomic() */
#define CRYPTO_FG_MAC_ATOMIC 0x00020000 /* mac_atomic() */
#define CRYPTO_FG_DIGEST_ATOMIC 0x00040000 /* digest_atomic() */
#define CRYPTO_FG_SIGN_ATOMIC 0x00080000 /* sign_atomic() */
#define CRYPTO_FG_SIGN_RECOVER_ATOMIC 0x00100000 /* sign_recover_atomic() */
#define CRYPTO_FG_VERIFY_ATOMIC 0x00200000 /* verify_atomic() */
#define CRYPTO_FG_VERIFY_RECOVER_ATOMIC 0x00400000 /* verify_recover_atomic() */
#define CRYPTO_FG_ENCRYPT_MAC_ATOMIC 0x00800000 /* encrypt_mac_atomic() */
#define CRYPTO_FG_MAC_DECRYPT_ATOMIC 0x01000000 /* mac_decrypt_atomic() */
#define CRYPTO_FG_RESERVED 0x80000000
/*
* Maximum length of the pi_provider_description field of the
@ -549,21 +228,6 @@ typedef uint32_t crypto_func_group_t;
#define CRYPTO_PROVIDER_DESCR_MAX_LEN 64
/* Bit mask for all the simple operations */
#define CRYPTO_FG_SIMPLEOP_MASK (CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | \
CRYPTO_FG_DIGEST | CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY | CRYPTO_FG_MAC | \
CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | \
CRYPTO_FG_MAC_ATOMIC | CRYPTO_FG_DIGEST_ATOMIC | CRYPTO_FG_SIGN_ATOMIC | \
CRYPTO_FG_VERIFY_ATOMIC)
/* Bit mask for all the dual operations */
#define CRYPTO_FG_MAC_CIPHER_MASK (CRYPTO_FG_ENCRYPT_MAC | \
CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | \
CRYPTO_FG_MAC_DECRYPT_ATOMIC)
/* Add other combos to CRYPTO_FG_DUAL_MASK */
#define CRYPTO_FG_DUAL_MASK CRYPTO_FG_MAC_CIPHER_MASK
/*
* The crypto_mech_info structure specifies one of the mechanisms
* supported by a cryptographic provider. The pi_mechanisms field of
@ -579,8 +243,6 @@ typedef struct crypto_mech_info {
uint32_t cm_mech_flags;
} crypto_mech_info_t;
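The surviving CRYPTO_FG_* bits are advertised per mechanism through the cm_func_group_mask field of crypto_mech_info_t (set, for example, in init_prov_mechs() further down) and are checked before an operation is dispatched. A hedged sketch of such a check, with the foo_ prefix assumed:
/*
 * Hypothetical helper: report whether a mechanism entry advertises
 * atomic MAC support via CRYPTO_FG_MAC_ATOMIC.
 */
static boolean_t
foo_mech_supports_mac_atomic(const crypto_mech_info_t *mi)
{
	return ((mi->cm_func_group_mask & CRYPTO_FG_MAC_ATOMIC) ?
	    B_TRUE : B_FALSE);
}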
/* Alias the old name to the new name for compatibility. */
#define cm_keysize_unit cm_mech_flags
/*
* The following is used by a provider that sets


@ -69,9 +69,7 @@ static const crypto_mech_info_t aes_mech_info_tab[] = {
{SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
@ -147,16 +145,7 @@ static const crypto_ops_t aes_crypto_ops = {
NULL,
&aes_cipher_ops,
&aes_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&aes_ctx_ops
&aes_ctx_ops,
};
static const crypto_provider_info_t aes_prov_info = {


@ -161,16 +161,7 @@ static const crypto_ops_t sha2_crypto_ops = {
&sha2_digest_ops,
NULL,
&sha2_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&sha2_ctx_ops
&sha2_ctx_ops,
};
static const crypto_provider_info_t sha2_prov_info = {


@ -99,15 +99,6 @@ static const crypto_ops_t skein_crypto_ops = {
&skein_digest_ops,
NULL,
&skein_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&skein_ctx_ops,
};


@ -75,18 +75,7 @@ copy_ops_vector(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}
/*
@ -150,12 +139,6 @@ crypto_register_provider(const crypto_provider_info_t *info,
prov_desc->pd_flags = info->pi_flags;
}
/* object_ops and nostore_key_ops are mutually exclusive */
if (prov_desc->pd_ops_vector->co_object_ops &&
prov_desc->pd_ops_vector->co_nostore_key_ops) {
goto bail;
}
/* process the mechanisms supported by the provider */
if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
goto bail;
@ -184,32 +167,6 @@ crypto_register_provider(const crypto_provider_info_t *info,
else
prov_desc->pd_sched_info.ks_taskq = NULL;
/* no kernel session to logical providers */
if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
/*
* Open a session for session-oriented providers. This session
* is used for all kernel consumers. This is fine as a provider
* is required to support multiple thread access to a session.
* We can do this only after the taskq has been created as we
* do a kcf_submit_request() to open the session.
*/
if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
kcf_req_params_t params;
KCF_WRAP_SESSION_OPS_PARAMS(&params,
KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
CRYPTO_USER, NULL, 0, prov_desc);
ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
B_FALSE);
if (ret != CRYPTO_SUCCESS) {
undo_register_provider(prov_desc, B_TRUE);
ret = CRYPTO_FAILED;
goto bail;
}
}
}
if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
/*
* Create the kstat for this provider. There is a kstat
@ -434,29 +391,9 @@ init_prov_mechs(const crypto_provider_info_t *info, kcf_provider_desc_t *desc)
* mechanism, SUN_RANDOM, in this case.
*/
if (info != NULL) {
if (info->pi_ops_vector->co_random_ops != NULL) {
crypto_mech_info_t *rand_mi;
/*
* Need the following check as it is possible to have
* a provider that implements just random_ops and has
* pi_mechanisms == NULL.
*/
if (info->pi_mechanisms != NULL) {
bcopy(info->pi_mechanisms, desc->pd_mechanisms,
sizeof (crypto_mech_info_t) * (mcount - 1));
}
rand_mi = &desc->pd_mechanisms[mcount - 1];
bzero(rand_mi, sizeof (crypto_mech_info_t));
(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
CRYPTO_MAX_MECH_NAME);
rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
} else {
ASSERT(info->pi_mechanisms != NULL);
bcopy(info->pi_mechanisms, desc->pd_mechanisms,
sizeof (crypto_mech_info_t) * mcount);
}
ASSERT(info->pi_mechanisms != NULL);
bcopy(info->pi_mechanisms, desc->pd_mechanisms,
sizeof (crypto_mech_info_t) * mcount);
}
/*
@ -578,26 +515,6 @@ undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}
/*
* Utility routine called from crypto_load_soft_disabled(). Callers
* should have done a prior undo_register_provider().
*/
void
redo_register_provider(kcf_provider_desc_t *pd)
{
/* process the mechanisms supported by the provider */
(void) init_prov_mechs(NULL, pd);
/*
* Hold provider in providers table. We should not call
* kcf_prov_tab_add_provider() here as the provider descriptor
* is still valid which means it has an entry in the provider
* table.
*/
KCF_PROV_REFHOLD(pd);
KCF_PROV_IREFHOLD(pd);
}
/*
* Add provider (p1) to another provider's array of providers (p2).
* Hardware and logical providers use this array to cross-reference