/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */


#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>

/*
 * Thread count, minalloc and maxalloc values to be used for taskq_create().
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void delete_kstat(kcf_provider_desc_t *);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
};

#define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
	*((dst)->ops) = *((src)->ops);

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle. The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points. Hardware providers call this routine in their attach
 * routines. Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char *ks_name;

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on the pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers table; this also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass the TASKQ_PREPOPULATE
	 * flag to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

	/* no kernel session for logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			ks_name = kmem_asprintf("%s_%s",
			    "NONAME", "provider_stats");
		} else {
			ks_name = kmem_asprintf("%s_%d_%u_%s",
			    "NONAME", 0, prov_desc->pd_prov_id,
			    "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
		kmem_strfree(ks_name);
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}

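/*
 * A minimal registration sketch (not part of the framework): a software
 * provider would typically fill in a crypto_provider_info_t and call
 * crypto_register_provider() from its _init() routine, roughly as below.
 * example_info, example_ops, example_mechs and example_handle are
 * hypothetical names used only for illustration.
 *
 *	static crypto_kcf_provider_handle_t example_handle;
 *
 *	static crypto_provider_info_t example_info = {
 *		.pi_interface_version = CRYPTO_SPI_VERSION_1,
 *		.pi_provider_type = CRYPTO_SW_PROVIDER,
 *		.pi_provider_description = "example software provider",
 *		.pi_ops_vector = &example_ops,
 *		.pi_mechanisms = example_mechs,
 *		(remaining fields, such as the mechanism count, omitted)
 *	};
 *
 *	int
 *	example_init(void)
 *	{
 *		if (crypto_register_provider(&example_info,
 *		    &example_handle) != CRYPTO_SUCCESS)
 *			return (EACCES);
 *		return (0);
 *	}
 */
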
/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_REMOVED;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Check if this provider is currently being used.
		 * pd_irefcnt is the number of holds from the internal
		 * structures. We add one to account for the above lookup.
		 */
		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
			desc->pd_state = saved_state;
			mutex_exit(&desc->pd_lock);
			/* Release reference held by kcf_prov_tab_lookup(). */
			KCF_PROV_REFRELE(desc);
			/*
			 * When they get the busy return value, the
			 * administrator will presumably stop the clients,
			 * thus removing the holds. Any retry will then
			 * succeed.
			 */
			return (CRYPTO_BUSY);
		}
	}
	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);

		/*
		 * Wait until the existing requests complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_state != KCF_PROV_FREED)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	} else {
		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_irefcnt > 0)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	}

	kcf_do_notify(desc, B_FALSE);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * This is the only place where kcf_free_provider_desc()
		 * is called directly. KCF_PROV_REFRELE() should free the
		 * structure in all other places.
		 */
		ASSERT(desc->pd_state == KCF_PROV_FREED &&
		    desc->pd_refcnt == 0);
		kcf_free_provider_desc(desc);
	} else {
		KCF_PROV_REFRELE(desc);
	}

	return (CRYPTO_SUCCESS);
}

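/*
 * A minimal unregistration sketch (not part of the framework): a software
 * provider's _fini() routine would typically refuse to unload while the
 * framework reports the provider busy. example_handle is the hypothetical
 * handle returned by crypto_register_provider() in the sketch above.
 *
 *	int
 *	example_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *		return (0);
 *	}
 */
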
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *	The provider indicates that it can process more requests. A provider
 *	will notify with this event if it has previously notified us with a
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *	The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 *	be sending any more requests to the provider. The provider may notify
 *	with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}

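/*
 * An illustrative sketch (not part of the framework): a hardware provider
 * whose command queue fills up might throttle the framework and later
 * resume it with the notifications below. example_handle and
 * example_queue_full() are hypothetical.
 *
 *	if (example_queue_full(dev))
 *		crypto_provider_notification(example_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *	else
 *		crypto_provider_notification(example_handle,
 *		    CRYPTO_PROVIDER_READY);
 */
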
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		if (error != CRYPTO_SUCCESS)
			sreq->sn_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(sreq->sn_provider);
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		if (error != CRYPTO_SUCCESS)
			areq->an_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(areq->an_provider);
		kcf_aop_done(areq, error);
	}
}

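/*
 * An illustrative sketch (not part of the framework): a hardware provider
 * completing work in its interrupt handler would report the result back to
 * the framework using the crypto_req_handle_t it was given in the SPI entry
 * point. example_job_t and its fields are hypothetical.
 *
 *	static void
 *	example_done_intr(example_job_t *job)
 *	{
 *		crypto_op_notification(job->ej_kcf_req,
 *		    job->ej_hw_error ? CRYPTO_FAILED : CRYPTO_SUCCESS);
 *	}
 */
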
/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	return (REQHNDL2_KMFLAG(handle));
}

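/*
 * An illustrative sketch (not part of the framework): a software provider
 * entry point would pass its crypto_req_handle_t argument to crypto_kmflag()
 * when allocating memory, so that a request issued from interrupt context
 * does not sleep. example_ctx_t is hypothetical; the entry-point signature
 * follows the usual encrypt_init SPI shape.
 *
 *	static int
 *	example_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mech,
 *	    crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
 *	    crypto_req_handle_t req)
 *	{
 *		example_ctx_t *ecp;
 *
 *		ecp = kmem_alloc(sizeof (*ecp), crypto_kmflag(req));
 *		if (ecp == NULL)
 *			return (CRYPTO_HOST_MEMORY);
 *		...
 *	}
 */
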
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;

	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}

	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;

			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			rand_mi = &desc->pd_mechanisms[mcount - 1];

			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}

	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
			/*
			 * We ask the provider to specify the limit
			 * per hash mechanism. But, in practice, a
			 * hardware limitation means all hash mechanisms
			 * will have the same maximum size allowed for
			 * input data. So, we make it a per provider
			 * limit to keep it simple.
			 */
			if (mi->cm_max_input_length == 0) {
				err = CRYPTO_ARGUMENTS_BAD;
				break;
			} else {
				desc->pd_hash_limit = mi->cm_max_input_length;
			}
		}

		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;

		if (pmd == NULL)
			continue;

		/* The provider will be used for this mechanism */
		desc_use_count++;
	}

	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);

	/*
	 * An error occurred while adding the mechanism, cleanup
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}

	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);

	return (CRYPTO_ARGUMENTS_BAD);
}

/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since this information is sensitive.
 * There are some cryptographic attacks (e.g. traffic analysis)
 * which can use this information.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_prov_stats_t *ks_data;
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
	ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
	ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
	ks_data->ps_ops_passed.value.ui64 =
	    pd->pd_sched_info.ks_ndispatches -
	    pd->pd_sched_info.ks_nfails -
	    pd->pd_sched_info.ks_nbusy_rval;

	return (0);
}


/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t mech_idx;

	/* remove the provider from the mechanisms tables */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
	    mech_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
	}

	/* remove provider from providers table */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold provider in providers table. We should not call
	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid which means it has an entry in the provider
	 * table.
	 */
	KCF_PROV_REFHOLD(pd);
	KCF_PROV_IREFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *new;

	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
	mutex_enter(&p2->pd_lock);
	new->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = new;
	KCF_PROV_IREFHOLD(p1);
	new->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	/* p1 is not in p2's provider list; nothing to remove */
	if (pl == NULL) {
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	KCF_PROV_IREFRELE(p1);
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	kcf_provider_desc_t *lp;
	crypto_provider_id_t handle;
	int count = info->pi_logical_provider_count;
	int i;

	/* add hardware provider to each logical provider */
	for (i = 0; i < count; i++) {
		handle = info->pi_logical_providers[i];
		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
		if (lp == NULL) {
			continue;
		}
		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * A hardware provider has to have the provider descriptor of
		 * every logical provider it belongs to, so it can be removed
		 * from the logical provider if the hardware provider
		 * unregisters from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_desc_t *p;
	kcf_provider_list_t *e, *next;

	mutex_enter(&pp->pd_lock);
	for (e = pp->pd_provider_list; e != NULL; e = next) {
		p = e->pl_provider;
		remove_provider_from_array(pp, p);
		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    p->pd_provider_list == NULL)
			p->pd_flags &= ~KCF_LPROV_MEMBER;
		KCF_PROV_IREFRELE(p);
		next = e->pl_next;
		kmem_free(e, sizeof (*e));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. The is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			(void) strlcpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}

	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In case of a logical provider, we need to send the event only
	 * for the logical provider and not for the underlying
	 * providers, which are identified by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}

static void
delete_kstat(kcf_provider_desc_t *desc)
{
	/* destroy the kstat created for this provider */
	if (desc->pd_kstat != NULL) {
		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

		/* release reference held by desc->pd_kstat->ks_private */
		ASSERT(desc == kspd);
		kstat_delete(kspd->pd_kstat);
		desc->pd_kstat = NULL;
		KCF_PROV_REFRELE(kspd);
		KCF_PROV_IREFRELE(kspd);
	}
}