1*906Sgm89044 /* 2*906Sgm89044 * CDDL HEADER START 3*906Sgm89044 * 4*906Sgm89044 * The contents of this file are subject to the terms of the 5*906Sgm89044 * Common Development and Distribution License (the "License"). 6*906Sgm89044 * You may not use this file except in compliance with the License. 7*906Sgm89044 * 8*906Sgm89044 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9*906Sgm89044 * or http://www.opensolaris.org/os/licensing. 10*906Sgm89044 * See the License for the specific language governing permissions 11*906Sgm89044 * and limitations under the License. 12*906Sgm89044 * 13*906Sgm89044 * When distributing Covered Code, include this CDDL HEADER in each 14*906Sgm89044 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15*906Sgm89044 * If applicable, add the following below this CDDL HEADER, with the 16*906Sgm89044 * fields enclosed by brackets "[]" replaced with your own identifying 17*906Sgm89044 * information: Portions Copyright [yyyy] [name of copyright owner] 18*906Sgm89044 * 19*906Sgm89044 * CDDL HEADER END 20*906Sgm89044 */ 21*906Sgm89044 22*906Sgm89044 /* 23*906Sgm89044 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*906Sgm89044 * Use is subject to license terms. 25*906Sgm89044 */ 26*906Sgm89044 27*906Sgm89044 #pragma ident "%Z%%M% %I% %E% SMI" 28*906Sgm89044 29*906Sgm89044 /* 30*906Sgm89044 * Deimos - cryptographic acceleration based upon Broadcom 582x. 
31*906Sgm89044 */ 32*906Sgm89044 33*906Sgm89044 #include <sys/types.h> 34*906Sgm89044 #include <sys/modctl.h> 35*906Sgm89044 #include <sys/conf.h> 36*906Sgm89044 #include <sys/devops.h> 37*906Sgm89044 #include <sys/ddi.h> 38*906Sgm89044 #include <sys/sunddi.h> 39*906Sgm89044 #include <sys/cmn_err.h> 40*906Sgm89044 #include <sys/varargs.h> 41*906Sgm89044 #include <sys/file.h> 42*906Sgm89044 #include <sys/stat.h> 43*906Sgm89044 #include <sys/kmem.h> 44*906Sgm89044 #include <sys/ioccom.h> 45*906Sgm89044 #include <sys/open.h> 46*906Sgm89044 #include <sys/cred.h> 47*906Sgm89044 #include <sys/kstat.h> 48*906Sgm89044 #include <sys/strsun.h> 49*906Sgm89044 #include <sys/note.h> 50*906Sgm89044 #include <sys/crypto/common.h> 51*906Sgm89044 #include <sys/crypto/spi.h> 52*906Sgm89044 #include <sys/ddifm.h> 53*906Sgm89044 #include <sys/fm/protocol.h> 54*906Sgm89044 #include <sys/fm/util.h> 55*906Sgm89044 #include <sys/fm/io/ddi.h> 56*906Sgm89044 #include <sys/crypto/dca.h> 57*906Sgm89044 58*906Sgm89044 /* 59*906Sgm89044 * Core Deimos driver. 
60*906Sgm89044 */ 61*906Sgm89044 62*906Sgm89044 static void dca_enlist2(dca_listnode_t *, dca_listnode_t *, 63*906Sgm89044 kmutex_t *); 64*906Sgm89044 static void dca_rmlist2(dca_listnode_t *node, kmutex_t *); 65*906Sgm89044 static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *); 66*906Sgm89044 static void dca_free_context_list(dca_t *dca); 67*906Sgm89044 static int dca_free_context_low(crypto_ctx_t *ctx); 68*906Sgm89044 static int dca_attach(dev_info_t *, ddi_attach_cmd_t); 69*906Sgm89044 static int dca_detach(dev_info_t *, ddi_detach_cmd_t); 70*906Sgm89044 static int dca_suspend(dca_t *); 71*906Sgm89044 static int dca_resume(dca_t *); 72*906Sgm89044 static int dca_init(dca_t *); 73*906Sgm89044 static int dca_reset(dca_t *, int); 74*906Sgm89044 static int dca_initworklist(dca_t *, dca_worklist_t *); 75*906Sgm89044 static void dca_uninit(dca_t *); 76*906Sgm89044 static void dca_initq(dca_listnode_t *); 77*906Sgm89044 static void dca_enqueue(dca_listnode_t *, dca_listnode_t *); 78*906Sgm89044 static dca_listnode_t *dca_dequeue(dca_listnode_t *); 79*906Sgm89044 static dca_listnode_t *dca_unqueue(dca_listnode_t *); 80*906Sgm89044 static dca_request_t *dca_newreq(dca_t *); 81*906Sgm89044 static dca_work_t *dca_getwork(dca_t *, int); 82*906Sgm89044 static void dca_freework(dca_work_t *); 83*906Sgm89044 static dca_work_t *dca_newwork(dca_t *); 84*906Sgm89044 static void dca_destroywork(dca_work_t *); 85*906Sgm89044 static void dca_schedule(dca_t *, int); 86*906Sgm89044 static void dca_reclaim(dca_t *, int); 87*906Sgm89044 static uint_t dca_intr(char *); 88*906Sgm89044 static void dca_failure(dca_t *, ddi_fault_location_t, 89*906Sgm89044 dca_fma_eclass_t index, uint64_t, int, char *, ...); 90*906Sgm89044 static void dca_jobtimeout(void *); 91*906Sgm89044 static int dca_drain(dca_t *); 92*906Sgm89044 static void dca_undrain(dca_t *); 93*906Sgm89044 static void dca_rejectjobs(dca_t *); 94*906Sgm89044 95*906Sgm89044 #ifdef SCHEDDELAY 96*906Sgm89044 static void 
dca_schedtimeout(void *); 97*906Sgm89044 #endif 98*906Sgm89044 99*906Sgm89044 /* 100*906Sgm89044 * We want these inlined for performance. 101*906Sgm89044 */ 102*906Sgm89044 #ifndef DEBUG 103*906Sgm89044 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork) 104*906Sgm89044 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done) 105*906Sgm89044 #pragma inline(dca_reverse, dca_length) 106*906Sgm89044 #endif 107*906Sgm89044 108*906Sgm89044 /* 109*906Sgm89044 * Device operations. 110*906Sgm89044 */ 111*906Sgm89044 static struct dev_ops devops = { 112*906Sgm89044 DEVO_REV, /* devo_rev */ 113*906Sgm89044 0, /* devo_refcnt */ 114*906Sgm89044 nodev, /* devo_getinfo */ 115*906Sgm89044 nulldev, /* devo_identify */ 116*906Sgm89044 nulldev, /* devo_probe */ 117*906Sgm89044 dca_attach, /* devo_attach */ 118*906Sgm89044 dca_detach, /* devo_detach */ 119*906Sgm89044 nodev, /* devo_reset */ 120*906Sgm89044 NULL, /* devo_cb_ops */ 121*906Sgm89044 NULL, /* devo_bus_ops */ 122*906Sgm89044 ddi_power /* devo_power */ 123*906Sgm89044 }; 124*906Sgm89044 125*906Sgm89044 #define IDENT "PCI Crypto Accelerator 2.0" 126*906Sgm89044 #define IDENT_SYM "Crypto Accel Sym 2.0" 127*906Sgm89044 #define IDENT_ASYM "Crypto Accel Asym 2.0" 128*906Sgm89044 129*906Sgm89044 /* Space-padded, will be filled in dynamically during registration */ 130*906Sgm89044 #define IDENT3 "PCI Crypto Accelerator Mod 2.0" 131*906Sgm89044 132*906Sgm89044 #define VENDOR "Sun Microsystems, Inc." 133*906Sgm89044 134*906Sgm89044 #define STALETIME (30 * SECOND) 135*906Sgm89044 136*906Sgm89044 #define crypto_prov_notify crypto_provider_notification 137*906Sgm89044 /* A 28 char function name doesn't leave much line space */ 138*906Sgm89044 139*906Sgm89044 /* 140*906Sgm89044 * Module linkage. 
141*906Sgm89044 */ 142*906Sgm89044 static struct modldrv modldrv = { 143*906Sgm89044 &mod_driverops, /* drv_modops */ 144*906Sgm89044 IDENT, /* drv_linkinfo */ 145*906Sgm89044 &devops, /* drv_dev_ops */ 146*906Sgm89044 }; 147*906Sgm89044 148*906Sgm89044 extern struct mod_ops mod_cryptoops; 149*906Sgm89044 150*906Sgm89044 static struct modlcrypto modlcrypto = { 151*906Sgm89044 &mod_cryptoops, 152*906Sgm89044 IDENT3 153*906Sgm89044 }; 154*906Sgm89044 155*906Sgm89044 static struct modlinkage modlinkage = { 156*906Sgm89044 MODREV_1, /* ml_rev */ 157*906Sgm89044 &modldrv, /* ml_linkage */ 158*906Sgm89044 &modlcrypto, 159*906Sgm89044 NULL 160*906Sgm89044 }; 161*906Sgm89044 162*906Sgm89044 /* 163*906Sgm89044 * CSPI information (entry points, provider info, etc.) 164*906Sgm89044 */ 165*906Sgm89044 166*906Sgm89044 /* Mechanisms for the symmetric cipher provider */ 167*906Sgm89044 static crypto_mech_info_t dca_mech_info_tab1[] = { 168*906Sgm89044 /* DES-CBC */ 169*906Sgm89044 {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE, 170*906Sgm89044 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | 171*906Sgm89044 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC, 172*906Sgm89044 DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, 173*906Sgm89044 /* 3DES-CBC */ 174*906Sgm89044 {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE, 175*906Sgm89044 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | 176*906Sgm89044 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC, 177*906Sgm89044 DES3_KEY_LEN, DES3_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES} 178*906Sgm89044 }; 179*906Sgm89044 180*906Sgm89044 /* Mechanisms for the asymmetric cipher provider */ 181*906Sgm89044 static crypto_mech_info_t dca_mech_info_tab2[] = { 182*906Sgm89044 /* DSA */ 183*906Sgm89044 {SUN_CKM_DSA, DSA_MECH_INFO_TYPE, 184*906Sgm89044 CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY | 185*906Sgm89044 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC, 186*906Sgm89044 DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8, 187*906Sgm89044 CRYPTO_KEYSIZE_UNIT_IN_BITS}, 188*906Sgm89044 
189*906Sgm89044 /* RSA */ 190*906Sgm89044 {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE, 191*906Sgm89044 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN | 192*906Sgm89044 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY | 193*906Sgm89044 CRYPTO_FG_VERIFY_RECOVER | 194*906Sgm89044 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | 195*906Sgm89044 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC | 196*906Sgm89044 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC, 197*906Sgm89044 RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8, 198*906Sgm89044 CRYPTO_KEYSIZE_UNIT_IN_BITS}, 199*906Sgm89044 {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE, 200*906Sgm89044 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN | 201*906Sgm89044 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY | 202*906Sgm89044 CRYPTO_FG_VERIFY_RECOVER | 203*906Sgm89044 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | 204*906Sgm89044 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC | 205*906Sgm89044 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC, 206*906Sgm89044 RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8, 207*906Sgm89044 CRYPTO_KEYSIZE_UNIT_IN_BITS} 208*906Sgm89044 }; 209*906Sgm89044 210*906Sgm89044 static void dca_provider_status(crypto_provider_handle_t, uint_t *); 211*906Sgm89044 212*906Sgm89044 static crypto_control_ops_t dca_control_ops = { 213*906Sgm89044 dca_provider_status 214*906Sgm89044 }; 215*906Sgm89044 216*906Sgm89044 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *, 217*906Sgm89044 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 218*906Sgm89044 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 219*906Sgm89044 crypto_req_handle_t); 220*906Sgm89044 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *, 221*906Sgm89044 crypto_data_t *, crypto_req_handle_t); 222*906Sgm89044 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *, 223*906Sgm89044 crypto_req_handle_t); 224*906Sgm89044 static int 
dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, 225*906Sgm89044 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 226*906Sgm89044 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 227*906Sgm89044 228*906Sgm89044 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *, 229*906Sgm89044 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 230*906Sgm89044 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 231*906Sgm89044 crypto_req_handle_t); 232*906Sgm89044 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *, 233*906Sgm89044 crypto_data_t *, crypto_req_handle_t); 234*906Sgm89044 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *, 235*906Sgm89044 crypto_req_handle_t); 236*906Sgm89044 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, 237*906Sgm89044 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 238*906Sgm89044 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 239*906Sgm89044 240*906Sgm89044 static crypto_cipher_ops_t dca_cipher_ops = { 241*906Sgm89044 dca_encrypt_init, 242*906Sgm89044 dca_encrypt, 243*906Sgm89044 dca_encrypt_update, 244*906Sgm89044 dca_encrypt_final, 245*906Sgm89044 dca_encrypt_atomic, 246*906Sgm89044 dca_decrypt_init, 247*906Sgm89044 dca_decrypt, 248*906Sgm89044 dca_decrypt_update, 249*906Sgm89044 dca_decrypt_final, 250*906Sgm89044 dca_decrypt_atomic 251*906Sgm89044 }; 252*906Sgm89044 253*906Sgm89044 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, 254*906Sgm89044 crypto_spi_ctx_template_t, crypto_req_handle_t); 255*906Sgm89044 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 256*906Sgm89044 crypto_req_handle_t); 257*906Sgm89044 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *, 258*906Sgm89044 crypto_req_handle_t); 259*906Sgm89044 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *, 260*906Sgm89044 crypto_req_handle_t); 261*906Sgm89044 
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t, 262*906Sgm89044 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, 263*906Sgm89044 crypto_spi_ctx_template_t, crypto_req_handle_t); 264*906Sgm89044 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *, 265*906Sgm89044 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 266*906Sgm89044 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 267*906Sgm89044 crypto_req_handle_t); 268*906Sgm89044 static int dca_sign_recover_atomic(crypto_provider_handle_t, 269*906Sgm89044 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 270*906Sgm89044 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 271*906Sgm89044 272*906Sgm89044 static crypto_sign_ops_t dca_sign_ops = { 273*906Sgm89044 dca_sign_init, 274*906Sgm89044 dca_sign, 275*906Sgm89044 dca_sign_update, 276*906Sgm89044 dca_sign_final, 277*906Sgm89044 dca_sign_atomic, 278*906Sgm89044 dca_sign_recover_init, 279*906Sgm89044 dca_sign_recover, 280*906Sgm89044 dca_sign_recover_atomic 281*906Sgm89044 }; 282*906Sgm89044 283*906Sgm89044 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *, 284*906Sgm89044 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 285*906Sgm89044 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 286*906Sgm89044 crypto_req_handle_t); 287*906Sgm89044 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *, 288*906Sgm89044 crypto_req_handle_t); 289*906Sgm89044 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *, 290*906Sgm89044 crypto_req_handle_t); 291*906Sgm89044 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, 292*906Sgm89044 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 293*906Sgm89044 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 294*906Sgm89044 static int dca_verify_recover_init(crypto_ctx_t *, 
crypto_mechanism_t *, 295*906Sgm89044 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 296*906Sgm89044 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *, 297*906Sgm89044 crypto_data_t *, crypto_req_handle_t); 298*906Sgm89044 static int dca_verify_recover_atomic(crypto_provider_handle_t, 299*906Sgm89044 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 300*906Sgm89044 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 301*906Sgm89044 302*906Sgm89044 static crypto_verify_ops_t dca_verify_ops = { 303*906Sgm89044 dca_verify_init, 304*906Sgm89044 dca_verify, 305*906Sgm89044 dca_verify_update, 306*906Sgm89044 dca_verify_final, 307*906Sgm89044 dca_verify_atomic, 308*906Sgm89044 dca_verify_recover_init, 309*906Sgm89044 dca_verify_recover, 310*906Sgm89044 dca_verify_recover_atomic 311*906Sgm89044 }; 312*906Sgm89044 313*906Sgm89044 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t, 314*906Sgm89044 uchar_t *, size_t, crypto_req_handle_t); 315*906Sgm89044 316*906Sgm89044 static crypto_random_number_ops_t dca_random_number_ops = { 317*906Sgm89044 NULL, 318*906Sgm89044 dca_generate_random 319*906Sgm89044 }; 320*906Sgm89044 321*906Sgm89044 static int ext_info_sym(crypto_provider_handle_t prov, 322*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq); 323*906Sgm89044 static int ext_info_asym(crypto_provider_handle_t prov, 324*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq); 325*906Sgm89044 static int ext_info_base(crypto_provider_handle_t prov, 326*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id); 327*906Sgm89044 328*906Sgm89044 static crypto_provider_management_ops_t dca_provmanage_ops_1 = { 329*906Sgm89044 ext_info_sym, /* ext_info */ 330*906Sgm89044 NULL, /* init_token */ 331*906Sgm89044 NULL, /* init_pin */ 332*906Sgm89044 NULL /* set_pin */ 333*906Sgm89044 }; 334*906Sgm89044 335*906Sgm89044 
static crypto_provider_management_ops_t dca_provmanage_ops_2 = { 336*906Sgm89044 ext_info_asym, /* ext_info */ 337*906Sgm89044 NULL, /* init_token */ 338*906Sgm89044 NULL, /* init_pin */ 339*906Sgm89044 NULL /* set_pin */ 340*906Sgm89044 }; 341*906Sgm89044 342*906Sgm89044 int dca_free_context(crypto_ctx_t *); 343*906Sgm89044 344*906Sgm89044 static crypto_ctx_ops_t dca_ctx_ops = { 345*906Sgm89044 NULL, 346*906Sgm89044 dca_free_context 347*906Sgm89044 }; 348*906Sgm89044 349*906Sgm89044 /* Operations for the symmetric cipher provider */ 350*906Sgm89044 static crypto_ops_t dca_crypto_ops1 = { 351*906Sgm89044 &dca_control_ops, 352*906Sgm89044 NULL, /* digest_ops */ 353*906Sgm89044 &dca_cipher_ops, 354*906Sgm89044 NULL, /* mac_ops */ 355*906Sgm89044 NULL, /* sign_ops */ 356*906Sgm89044 NULL, /* verify_ops */ 357*906Sgm89044 NULL, /* dual_ops */ 358*906Sgm89044 NULL, /* cipher_mac_ops */ 359*906Sgm89044 NULL, /* random_number_ops */ 360*906Sgm89044 NULL, /* session_ops */ 361*906Sgm89044 NULL, /* object_ops */ 362*906Sgm89044 NULL, /* key_ops */ 363*906Sgm89044 &dca_provmanage_ops_1, /* management_ops */ 364*906Sgm89044 &dca_ctx_ops 365*906Sgm89044 }; 366*906Sgm89044 367*906Sgm89044 /* Operations for the asymmetric cipher provider */ 368*906Sgm89044 static crypto_ops_t dca_crypto_ops2 = { 369*906Sgm89044 &dca_control_ops, 370*906Sgm89044 NULL, /* digest_ops */ 371*906Sgm89044 &dca_cipher_ops, 372*906Sgm89044 NULL, /* mac_ops */ 373*906Sgm89044 &dca_sign_ops, 374*906Sgm89044 &dca_verify_ops, 375*906Sgm89044 NULL, /* dual_ops */ 376*906Sgm89044 NULL, /* cipher_mac_ops */ 377*906Sgm89044 &dca_random_number_ops, 378*906Sgm89044 NULL, /* session_ops */ 379*906Sgm89044 NULL, /* object_ops */ 380*906Sgm89044 NULL, /* key_ops */ 381*906Sgm89044 &dca_provmanage_ops_2, /* management_ops */ 382*906Sgm89044 &dca_ctx_ops 383*906Sgm89044 }; 384*906Sgm89044 385*906Sgm89044 /* Provider information for the symmetric cipher provider */ 386*906Sgm89044 static crypto_provider_info_t 
dca_prov_info1 = { 387*906Sgm89044 CRYPTO_SPI_VERSION_1, 388*906Sgm89044 NULL, /* pi_provider_description */ 389*906Sgm89044 CRYPTO_HW_PROVIDER, 390*906Sgm89044 NULL, /* pi_provider_dev */ 391*906Sgm89044 NULL, /* pi_provider_handle */ 392*906Sgm89044 &dca_crypto_ops1, 393*906Sgm89044 sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t), 394*906Sgm89044 dca_mech_info_tab1, 395*906Sgm89044 0, /* pi_logical_provider_count */ 396*906Sgm89044 NULL /* pi_logical_providers */ 397*906Sgm89044 }; 398*906Sgm89044 399*906Sgm89044 /* Provider information for the asymmetric cipher provider */ 400*906Sgm89044 static crypto_provider_info_t dca_prov_info2 = { 401*906Sgm89044 CRYPTO_SPI_VERSION_1, 402*906Sgm89044 NULL, /* pi_provider_description */ 403*906Sgm89044 CRYPTO_HW_PROVIDER, 404*906Sgm89044 NULL, /* pi_provider_dev */ 405*906Sgm89044 NULL, /* pi_provider_handle */ 406*906Sgm89044 &dca_crypto_ops2, 407*906Sgm89044 sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t), 408*906Sgm89044 dca_mech_info_tab2, 409*906Sgm89044 0, /* pi_logical_provider_count */ 410*906Sgm89044 NULL /* pi_logical_providers */ 411*906Sgm89044 }; 412*906Sgm89044 413*906Sgm89044 /* Convenience macros */ 414*906Sgm89044 /* Retrieve the softc and instance number from a SPI crypto context */ 415*906Sgm89044 #define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \ 416*906Sgm89044 (softc) = (dca_t *)(ctx)->cc_provider; \ 417*906Sgm89044 (instance) = ddi_get_instance((softc)->dca_dip); \ 418*906Sgm89044 } 419*906Sgm89044 420*906Sgm89044 #define DCA_MECH_FROM_CTX(ctx) \ 421*906Sgm89044 (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type) 422*906Sgm89044 423*906Sgm89044 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset, 424*906Sgm89044 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags, 425*906Sgm89044 dca_chain_t *head, int *n_chain); 426*906Sgm89044 static uint64_t dca_ena(uint64_t ena); 427*906Sgm89044 static caddr_t dca_bufdaddr_out(crypto_data_t *data); 
428*906Sgm89044 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index); 429*906Sgm89044 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle, 430*906Sgm89044 dca_fma_eclass_t eclass_index); 431*906Sgm89044 432*906Sgm89044 static void dca_fma_init(dca_t *dca); 433*906Sgm89044 static void dca_fma_fini(dca_t *dca); 434*906Sgm89044 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 435*906Sgm89044 const void *impl_data); 436*906Sgm89044 437*906Sgm89044 438*906Sgm89044 static dca_device_t dca_devices[] = { 439*906Sgm89044 /* Broadcom vanilla variants */ 440*906Sgm89044 { 0x14e4, 0x5820, "Broadcom 5820" }, 441*906Sgm89044 { 0x14e4, 0x5821, "Broadcom 5821" }, 442*906Sgm89044 { 0x14e4, 0x5822, "Broadcom 5822" }, 443*906Sgm89044 { 0x14e4, 0x5825, "Broadcom 5825" }, 444*906Sgm89044 /* Sun specific OEMd variants */ 445*906Sgm89044 { 0x108e, 0x5454, "SCA" }, 446*906Sgm89044 { 0x108e, 0x5455, "SCA 1000" }, 447*906Sgm89044 { 0x108e, 0x5457, "SCA 500" }, 448*906Sgm89044 /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */ 449*906Sgm89044 { 0x108e, 0x1, "SCA 500" }, 450*906Sgm89044 }; 451*906Sgm89044 452*906Sgm89044 /* 453*906Sgm89044 * Device attributes. 
454*906Sgm89044 */ 455*906Sgm89044 static struct ddi_device_acc_attr dca_regsattr = { 456*906Sgm89044 DDI_DEVICE_ATTR_V0, 457*906Sgm89044 DDI_STRUCTURE_LE_ACC, 458*906Sgm89044 DDI_STRICTORDER_ACC, 459*906Sgm89044 DDI_FLAGERR_ACC 460*906Sgm89044 }; 461*906Sgm89044 462*906Sgm89044 static struct ddi_device_acc_attr dca_devattr = { 463*906Sgm89044 DDI_DEVICE_ATTR_V0, 464*906Sgm89044 DDI_STRUCTURE_LE_ACC, 465*906Sgm89044 DDI_STRICTORDER_ACC, 466*906Sgm89044 DDI_FLAGERR_ACC 467*906Sgm89044 }; 468*906Sgm89044 469*906Sgm89044 #if !defined(i386) && !defined(__i386) 470*906Sgm89044 static struct ddi_device_acc_attr dca_bufattr = { 471*906Sgm89044 DDI_DEVICE_ATTR_V0, 472*906Sgm89044 DDI_NEVERSWAP_ACC, 473*906Sgm89044 DDI_STRICTORDER_ACC, 474*906Sgm89044 DDI_FLAGERR_ACC 475*906Sgm89044 }; 476*906Sgm89044 #endif 477*906Sgm89044 478*906Sgm89044 static struct ddi_dma_attr dca_dmaattr = { 479*906Sgm89044 DMA_ATTR_V0, /* dma_attr_version */ 480*906Sgm89044 0x0, /* dma_attr_addr_lo */ 481*906Sgm89044 0xffffffffUL, /* dma_attr_addr_hi */ 482*906Sgm89044 0x00ffffffUL, /* dma_attr_count_max */ 483*906Sgm89044 0x40, /* dma_attr_align */ 484*906Sgm89044 0x40, /* dma_attr_burstsizes */ 485*906Sgm89044 0x1, /* dma_attr_minxfer */ 486*906Sgm89044 0x00ffffffUL, /* dma_attr_maxxfer */ 487*906Sgm89044 0xffffffffUL, /* dma_attr_seg */ 488*906Sgm89044 #if defined(i386) || defined(__i386) || defined(__amd64) 489*906Sgm89044 512, /* dma_attr_sgllen */ 490*906Sgm89044 #else 491*906Sgm89044 1, /* dma_attr_sgllen */ 492*906Sgm89044 #endif 493*906Sgm89044 1, /* dma_attr_granular */ 494*906Sgm89044 DDI_DMA_FLAGERR /* dma_attr_flags */ 495*906Sgm89044 }; 496*906Sgm89044 497*906Sgm89044 static void *dca_state = NULL; 498*906Sgm89044 int dca_mindma = 2500; 499*906Sgm89044 500*906Sgm89044 /* 501*906Sgm89044 * FMA eclass string definitions. Note that these string arrays must be 502*906Sgm89044 * consistent with the dca_fma_eclass_t enum. 
503*906Sgm89044 */ 504*906Sgm89044 static char *dca_fma_eclass_sca1000[] = { 505*906Sgm89044 "sca1000.hw.device", 506*906Sgm89044 "sca1000.hw.timeout", 507*906Sgm89044 "sca1000.none" 508*906Sgm89044 }; 509*906Sgm89044 510*906Sgm89044 static char *dca_fma_eclass_sca500[] = { 511*906Sgm89044 "sca500.hw.device", 512*906Sgm89044 "sca500.hw.timeout", 513*906Sgm89044 "sca500.none" 514*906Sgm89044 }; 515*906Sgm89044 516*906Sgm89044 /* 517*906Sgm89044 * DDI entry points. 518*906Sgm89044 */ 519*906Sgm89044 int 520*906Sgm89044 _init(void) 521*906Sgm89044 { 522*906Sgm89044 int rv; 523*906Sgm89044 524*906Sgm89044 DBG(NULL, DMOD, "dca: in _init"); 525*906Sgm89044 526*906Sgm89044 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) { 527*906Sgm89044 /* this should *never* happen! */ 528*906Sgm89044 return (rv); 529*906Sgm89044 } 530*906Sgm89044 531*906Sgm89044 if ((rv = mod_install(&modlinkage)) != 0) { 532*906Sgm89044 /* cleanup here */ 533*906Sgm89044 ddi_soft_state_fini(&dca_state); 534*906Sgm89044 return (rv); 535*906Sgm89044 } 536*906Sgm89044 537*906Sgm89044 return (0); 538*906Sgm89044 } 539*906Sgm89044 540*906Sgm89044 int 541*906Sgm89044 _fini(void) 542*906Sgm89044 { 543*906Sgm89044 int rv; 544*906Sgm89044 545*906Sgm89044 DBG(NULL, DMOD, "dca: in _fini"); 546*906Sgm89044 547*906Sgm89044 if ((rv = mod_remove(&modlinkage)) == 0) { 548*906Sgm89044 /* cleanup here */ 549*906Sgm89044 ddi_soft_state_fini(&dca_state); 550*906Sgm89044 } 551*906Sgm89044 return (rv); 552*906Sgm89044 } 553*906Sgm89044 554*906Sgm89044 int 555*906Sgm89044 _info(struct modinfo *modinfop) 556*906Sgm89044 { 557*906Sgm89044 DBG(NULL, DMOD, "dca: in _info"); 558*906Sgm89044 559*906Sgm89044 return (mod_info(&modlinkage, modinfop)); 560*906Sgm89044 } 561*906Sgm89044 562*906Sgm89044 int 563*906Sgm89044 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 564*906Sgm89044 { 565*906Sgm89044 ddi_acc_handle_t pci; 566*906Sgm89044 int instance; 567*906Sgm89044 ddi_iblock_cookie_t ibc; 568*906Sgm89044 int 
intr_added = 0; 569*906Sgm89044 dca_t *dca; 570*906Sgm89044 ushort_t venid; 571*906Sgm89044 ushort_t devid; 572*906Sgm89044 ushort_t revid; 573*906Sgm89044 ushort_t subsysid; 574*906Sgm89044 ushort_t subvenid; 575*906Sgm89044 int i; 576*906Sgm89044 int ret; 577*906Sgm89044 char ID[64]; 578*906Sgm89044 static char *unknowndev = "Unknown device"; 579*906Sgm89044 580*906Sgm89044 #if DEBUG 581*906Sgm89044 /* these are only used for debugging */ 582*906Sgm89044 ushort_t pcicomm; 583*906Sgm89044 ushort_t pcistat; 584*906Sgm89044 uchar_t cachelinesz; 585*906Sgm89044 uchar_t mingnt; 586*906Sgm89044 uchar_t maxlat; 587*906Sgm89044 uchar_t lattmr; 588*906Sgm89044 #endif 589*906Sgm89044 590*906Sgm89044 instance = ddi_get_instance(dip); 591*906Sgm89044 592*906Sgm89044 DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance); 593*906Sgm89044 594*906Sgm89044 switch (cmd) { 595*906Sgm89044 case DDI_RESUME: 596*906Sgm89044 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) { 597*906Sgm89044 dca_diperror(dip, "no soft state in detach"); 598*906Sgm89044 return (DDI_FAILURE); 599*906Sgm89044 } 600*906Sgm89044 /* assumption: we won't be DDI_DETACHed until we return */ 601*906Sgm89044 return (dca_resume(dca)); 602*906Sgm89044 case DDI_ATTACH: 603*906Sgm89044 break; 604*906Sgm89044 default: 605*906Sgm89044 return (DDI_FAILURE); 606*906Sgm89044 } 607*906Sgm89044 608*906Sgm89044 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 609*906Sgm89044 dca_diperror(dip, "slot does not support PCI bus-master"); 610*906Sgm89044 return (DDI_FAILURE); 611*906Sgm89044 } 612*906Sgm89044 613*906Sgm89044 if (ddi_intr_hilevel(dip, 0) != 0) { 614*906Sgm89044 dca_diperror(dip, "hilevel interrupts not supported"); 615*906Sgm89044 return (DDI_FAILURE); 616*906Sgm89044 } 617*906Sgm89044 618*906Sgm89044 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) { 619*906Sgm89044 dca_diperror(dip, "unable to setup PCI config handle"); 620*906Sgm89044 return (DDI_FAILURE); 621*906Sgm89044 } 622*906Sgm89044 623*906Sgm89044 /* 
common PCI attributes */ 624*906Sgm89044 venid = pci_config_get16(pci, PCI_VENID); 625*906Sgm89044 devid = pci_config_get16(pci, PCI_DEVID); 626*906Sgm89044 revid = pci_config_get8(pci, PCI_REVID); 627*906Sgm89044 subvenid = pci_config_get16(pci, PCI_SUBVENID); 628*906Sgm89044 subsysid = pci_config_get16(pci, PCI_SUBSYSID); 629*906Sgm89044 630*906Sgm89044 /* 631*906Sgm89044 * Broadcom-specific timings. 632*906Sgm89044 * We disable these timers/counters since they can cause 633*906Sgm89044 * incorrect false failures when the bus is just a little 634*906Sgm89044 * bit slow, or busy. 635*906Sgm89044 */ 636*906Sgm89044 pci_config_put8(pci, PCI_TRDYTO, 0); 637*906Sgm89044 pci_config_put8(pci, PCI_RETRIES, 0); 638*906Sgm89044 639*906Sgm89044 /* initialize PCI access settings */ 640*906Sgm89044 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE | 641*906Sgm89044 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE); 642*906Sgm89044 643*906Sgm89044 /* set up our PCI latency timer */ 644*906Sgm89044 pci_config_put8(pci, PCI_LATTMR, 0x40); 645*906Sgm89044 646*906Sgm89044 #if DEBUG 647*906Sgm89044 /* read registers (for debugging) */ 648*906Sgm89044 pcicomm = pci_config_get16(pci, PCI_COMM); 649*906Sgm89044 pcistat = pci_config_get16(pci, PCI_STATUS); 650*906Sgm89044 cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ); 651*906Sgm89044 mingnt = pci_config_get8(pci, PCI_MINGNT); 652*906Sgm89044 maxlat = pci_config_get8(pci, PCI_MAXLAT); 653*906Sgm89044 lattmr = pci_config_get8(pci, PCI_LATTMR); 654*906Sgm89044 #endif 655*906Sgm89044 656*906Sgm89044 pci_config_teardown(&pci); 657*906Sgm89044 658*906Sgm89044 if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) { 659*906Sgm89044 dca_diperror(dip, "unable to get iblock cookie"); 660*906Sgm89044 return (DDI_FAILURE); 661*906Sgm89044 } 662*906Sgm89044 663*906Sgm89044 if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) { 664*906Sgm89044 dca_diperror(dip, "unable to allocate soft state"); 665*906Sgm89044 return (DDI_FAILURE); 666*906Sgm89044 
} 667*906Sgm89044 668*906Sgm89044 dca = ddi_get_soft_state(dca_state, instance); 669*906Sgm89044 ASSERT(dca != NULL); 670*906Sgm89044 dca->dca_dip = dip; 671*906Sgm89044 WORKLIST(dca, MCR1)->dwl_prov = NULL; 672*906Sgm89044 WORKLIST(dca, MCR2)->dwl_prov = NULL; 673*906Sgm89044 /* figure pagesize */ 674*906Sgm89044 dca->dca_pagesize = ddi_ptob(dip, 1); 675*906Sgm89044 676*906Sgm89044 /* 677*906Sgm89044 * Search for the device in our supported devices table. This 678*906Sgm89044 * is here for two reasons. First, we want to ensure that 679*906Sgm89044 * only Sun-qualified (and presumably Sun-labeled) devices can 680*906Sgm89044 * be used with this driver. Second, some devices have 681*906Sgm89044 * specific differences. E.g. the 5821 has support for a 682*906Sgm89044 * special mode of RC4, deeper queues, power management, and 683*906Sgm89044 * other changes. Also, the export versions of some of these 684*906Sgm89044 * chips don't support RC4 or 3DES, so we catch that here. 685*906Sgm89044 * 686*906Sgm89044 * Note that we only look at the upper nibble of the device 687*906Sgm89044 * id, which is used to distinguish export vs. domestic 688*906Sgm89044 * versions of the chip. (The lower nibble is used for 689*906Sgm89044 * stepping information.) 690*906Sgm89044 */ 691*906Sgm89044 for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) { 692*906Sgm89044 /* 693*906Sgm89044 * Try to match the subsystem information first. 694*906Sgm89044 */ 695*906Sgm89044 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) && 696*906Sgm89044 subsysid && (subsysid == dca_devices[i].dd_device_id)) { 697*906Sgm89044 dca->dca_model = dca_devices[i].dd_model; 698*906Sgm89044 break; 699*906Sgm89044 } 700*906Sgm89044 /* 701*906Sgm89044 * Failing that, try the generic vendor and device id. 702*906Sgm89044 * Even if we find a match, we keep searching anyway, 703*906Sgm89044 * since we would prefer to find a match based on the 704*906Sgm89044 * subsystem ids. 
705*906Sgm89044 */ 706*906Sgm89044 if ((venid == dca_devices[i].dd_vendor_id) && 707*906Sgm89044 (devid == dca_devices[i].dd_device_id)) { 708*906Sgm89044 dca->dca_model = dca_devices[i].dd_model; 709*906Sgm89044 } 710*906Sgm89044 } 711*906Sgm89044 /* try and handle an unrecognized device */ 712*906Sgm89044 if (dca->dca_model == NULL) { 713*906Sgm89044 dca->dca_model = unknowndev; 714*906Sgm89044 dca_error(dca, "device not recognized, not supported"); 715*906Sgm89044 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d", 716*906Sgm89044 i, venid, devid, revid); 717*906Sgm89044 } 718*906Sgm89044 719*906Sgm89044 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description", 720*906Sgm89044 dca->dca_model) != DDI_SUCCESS) { 721*906Sgm89044 dca_error(dca, "unable to create description property"); 722*906Sgm89044 return (DDI_FAILURE); 723*906Sgm89044 } 724*906Sgm89044 725*906Sgm89044 DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x", 726*906Sgm89044 pcicomm, pcistat, cachelinesz); 727*906Sgm89044 DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x", 728*906Sgm89044 mingnt, maxlat, lattmr); 729*906Sgm89044 730*906Sgm89044 /* 731*906Sgm89044 * initialize locks, etc. 
732*906Sgm89044 */ 733*906Sgm89044 (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc); 734*906Sgm89044 735*906Sgm89044 /* use RNGSHA1 by default */ 736*906Sgm89044 if (ddi_getprop(DDI_DEV_T_ANY, dip, 737*906Sgm89044 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) { 738*906Sgm89044 dca->dca_flags |= DCA_RNGSHA1; 739*906Sgm89044 } 740*906Sgm89044 741*906Sgm89044 /* initialize FMA */ 742*906Sgm89044 dca_fma_init(dca); 743*906Sgm89044 744*906Sgm89044 /* initialize some key data structures */ 745*906Sgm89044 if (dca_init(dca) != DDI_SUCCESS) { 746*906Sgm89044 goto failed; 747*906Sgm89044 } 748*906Sgm89044 749*906Sgm89044 /* initialize kstats */ 750*906Sgm89044 dca_ksinit(dca); 751*906Sgm89044 752*906Sgm89044 /* setup access to registers */ 753*906Sgm89044 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs, 754*906Sgm89044 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) { 755*906Sgm89044 dca_error(dca, "unable to map registers"); 756*906Sgm89044 goto failed; 757*906Sgm89044 } 758*906Sgm89044 759*906Sgm89044 DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1)); 760*906Sgm89044 DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL)); 761*906Sgm89044 DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT)); 762*906Sgm89044 DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA)); 763*906Sgm89044 DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2)); 764*906Sgm89044 765*906Sgm89044 /* reset the chip */ 766*906Sgm89044 if (dca_reset(dca, 0) < 0) { 767*906Sgm89044 goto failed; 768*906Sgm89044 } 769*906Sgm89044 770*906Sgm89044 /* initialize the chip */ 771*906Sgm89044 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64); 772*906Sgm89044 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 773*906Sgm89044 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 774*906Sgm89044 goto failed; 775*906Sgm89044 } 776*906Sgm89044 777*906Sgm89044 /* add the interrupt */ 778*906Sgm89044 if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr, 779*906Sgm89044 
(void *)dca) != DDI_SUCCESS) { 780*906Sgm89044 DBG(dca, DWARN, "ddi_add_intr failed"); 781*906Sgm89044 goto failed; 782*906Sgm89044 } else { 783*906Sgm89044 intr_added = 1; 784*906Sgm89044 } 785*906Sgm89044 786*906Sgm89044 /* enable interrupts on the device */ 787*906Sgm89044 /* 788*906Sgm89044 * XXX: Note, 5820A1 errata indicates that this may clobber 789*906Sgm89044 * bits 24 and 23, which affect the speed of the RNG. Since 790*906Sgm89044 * we always want to run in full-speed mode, this should be 791*906Sgm89044 * harmless. 792*906Sgm89044 */ 793*906Sgm89044 SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 794*906Sgm89044 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 795*906Sgm89044 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 796*906Sgm89044 goto failed; 797*906Sgm89044 } 798*906Sgm89044 799*906Sgm89044 /* register MCR1 with the crypto framework */ 800*906Sgm89044 /* Be careful not to exceed 32 chars */ 801*906Sgm89044 (void) sprintf(ID, "%s/%d %s", 802*906Sgm89044 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM); 803*906Sgm89044 dca_prov_info1.pi_provider_description = ID; 804*906Sgm89044 dca_prov_info1.pi_provider_dev.pd_hw = dip; 805*906Sgm89044 dca_prov_info1.pi_provider_handle = dca; 806*906Sgm89044 if ((ret = crypto_register_provider(&dca_prov_info1, 807*906Sgm89044 &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) { 808*906Sgm89044 cmn_err(CE_WARN, 809*906Sgm89044 "crypto_register_provider() failed (%d) for MCR1", ret); 810*906Sgm89044 goto failed; 811*906Sgm89044 } 812*906Sgm89044 813*906Sgm89044 /* register MCR2 with the crypto framework */ 814*906Sgm89044 /* Be careful not to exceed 32 chars */ 815*906Sgm89044 (void) sprintf(ID, "%s/%d %s", 816*906Sgm89044 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM); 817*906Sgm89044 dca_prov_info2.pi_provider_description = ID; 818*906Sgm89044 dca_prov_info2.pi_provider_dev.pd_hw = dip; 819*906Sgm89044 dca_prov_info2.pi_provider_handle = dca; 820*906Sgm89044 if ((ret = 
crypto_register_provider(&dca_prov_info2, 821*906Sgm89044 &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) { 822*906Sgm89044 cmn_err(CE_WARN, 823*906Sgm89044 "crypto_register_provider() failed (%d) for MCR2", ret); 824*906Sgm89044 goto failed; 825*906Sgm89044 } 826*906Sgm89044 827*906Sgm89044 crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov, 828*906Sgm89044 CRYPTO_PROVIDER_READY); 829*906Sgm89044 crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov, 830*906Sgm89044 CRYPTO_PROVIDER_READY); 831*906Sgm89044 832*906Sgm89044 /* Initialize the local random number pool for this instance */ 833*906Sgm89044 if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) { 834*906Sgm89044 goto failed; 835*906Sgm89044 } 836*906Sgm89044 837*906Sgm89044 mutex_enter(&dca->dca_intrlock); 838*906Sgm89044 dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca, 839*906Sgm89044 drv_usectohz(SECOND)); 840*906Sgm89044 mutex_exit(&dca->dca_intrlock); 841*906Sgm89044 842*906Sgm89044 ddi_set_driver_private(dip, (caddr_t)dca); 843*906Sgm89044 844*906Sgm89044 ddi_report_dev(dip); 845*906Sgm89044 846*906Sgm89044 if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) { 847*906Sgm89044 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED); 848*906Sgm89044 } 849*906Sgm89044 850*906Sgm89044 return (DDI_SUCCESS); 851*906Sgm89044 852*906Sgm89044 failed: 853*906Sgm89044 /* unregister from the crypto framework */ 854*906Sgm89044 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) { 855*906Sgm89044 (void) crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov); 856*906Sgm89044 } 857*906Sgm89044 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) { 858*906Sgm89044 (void) crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov); 859*906Sgm89044 } 860*906Sgm89044 if (intr_added) { 861*906Sgm89044 CLRBIT(dca, CSR_DMACTL, 862*906Sgm89044 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 863*906Sgm89044 /* unregister intr handler */ 864*906Sgm89044 ddi_remove_intr(dip, 0, dca->dca_icookie); 865*906Sgm89044 } 866*906Sgm89044 if 
(dca->dca_regs_handle) {
		ddi_regs_map_free(&dca->dca_regs_handle);
	}
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	mutex_destroy(&dca->dca_intrlock);
	ddi_soft_state_free(dca_state, instance);
	return (DDI_FAILURE);

}

/*
 * detach(9E) entry point.  DDI_SUSPEND is delegated to dca_suspend();
 * DDI_DETACH unwinds the attach work: kCF unregistration first (so no
 * new jobs can arrive), then the context list, random pool, pending
 * jobs, the job timeout, device interrupts, the register mapping,
 * kstats, locks, and finally the soft state.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	dca_t		*dca;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		if (crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov) !=
		    CRYPTO_SUCCESS) {
			/* kCF refused; abort the detach entirely */
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		if (crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov) !=
		    CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list. Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/*
	 * untimeout the timeouts; clear dca_jobtid under the lock so the
	 * timeout handler cannot re-arm while we cancel it
	 */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle (after the last CSR access above) */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}

/*
 * DDI_RESUME handler.  Redo the PCI config-space programming and the
 * card CSR setup that attach(9E) originally performed, reset the chip,
 * and restart job scheduling.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t	pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}

/*
 * DDI_SUSPEND handler.  Drain all outstanding jobs, then quiesce the
 * hardware with a reset.  Fails if the drain fails.
 */
int
dca_suspend(dca_t *dca)
{
	if ((dca_drain(dca)) != 0) {
		return (DDI_FAILURE);
	}
	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during suspend");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
/*
 * Reset the crypto engine: write DMACTL_RESET and poll (in 100 usec
 * steps, up to 99 iterations) for the bit to self-clear.  When
 * failreset is nonzero, FMA access-handle checks and the failure
 * report are skipped (for resetting a device already known faulted).
 * Returns 0 on success, -1 on timeout/access failure/unmapped regs.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	/* nothing to reset if attach never mapped the registers */
	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}

	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t	dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		if ((dmactl & DMACTL_RESET) == 0) {
			/* hardware cleared the bit: reset complete */
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}
	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}

/*
 * Initialize one worklist: its locks, condition variable, queues, and
 * preallocated work and request structures.  Called per MCR from
 * dca_init().  Returns DDI_SUCCESS or DDI_FAILURE; on failure,
 * already-allocated items are left for later teardown.
 */
int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
	int	i;
	/* enough requests to fill every MCR plus the hiwater backlog */
	int	reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

	/*
	 * Set up work queue.
	 */
	mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
	    dca->dca_icookie);
	cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

	mutex_enter(&wlp->dwl_lock);

	dca_initq(&wlp->dwl_freereqs);
	dca_initq(&wlp->dwl_waitq);
	dca_initq(&wlp->dwl_freework);
	dca_initq(&wlp->dwl_runq);

	/* preallocate the fixed pool of MCR work structures */
	for (i = 0; i < MAXWORK; i++) {
		dca_work_t		*workp;

		if ((workp = dca_newwork(dca)) == NULL) {
			dca_error(dca, "unable to allocate work");
			mutex_exit(&wlp->dwl_lock);
			return (DDI_FAILURE);
		}
		workp->dw_wlp = wlp;
		dca_freework(workp);
	}
	mutex_exit(&wlp->dwl_lock);

	/* preallocate the request pool (dca_freereq takes its own lock) */
	for (i = 0; i < reqprealloc; i++) {
		dca_request_t *reqp;

		if ((reqp = dca_newreq(dca)) == NULL) {
			dca_error(dca, "unable to allocate request");
			return (DDI_FAILURE);
		}
		reqp->dr_dca = dca;
		reqp->dr_wlp = wlp;
		dca_freereq(reqp);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize per-instance data structures: the private context list
 * and the MCR1/MCR2 worklists.  Water marks and per-MCR request depth
 * are tunable via driver properties, clamped to MAXREQSPERMCR.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_init(dca_t *dca)
{
	dca_worklist_t		*wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Uninitialize worklists.  This routine should only be called when no
 * active jobs (hence DMA mappings) exist.  One way to ensure this is
 * to unregister from kCF before calling this routine.  (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
	int	mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		dca_request_t	*reqp;

		/*
		 * NOTE(review): worklist teardown is skipped entirely when
		 * registers were never mapped — presumably because attach
		 * failed before the worklists were usable; confirm against
		 * the attach failure paths.
		 */
		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		/* drain and destroy all free work structures */
		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		/* then all free requests (dca_getreq locks internally) */
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = NULL;
	}
}

/*
 * Append node at the tail of the secondary ("2") doubly-linked list
 * rooted at sentinel q, under the supplied lock.  No-op if either
 * pointer is NULL.
 */
static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
	if (!q || !node)
		return;

	mutex_enter(lock);
	node->dl_next2 = q;
	node->dl_prev2 = q->dl_prev2;
	node->dl_next2->dl_prev2 = node;
	node->dl_prev2->dl_next2 = node;
	mutex_exit(lock);
}

/*
 * Unlink node from its secondary list under the supplied lock and
 * NULL out its secondary link pointers.  No-op if node is NULL.
 */
static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
	if (!node)
		return;

	mutex_enter(lock);
	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);
}

/*
 * Remove and return the first node of the secondary list rooted at
 * sentinel q, under the supplied lock; NULL if the list is empty.
 */
static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
	dca_listnode_t	*node;

	mutex_enter(lock);
	if ((node = q->dl_next2) == q) {
		/* list is empty (sentinel points at itself) */
		mutex_exit(lock);
		return (NULL);
	}

	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);

	return (node);
}

/*
 * Initialize q as an empty circular list sentinel for both the
 * primary and secondary linkages.
 */
void
dca_initq(dca_listnode_t *q)
{
	q->dl_next = q;
	q->dl_prev = q;
	q->dl_next2 = q;
	q->dl_prev2 = q;
}

/*
 * Insert node at the tail of the (unlocked) primary list rooted at
 * sentinel q; callers provide their own locking.
 */
void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
	/*
	 * Enqueue submits at the "tail" of the list, i.e. just
	 * behind the sentinel.
	 */
	node->dl_next = q;
	node->dl_prev = q->dl_prev;
	node->dl_next->dl_prev = node;
	node->dl_prev->dl_next = node;
}

/*
 * Unlink node from its primary list and NULL out its link pointers.
 */
void
dca_rmqueue(dca_listnode_t *node)
{
	node->dl_next->dl_prev = node->dl_prev;
	node->dl_prev->dl_next = node->dl_next;
	node->dl_next = NULL;
	node->dl_prev = NULL;
}

/*
 * Remove and return the node at the head of queue q (FIFO order),
 * or NULL if the queue is empty.
 */
dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * Dequeue takes from the "head" of the list, i.e. just after
	 * the sentinel.
	 */
	if ((node = q->dl_next) == q) {
		/* queue is empty */
		return (NULL);
	}
	dca_rmqueue(node);
	return (node);
}

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * unqueue takes from the "tail" of the list, i.e. just before
	 * the sentinel.
1337*906Sgm89044 */ 1338*906Sgm89044 if ((node = q->dl_prev) == q) {; 1339*906Sgm89044 /* queue is empty */ 1340*906Sgm89044 return (NULL); 1341*906Sgm89044 } 1342*906Sgm89044 dca_rmqueue(node); 1343*906Sgm89044 return (node); 1344*906Sgm89044 } 1345*906Sgm89044 1346*906Sgm89044 dca_listnode_t * 1347*906Sgm89044 dca_peekqueue(dca_listnode_t *q) 1348*906Sgm89044 { 1349*906Sgm89044 dca_listnode_t *node; 1350*906Sgm89044 1351*906Sgm89044 if ((node = q->dl_next) == q) { 1352*906Sgm89044 return (NULL); 1353*906Sgm89044 } else { 1354*906Sgm89044 return (node); 1355*906Sgm89044 } 1356*906Sgm89044 } 1357*906Sgm89044 1358*906Sgm89044 /* 1359*906Sgm89044 * Interrupt service routine. 1360*906Sgm89044 */ 1361*906Sgm89044 uint_t 1362*906Sgm89044 dca_intr(char *arg) 1363*906Sgm89044 { 1364*906Sgm89044 dca_t *dca = (dca_t *)arg; 1365*906Sgm89044 uint32_t status; 1366*906Sgm89044 1367*906Sgm89044 mutex_enter(&dca->dca_intrlock); 1368*906Sgm89044 status = GETCSR(dca, CSR_DMASTAT); 1369*906Sgm89044 PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS); 1370*906Sgm89044 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1371*906Sgm89044 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 1372*906Sgm89044 mutex_exit(&dca->dca_intrlock); 1373*906Sgm89044 return ((uint_t)DDI_FAILURE); 1374*906Sgm89044 } 1375*906Sgm89044 1376*906Sgm89044 DBG(dca, DINTR, "interrupted, status = 0x%x!", status); 1377*906Sgm89044 1378*906Sgm89044 if ((status & DMASTAT_INTERRUPTS) == 0) { 1379*906Sgm89044 /* increment spurious interrupt kstat */ 1380*906Sgm89044 if (dca->dca_intrstats) { 1381*906Sgm89044 KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++; 1382*906Sgm89044 } 1383*906Sgm89044 mutex_exit(&dca->dca_intrlock); 1384*906Sgm89044 return (DDI_INTR_UNCLAIMED); 1385*906Sgm89044 } 1386*906Sgm89044 1387*906Sgm89044 if (dca->dca_intrstats) { 1388*906Sgm89044 KIOIP(dca)->intrs[KSTAT_INTR_HARD]++; 1389*906Sgm89044 } 1390*906Sgm89044 if (status & DMASTAT_MCR1INT) { 1391*906Sgm89044 DBG(dca, DINTR, "MCR1 interrupted"); 
1392*906Sgm89044 mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock)); 1393*906Sgm89044 dca_schedule(dca, MCR1); 1394*906Sgm89044 dca_reclaim(dca, MCR1); 1395*906Sgm89044 mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock)); 1396*906Sgm89044 } 1397*906Sgm89044 1398*906Sgm89044 if (status & DMASTAT_MCR2INT) { 1399*906Sgm89044 DBG(dca, DINTR, "MCR2 interrupted"); 1400*906Sgm89044 mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock)); 1401*906Sgm89044 dca_schedule(dca, MCR2); 1402*906Sgm89044 dca_reclaim(dca, MCR2); 1403*906Sgm89044 mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock)); 1404*906Sgm89044 } 1405*906Sgm89044 1406*906Sgm89044 if (status & DMASTAT_ERRINT) { 1407*906Sgm89044 uint32_t erraddr; 1408*906Sgm89044 erraddr = GETCSR(dca, CSR_DMAEA); 1409*906Sgm89044 mutex_exit(&dca->dca_intrlock); 1410*906Sgm89044 1411*906Sgm89044 /* 1412*906Sgm89044 * bit 1 of the error address indicates failure during 1413*906Sgm89044 * read if set, during write otherwise. 1414*906Sgm89044 */ 1415*906Sgm89044 dca_failure(dca, DDI_DEVICE_FAULT, 1416*906Sgm89044 DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR, 1417*906Sgm89044 "DMA master access error %s address 0x%x", 1418*906Sgm89044 erraddr & 0x1 ? "reading" : "writing", erraddr & ~1); 1419*906Sgm89044 return (DDI_INTR_CLAIMED); 1420*906Sgm89044 } 1421*906Sgm89044 1422*906Sgm89044 mutex_exit(&dca->dca_intrlock); 1423*906Sgm89044 1424*906Sgm89044 return (DDI_INTR_CLAIMED); 1425*906Sgm89044 } 1426*906Sgm89044 1427*906Sgm89044 /* 1428*906Sgm89044 * Reverse a string of bytes from s1 into s2. The reversal happens 1429*906Sgm89044 * from the tail of s1. If len1 < len2, then null bytes will be 1430*906Sgm89044 * padded to the end of s2. If len2 < len1, then (presumably null) 1431*906Sgm89044 * bytes will be dropped from the start of s1. 
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
	caddr_t	src, dst;

	/* empty source: destination becomes all zeros */
	if (len1 == 0) {
		if (len2) {
			bzero(s2, len2);
		}
		return;
	}
	/* copy backwards from the tail of s1 until s1 or s2 runs out */
	src = (caddr_t)s1 + len1 - 1;
	dst = s2;
	while ((src >= (caddr_t)s1) && (len2)) {
		*dst++ = *src--;
		len2--;
	}
	/* zero-pad whatever remains of the destination */
	while (len2 > 0) {
		*dst++ = 0;
		len2--;
	}
}

/*
 * Round a bit count up to the smallest "full-size" operand the
 * hardware supports (512/768/1024/1536/2048 bits) and return it via
 * BITS2BYTES(); returns 0 if num exceeds 2048 bits.
 */
uint16_t
dca_padfull(int num)
{
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	if (num <= 1536) {
		return (BITS2BYTES(1536));
	}
	if (num <= 2048) {
		return (BITS2BYTES(2048));
	}
	return (0);
}

/*
 * Like dca_padfull(), but for the half-size operand set
 * (256/384/512/768/1024 bits); returns 0 if num exceeds 1024 bits.
 */
uint16_t
dca_padhalf(int num)
{
	if (num <= 256) {
		return (BITS2BYTES(256));
	}
	if (num <= 384) {
		return (BITS2BYTES(384));
	}
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	return (0);
}

/*
 * Allocate a work structure and its page-rounded, DMA-mapped MCR
 * backing memory.  Returns NULL on any failure, after unwinding the
 * partial allocation via dca_destroywork().
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t	*workp;
	size_t		size;
	ddi_dma_cookie_t	c;
	unsigned	nc;
	int		rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	/* round up to whole pages (driver hardening) */
	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	/* remember the device-visible address of the MCR */
	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}

/*
 * Tear down a work structure in the reverse order of dca_newwork():
 * unbind the DMA mapping, free the DMA memory, free the handle, then
 * the structure itself.  Each step is guarded, so this is safe on a
 * partially-constructed work structure.
 */
void
dca_destroywork(dca_work_t *workp)
{
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}

/*
 * Allocate a request structure along with its DMA resources: a
 * context/descriptor region plus handles for the input/output scratch
 * buffers and chains.  Returns NULL on failure, after unwinding via
 * dca_destroyreq().
 */
dca_request_t *
dca_newreq(dca_t *dca)
{
	dca_request_t	*reqp;
	size_t		size;
	ddi_dma_cookie_t	c;
	unsigned	nc;
	int		rv;
	int		n_chain = 0;

	size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

	reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

	reqp->dr_dca = dca;

	/*
	 * Setup the DMA region for the context and descriptors.
1584*906Sgm89044 */ 1585*906Sgm89044 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP, 1586*906Sgm89044 NULL, &reqp->dr_ctx_dmah); 1587*906Sgm89044 if (rv != DDI_SUCCESS) { 1588*906Sgm89044 dca_error(dca, "failure allocating request DMA handle"); 1589*906Sgm89044 dca_destroyreq(reqp); 1590*906Sgm89044 return (NULL); 1591*906Sgm89044 } 1592*906Sgm89044 1593*906Sgm89044 /* for driver hardening, allocate in whole pages */ 1594*906Sgm89044 rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah, 1595*906Sgm89044 ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT, 1596*906Sgm89044 DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size, 1597*906Sgm89044 &reqp->dr_ctx_acch); 1598*906Sgm89044 if (rv != DDI_SUCCESS) { 1599*906Sgm89044 dca_error(dca, "unable to alloc request DMA memory"); 1600*906Sgm89044 dca_destroyreq(reqp); 1601*906Sgm89044 return (NULL); 1602*906Sgm89044 } 1603*906Sgm89044 1604*906Sgm89044 rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL, 1605*906Sgm89044 reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE, 1606*906Sgm89044 DDI_DMA_SLEEP, 0, &c, &nc); 1607*906Sgm89044 if (rv != DDI_DMA_MAPPED) { 1608*906Sgm89044 dca_error(dca, "failed binding request DMA handle"); 1609*906Sgm89044 dca_destroyreq(reqp); 1610*906Sgm89044 return (NULL); 1611*906Sgm89044 } 1612*906Sgm89044 reqp->dr_ctx_paddr = c.dmac_address; 1613*906Sgm89044 1614*906Sgm89044 reqp->dr_dma_size = size; 1615*906Sgm89044 1616*906Sgm89044 /* 1617*906Sgm89044 * Set up the dma for our scratch/shared buffers. 
1618*906Sgm89044 */ 1619*906Sgm89044 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1620*906Sgm89044 DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah); 1621*906Sgm89044 if (rv != DDI_SUCCESS) { 1622*906Sgm89044 dca_error(dca, "failure allocating ibuf DMA handle"); 1623*906Sgm89044 dca_destroyreq(reqp); 1624*906Sgm89044 return (NULL); 1625*906Sgm89044 } 1626*906Sgm89044 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1627*906Sgm89044 DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah); 1628*906Sgm89044 if (rv != DDI_SUCCESS) { 1629*906Sgm89044 dca_error(dca, "failure allocating obuf DMA handle"); 1630*906Sgm89044 dca_destroyreq(reqp); 1631*906Sgm89044 return (NULL); 1632*906Sgm89044 } 1633*906Sgm89044 1634*906Sgm89044 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1635*906Sgm89044 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah); 1636*906Sgm89044 if (rv != DDI_SUCCESS) { 1637*906Sgm89044 dca_error(dca, "failure allocating chain_in DMA handle"); 1638*906Sgm89044 dca_destroyreq(reqp); 1639*906Sgm89044 return (NULL); 1640*906Sgm89044 } 1641*906Sgm89044 1642*906Sgm89044 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1643*906Sgm89044 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah); 1644*906Sgm89044 if (rv != DDI_SUCCESS) { 1645*906Sgm89044 dca_error(dca, "failure allocating chain_out DMA handle"); 1646*906Sgm89044 dca_destroyreq(reqp); 1647*906Sgm89044 return (NULL); 1648*906Sgm89044 } 1649*906Sgm89044 1650*906Sgm89044 /* 1651*906Sgm89044 * for driver hardening, allocate in whole pages. 1652*906Sgm89044 */ 1653*906Sgm89044 size = ROUNDUP(MAXPACKET, dca->dca_pagesize); 1654*906Sgm89044 #if defined(i386) || defined(__i386) 1655*906Sgm89044 /* 1656*906Sgm89044 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter 1657*906Sgm89044 * may fail on x86 platform if a physically contigous memory chunk 1658*906Sgm89044 * cannot be found. From initial testing, we did not see performance 1659*906Sgm89044 * degration as seen on Sparc. 
1660*906Sgm89044 */ 1661*906Sgm89044 if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) { 1662*906Sgm89044 dca_error(dca, "unable to alloc request ibuf memory"); 1663*906Sgm89044 dca_destroyreq(reqp); 1664*906Sgm89044 return (NULL); 1665*906Sgm89044 } 1666*906Sgm89044 if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) { 1667*906Sgm89044 dca_error(dca, "unable to alloc request obuf memory"); 1668*906Sgm89044 dca_destroyreq(reqp); 1669*906Sgm89044 return (NULL); 1670*906Sgm89044 } 1671*906Sgm89044 #else 1672*906Sgm89044 /* 1673*906Sgm89044 * We could kmem_alloc for sparc too. However, it gives worse 1674*906Sgm89044 * performance when transfering more than one page data. For example, 1675*906Sgm89044 * using 4 threads and 12032 byte data and 3DES on 900MHZ sparc system, 1676*906Sgm89044 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for 1677*906Sgm89044 * the same throughput. 1678*906Sgm89044 */ 1679*906Sgm89044 rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah, 1680*906Sgm89044 size, &dca_bufattr, 1681*906Sgm89044 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr, 1682*906Sgm89044 &size, &reqp->dr_ibuf_acch); 1683*906Sgm89044 if (rv != DDI_SUCCESS) { 1684*906Sgm89044 dca_error(dca, "unable to alloc request DMA memory"); 1685*906Sgm89044 dca_destroyreq(reqp); 1686*906Sgm89044 return (NULL); 1687*906Sgm89044 } 1688*906Sgm89044 1689*906Sgm89044 rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah, 1690*906Sgm89044 size, &dca_bufattr, 1691*906Sgm89044 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr, 1692*906Sgm89044 &size, &reqp->dr_obuf_acch); 1693*906Sgm89044 if (rv != DDI_SUCCESS) { 1694*906Sgm89044 dca_error(dca, "unable to alloc request DMA memory"); 1695*906Sgm89044 dca_destroyreq(reqp); 1696*906Sgm89044 return (NULL); 1697*906Sgm89044 } 1698*906Sgm89044 #endif 1699*906Sgm89044 1700*906Sgm89044 /* Skip the used portion in the context page */ 1701*906Sgm89044 reqp->dr_offset = CTX_MAXLENGTH; 1702*906Sgm89044 if ((rv = 
dca_bindchains_one(reqp, size, reqp->dr_offset, 1703*906Sgm89044 reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah, 1704*906Sgm89044 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1705*906Sgm89044 &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) { 1706*906Sgm89044 (void) dca_destroyreq(reqp); 1707*906Sgm89044 return (NULL); 1708*906Sgm89044 } 1709*906Sgm89044 reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr; 1710*906Sgm89044 /* Skip the space used by the input buffer */ 1711*906Sgm89044 reqp->dr_offset += DESC_SIZE * n_chain; 1712*906Sgm89044 1713*906Sgm89044 if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset, 1714*906Sgm89044 reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah, 1715*906Sgm89044 DDI_DMA_READ | DDI_DMA_STREAMING, 1716*906Sgm89044 &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) { 1717*906Sgm89044 (void) dca_destroyreq(reqp); 1718*906Sgm89044 return (NULL); 1719*906Sgm89044 } 1720*906Sgm89044 reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr; 1721*906Sgm89044 /* Skip the space used by the output buffer */ 1722*906Sgm89044 reqp->dr_offset += DESC_SIZE * n_chain; 1723*906Sgm89044 1724*906Sgm89044 DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d", 1725*906Sgm89044 reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH); 1726*906Sgm89044 return (reqp); 1727*906Sgm89044 } 1728*906Sgm89044 1729*906Sgm89044 void 1730*906Sgm89044 dca_destroyreq(dca_request_t *reqp) 1731*906Sgm89044 { 1732*906Sgm89044 #if defined(i386) || defined(__i386) 1733*906Sgm89044 dca_t *dca = reqp->dr_dca; 1734*906Sgm89044 size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize); 1735*906Sgm89044 #endif 1736*906Sgm89044 1737*906Sgm89044 /* 1738*906Sgm89044 * Clean up DMA for the context structure. 
1739*906Sgm89044 */ 1740*906Sgm89044 if (reqp->dr_ctx_paddr) { 1741*906Sgm89044 (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah); 1742*906Sgm89044 } 1743*906Sgm89044 1744*906Sgm89044 if (reqp->dr_ctx_acch) { 1745*906Sgm89044 ddi_dma_mem_free(&reqp->dr_ctx_acch); 1746*906Sgm89044 } 1747*906Sgm89044 1748*906Sgm89044 if (reqp->dr_ctx_dmah) { 1749*906Sgm89044 ddi_dma_free_handle(&reqp->dr_ctx_dmah); 1750*906Sgm89044 } 1751*906Sgm89044 1752*906Sgm89044 /* 1753*906Sgm89044 * Clean up DMA for the scratch buffer. 1754*906Sgm89044 */ 1755*906Sgm89044 #if defined(i386) || defined(__i386) 1756*906Sgm89044 if (reqp->dr_ibuf_dmah) { 1757*906Sgm89044 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah); 1758*906Sgm89044 ddi_dma_free_handle(&reqp->dr_ibuf_dmah); 1759*906Sgm89044 } 1760*906Sgm89044 if (reqp->dr_obuf_dmah) { 1761*906Sgm89044 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah); 1762*906Sgm89044 ddi_dma_free_handle(&reqp->dr_obuf_dmah); 1763*906Sgm89044 } 1764*906Sgm89044 1765*906Sgm89044 kmem_free(reqp->dr_ibuf_kaddr, size); 1766*906Sgm89044 kmem_free(reqp->dr_obuf_kaddr, size); 1767*906Sgm89044 #else 1768*906Sgm89044 if (reqp->dr_ibuf_paddr) { 1769*906Sgm89044 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah); 1770*906Sgm89044 } 1771*906Sgm89044 if (reqp->dr_obuf_paddr) { 1772*906Sgm89044 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah); 1773*906Sgm89044 } 1774*906Sgm89044 1775*906Sgm89044 if (reqp->dr_ibuf_acch) { 1776*906Sgm89044 ddi_dma_mem_free(&reqp->dr_ibuf_acch); 1777*906Sgm89044 } 1778*906Sgm89044 if (reqp->dr_obuf_acch) { 1779*906Sgm89044 ddi_dma_mem_free(&reqp->dr_obuf_acch); 1780*906Sgm89044 } 1781*906Sgm89044 1782*906Sgm89044 if (reqp->dr_ibuf_dmah) { 1783*906Sgm89044 ddi_dma_free_handle(&reqp->dr_ibuf_dmah); 1784*906Sgm89044 } 1785*906Sgm89044 if (reqp->dr_obuf_dmah) { 1786*906Sgm89044 ddi_dma_free_handle(&reqp->dr_obuf_dmah); 1787*906Sgm89044 } 1788*906Sgm89044 #endif 1789*906Sgm89044 /* 1790*906Sgm89044 * These two DMA handles should have been unbinded in 
1791*906Sgm89044 * dca_unbindchains() function 1792*906Sgm89044 */ 1793*906Sgm89044 if (reqp->dr_chain_in_dmah) { 1794*906Sgm89044 ddi_dma_free_handle(&reqp->dr_chain_in_dmah); 1795*906Sgm89044 } 1796*906Sgm89044 if (reqp->dr_chain_out_dmah) { 1797*906Sgm89044 ddi_dma_free_handle(&reqp->dr_chain_out_dmah); 1798*906Sgm89044 } 1799*906Sgm89044 1800*906Sgm89044 kmem_free(reqp, sizeof (dca_request_t)); 1801*906Sgm89044 } 1802*906Sgm89044 1803*906Sgm89044 dca_work_t * 1804*906Sgm89044 dca_getwork(dca_t *dca, int mcr) 1805*906Sgm89044 { 1806*906Sgm89044 dca_worklist_t *wlp = WORKLIST(dca, mcr); 1807*906Sgm89044 dca_work_t *workp; 1808*906Sgm89044 1809*906Sgm89044 ASSERT(mutex_owned(&wlp->dwl_lock)); 1810*906Sgm89044 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework); 1811*906Sgm89044 if (workp) { 1812*906Sgm89044 int nreqs; 1813*906Sgm89044 bzero(workp->dw_mcr_kaddr, 8); 1814*906Sgm89044 1815*906Sgm89044 /* clear out old requests */ 1816*906Sgm89044 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) { 1817*906Sgm89044 workp->dw_reqs[nreqs] = NULL; 1818*906Sgm89044 } 1819*906Sgm89044 } 1820*906Sgm89044 return (workp); 1821*906Sgm89044 } 1822*906Sgm89044 1823*906Sgm89044 void 1824*906Sgm89044 dca_freework(dca_work_t *workp) 1825*906Sgm89044 { 1826*906Sgm89044 ASSERT(mutex_owned(&workp->dw_wlp->dwl_lock)); 1827*906Sgm89044 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp); 1828*906Sgm89044 } 1829*906Sgm89044 1830*906Sgm89044 dca_request_t * 1831*906Sgm89044 dca_getreq(dca_t *dca, int mcr, int tryhard) 1832*906Sgm89044 { 1833*906Sgm89044 dca_worklist_t *wlp = WORKLIST(dca, mcr); 1834*906Sgm89044 dca_request_t *reqp; 1835*906Sgm89044 1836*906Sgm89044 mutex_enter(&wlp->dwl_freereqslock); 1837*906Sgm89044 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs); 1838*906Sgm89044 mutex_exit(&wlp->dwl_freereqslock); 1839*906Sgm89044 if (reqp) { 1840*906Sgm89044 reqp->dr_flags = 0; 1841*906Sgm89044 reqp->dr_callback = NULL; 1842*906Sgm89044 } else if (tryhard) { 
1843*906Sgm89044 /* 1844*906Sgm89044 * failed to get a free one, try an allocation, the hard way. 1845*906Sgm89044 * XXX: Kstat desired here. 1846*906Sgm89044 */ 1847*906Sgm89044 if ((reqp = dca_newreq(dca)) != NULL) { 1848*906Sgm89044 reqp->dr_wlp = wlp; 1849*906Sgm89044 reqp->dr_dca = dca; 1850*906Sgm89044 reqp->dr_flags = 0; 1851*906Sgm89044 reqp->dr_callback = NULL; 1852*906Sgm89044 } 1853*906Sgm89044 } 1854*906Sgm89044 return (reqp); 1855*906Sgm89044 } 1856*906Sgm89044 1857*906Sgm89044 void 1858*906Sgm89044 dca_freereq(dca_request_t *reqp) 1859*906Sgm89044 { 1860*906Sgm89044 reqp->dr_kcf_req = NULL; 1861*906Sgm89044 if (!(reqp->dr_flags & DR_NOCACHE)) { 1862*906Sgm89044 mutex_enter(&reqp->dr_wlp->dwl_freereqslock); 1863*906Sgm89044 dca_enqueue(&reqp->dr_wlp->dwl_freereqs, 1864*906Sgm89044 (dca_listnode_t *)reqp); 1865*906Sgm89044 mutex_exit(&reqp->dr_wlp->dwl_freereqslock); 1866*906Sgm89044 } 1867*906Sgm89044 } 1868*906Sgm89044 1869*906Sgm89044 /* 1870*906Sgm89044 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer 1871*906Sgm89044 * is mapped to a single physicall address. On x86, a user buffer is mapped 1872*906Sgm89044 * to multiple physically addresses. 
 These physical addresses are chained
 * using the method specified in the Broadcom BCM5820 specification.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
	int	rv;
	caddr_t	kaddr;
	uint_t	flags;
	int	n_chain = 0;

	/* in-place operations read and write through the same binding */
	if (reqp->dr_flags & DR_INPLACE) {
		flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
	} else {
		flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
	}

	/* first the input */
	if (incnt) {
		if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			return (DDI_FAILURE);
		}
		if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
		    kaddr, reqp->dr_chain_in_dmah, flags,
		    &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (rv);
		}

		/*
		 * The offset and length are altered by the calling routine
		 * reqp->dr_in->cd_offset += incnt;
		 * reqp->dr_in->cd_length -= incnt;
		 */
		/* Save the first one in the chain for MCR */
		reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
		reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
		reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
	} else {
		reqp->dr_in_paddr = NULL;
		reqp->dr_in_next = 0;
		reqp->dr_in_len = 0;
	}

	/* in-place: the output chain is simply the input chain */
	if (reqp->dr_flags & DR_INPLACE) {
		reqp->dr_out_paddr = reqp->dr_in_paddr;
		reqp->dr_out_len = reqp->dr_in_len;
		reqp->dr_out_next = reqp->dr_in_next;
		return (DDI_SUCCESS);
	}

	/* then the output */
	if (outcnt) {
		flags = DDI_DMA_READ | DDI_DMA_STREAMING;
		if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}
		/* output descriptors follow the input chain's descriptors */
		rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
		    n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
		    flags, &reqp->dr_chain_out_head, &n_chain);
		if (rv != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}

		/* Save the first one in the chain for MCR */
		reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
		reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
		reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
	} else {
		reqp->dr_out_paddr = NULL;
		reqp->dr_out_next = 0;
		reqp->dr_out_len = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * Unbind the user buffers from the DMA handles.
 */
int
dca_unbindchains(dca_request_t *reqp)
{
	/*
	 * NOTE(review): rv and rv1 are assigned only here and never
	 * updated, so this function effectively always returns
	 * DDI_SUCCESS; the unbind results are deliberately discarded.
	 */
	int rv = DDI_SUCCESS;
	int rv1 = DDI_SUCCESS;

	/* Clear the input chain */
	if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
		reqp->dr_chain_in_head.dc_buffer_paddr = 0;
	}

	/* Clear the output chain */
	if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
		reqp->dr_chain_out_head.dc_buffer_paddr = 0;
	}

	return ((rv != DDI_SUCCESS)? rv : rv1);
}

/*
 * Build either input chain or output chain. It is single-item chain for Sparc,
 * and possible multiple-item chain for x86.  The descriptor entries are
 * written (via PIO) into the context region at dr_offset, and "head" records
 * the first buffer's physical address/length and the physical address of the
 * second descriptor (for the MCR entry).  *n_chain returns the cookie count.
 */
static int
dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain)
{
	ddi_dma_cookie_t c;
	uint_t nc;
	int rv;
	caddr_t chain_kaddr_pre;
	caddr_t chain_kaddr;
	uint32_t chain_paddr;
	int i;

	/* Advance past the context structure to the starting address */
	chain_paddr = reqp->dr_ctx_paddr + dr_offset;
	chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;

	/*
	 * Bind the kernel address to the DMA handle. On x86, the actual
	 * buffer is mapped into multiple physical addresses. On Sparc,
	 * the actual buffer is mapped into a single address.
	 */
	rv = ddi_dma_addr_bind_handle(handle,
	    NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		return (DDI_FAILURE);
	}

	(void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
	if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
	    DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
		/*
		 * NOTE(review): on this path the handle remains bound but
		 * head->dc_buffer_paddr is still 0, so dca_unbindchains()
		 * will not unbind it; the request is flagged for
		 * destruction instead — verify teardown unbinds it.
		 */
		reqp->destroy = TRUE;
		return (rv);
	}

	*n_chain = nc;

	/* Setup the data buffer chain for DMA transfer */
	chain_kaddr_pre = NULL;
	head->dc_buffer_paddr = 0;
	head->dc_next_paddr = 0;
	head->dc_buffer_length = 0;
	for (i = 0; i < nc; i++) {
		/* PIO */
		PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
		PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
		PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);

		/* Remember the head of the chain */
		if (head->dc_buffer_paddr == 0) {
			head->dc_buffer_paddr = c.dmac_address;
			head->dc_buffer_length = c.dmac_size;
		}

		/* Link to the previous one if one exists */
		if (chain_kaddr_pre) {
			PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
			    chain_paddr);
			if (head->dc_next_paddr == 0)
				head->dc_next_paddr = chain_paddr;
		}
		chain_kaddr_pre = chain_kaddr;

		/* Maintain pointers */
		chain_paddr += DESC_SIZE;
		chain_kaddr += DESC_SIZE;

		/* Retrieve the next cookie if there is one */
		if (i < nc-1)
			ddi_dma_nextcookie(handle, &c);
	}

	/* Set the next pointer in the last entry to NULL */
	PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);

	return (DDI_SUCCESS);
}

/*
 * Schedule some work.  Syncs out the request's context/descriptor region,
 * places the request on the worklist's wait queue, applies flow control
 * (notifying kCF when the high-water mark is hit), and optionally kicks
 * the scheduler.  Returns CRYPTO_QUEUED on success or CRYPTO_DEVICE_ERROR
 * if the FM check on the context DMA handle fails.
 */
int
dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);

	mutex_enter(&wlp->dwl_lock);

	DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
	    reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
	    reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
	DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
	    reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
	/* sync out the entire context and descriptor chains */
	(void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
	if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		reqp->destroy = TRUE;
		mutex_exit(&wlp->dwl_lock);
		return (CRYPTO_DEVICE_ERROR);
	}

	dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
	wlp->dwl_count++;
	wlp->dwl_lastsubmit = ddi_get_lbolt();
	reqp->dr_wlp = wlp;

	if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
		/* we are fully loaded now, let kCF know */

		wlp->dwl_flowctl++;
		wlp->dwl_busy = 1;

		crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
	}

	if (dosched) {
#ifdef	SCHEDDELAY
		/* possibly wait for more work to arrive */
		if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
			dca_schedule(dca, mcr);
		} else if (!wlp->dwl_schedtid) {
			/* wait 1 msec for more work before doing it */
			wlp->dwl_schedtid = timeout(dca_schedtimeout,
			    (void *)wlp, drv_usectohz(MSEC));
		}
#else
		dca_schedule(dca, mcr);
#endif
	}
	mutex_exit(&wlp->dwl_lock);

	return (CRYPTO_QUEUED);
}

/*
 * Drain the wait queue into MCRs and post them to the chip.  Batches up
 * to dwl_reqspermcr requests per MCR, filling in each request's context
 * and input/output chain entries, then writes the MCR's physical address
 * to the appropriate CSR.  Stops when the chip's MCR-full bit is set,
 * the queue empties, or an FM check fails.  Caller must hold dwl_lock.
 */
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;
	int		full;
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}

/*
 * Reclaim completed work, called in interrupt context.
 */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_work_t	*workp;
	ushort_t	flags;
	int		nreclaimed = 0;
	int		i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR in the submitted (runq), we check to see if
	 * it has been processed. If so, then we note each individual
	 * job in the MCR, and do the completion processing for
	 * each of such job.
	 */
	for (;;) {

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			break;
		}

		/* only sync the MCR flags, since that's all we need */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_rmqueue((dca_listnode_t *)workp);
			dca_destroywork(workp);
			return;
		}

		flags = GETMCR16(workp, MCR_FLAGS);
		if ((flags & MCRFLAG_FINISHED) == 0) {
			/* chip is still working on it */
			DBG(dca, DRECLAIM,
			    "chip still working on it (MCR%d)", mcr);
			break;
		}

		/* its really for us, so remove it from the queue */
		dca_rmqueue((dca_listnode_t *)workp);

		/* if we were draining, signal on the cv */
		if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
			cv_signal(&wlp->dwl_cv);
		}

		/* update statistics, done under the lock */
		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];
			if (reqp == NULL) {
				continue;
			}
			if (reqp->dr_byte_stat >= 0) {
				dca->dca_stats[reqp->dr_byte_stat] +=
				    reqp->dr_pkt_length;
			}
			if (reqp->dr_job_stat >= 0) {
				dca->dca_stats[reqp->dr_job_stat]++;
			}
		}
		/* drop the lock across the per-request callbacks */
		mutex_exit(&wlp->dwl_lock);

		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];

			if (reqp == NULL) {
				continue;
			}

			/* Do the callback. */
			workp->dw_reqs[i] = NULL;
			dca_done(reqp, CRYPTO_SUCCESS);

			nreclaimed++;
		}

		mutex_enter(&wlp->dwl_lock);

		/* now we can release the work */
		dca_freework(workp);
	}
	DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
}

/*
 * Return the length of a crypto_data_t buffer.
 */
int
dca_length(crypto_data_t *cdata)
{
	return (cdata->cd_length);
}

/*
 * This is the callback function called from the interrupt when a kCF job
 * completes. It does some driver-specific things, and then calls the
 * kCF-provided callback. Finally, it cleans up the state for the work
 * request and drops the reference count to allow for DR.
2399*906Sgm89044 */ 2400*906Sgm89044 void 2401*906Sgm89044 dca_done(dca_request_t *reqp, int err) 2402*906Sgm89044 { 2403*906Sgm89044 uint64_t ena = 0; 2404*906Sgm89044 2405*906Sgm89044 /* unbind any chains we were using */ 2406*906Sgm89044 if (dca_unbindchains(reqp) != DDI_SUCCESS) { 2407*906Sgm89044 /* DMA failure */ 2408*906Sgm89044 ena = dca_ena(ena); 2409*906Sgm89044 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT, 2410*906Sgm89044 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR, 2411*906Sgm89044 "fault on buffer DMA handle"); 2412*906Sgm89044 if (err == CRYPTO_SUCCESS) { 2413*906Sgm89044 err = CRYPTO_DEVICE_ERROR; 2414*906Sgm89044 } 2415*906Sgm89044 } 2416*906Sgm89044 2417*906Sgm89044 if (reqp->dr_callback != NULL) { 2418*906Sgm89044 reqp->dr_callback(reqp, err); 2419*906Sgm89044 } else { 2420*906Sgm89044 dca_freereq(reqp); 2421*906Sgm89044 } 2422*906Sgm89044 } 2423*906Sgm89044 2424*906Sgm89044 /* 2425*906Sgm89044 * Call this when a failure is detected. It will reset the chip, 2426*906Sgm89044 * log a message, alert kCF, and mark jobs in the runq as failed. 2427*906Sgm89044 */ 2428*906Sgm89044 /* ARGSUSED */ 2429*906Sgm89044 void 2430*906Sgm89044 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index, 2431*906Sgm89044 uint64_t ena, int errno, char *mess, ...) 
2432*906Sgm89044 { 2433*906Sgm89044 va_list ap; 2434*906Sgm89044 char buf[256]; 2435*906Sgm89044 int mcr; 2436*906Sgm89044 char *eclass; 2437*906Sgm89044 int have_mutex; 2438*906Sgm89044 2439*906Sgm89044 va_start(ap, mess); 2440*906Sgm89044 (void) vsprintf(buf, mess, ap); 2441*906Sgm89044 va_end(ap); 2442*906Sgm89044 2443*906Sgm89044 eclass = dca_fma_eclass_string(dca->dca_model, index); 2444*906Sgm89044 2445*906Sgm89044 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) && 2446*906Sgm89044 index != DCA_FM_ECLASS_NONE) { 2447*906Sgm89044 ddi_fm_ereport_post(dca->dca_dip, eclass, ena, 2448*906Sgm89044 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 2449*906Sgm89044 FM_EREPORT_VERS0, NULL); 2450*906Sgm89044 2451*906Sgm89044 /* Report the impact of the failure to the DDI. */ 2452*906Sgm89044 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST); 2453*906Sgm89044 } else { 2454*906Sgm89044 /* Just log the error string to the message log */ 2455*906Sgm89044 dca_error(dca, buf); 2456*906Sgm89044 } 2457*906Sgm89044 2458*906Sgm89044 /* 2459*906Sgm89044 * Indicate a failure (keeps schedule from running). 2460*906Sgm89044 */ 2461*906Sgm89044 dca->dca_flags |= DCA_FAILED; 2462*906Sgm89044 2463*906Sgm89044 /* 2464*906Sgm89044 * Reset the chip. This should also have as a side effect, the 2465*906Sgm89044 * disabling of all interrupts from the device. 2466*906Sgm89044 */ 2467*906Sgm89044 (void) dca_reset(dca, 1); 2468*906Sgm89044 2469*906Sgm89044 /* 2470*906Sgm89044 * Report the failure to kCF. 2471*906Sgm89044 */ 2472*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2473*906Sgm89044 if (WORKLIST(dca, mcr)->dwl_prov) { 2474*906Sgm89044 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov, 2475*906Sgm89044 CRYPTO_PROVIDER_FAILED); 2476*906Sgm89044 } 2477*906Sgm89044 } 2478*906Sgm89044 2479*906Sgm89044 /* 2480*906Sgm89044 * Return jobs not sent to hardware back to kCF. 
2481*906Sgm89044 */ 2482*906Sgm89044 dca_rejectjobs(dca); 2483*906Sgm89044 2484*906Sgm89044 /* 2485*906Sgm89044 * From this point on, no new work should be arriving, and the 2486*906Sgm89044 * chip should not be doing any active DMA. 2487*906Sgm89044 */ 2488*906Sgm89044 2489*906Sgm89044 /* 2490*906Sgm89044 * Now find all the work submitted to the device and fail 2491*906Sgm89044 * them. 2492*906Sgm89044 */ 2493*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2494*906Sgm89044 dca_worklist_t *wlp; 2495*906Sgm89044 int i; 2496*906Sgm89044 2497*906Sgm89044 wlp = WORKLIST(dca, mcr); 2498*906Sgm89044 2499*906Sgm89044 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2500*906Sgm89044 continue; 2501*906Sgm89044 } 2502*906Sgm89044 for (;;) { 2503*906Sgm89044 dca_work_t *workp; 2504*906Sgm89044 2505*906Sgm89044 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2506*906Sgm89044 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq); 2507*906Sgm89044 if (workp == NULL) { 2508*906Sgm89044 if (have_mutex) 2509*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2510*906Sgm89044 break; 2511*906Sgm89044 } 2512*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2513*906Sgm89044 2514*906Sgm89044 /* 2515*906Sgm89044 * Free up requests 2516*906Sgm89044 */ 2517*906Sgm89044 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2518*906Sgm89044 dca_request_t *reqp = workp->dw_reqs[i]; 2519*906Sgm89044 if (reqp) { 2520*906Sgm89044 if (reqp->dr_flags & DR_INPLACE) { 2521*906Sgm89044 dca_done(reqp, errno); 2522*906Sgm89044 } else { 2523*906Sgm89044 /* 2524*906Sgm89044 * cause it to get retried 2525*906Sgm89044 * elsewhere (software) 2526*906Sgm89044 */ 2527*906Sgm89044 dca_done(reqp, CRYPTO_FAILED); 2528*906Sgm89044 } 2529*906Sgm89044 workp->dw_reqs[i] = NULL; 2530*906Sgm89044 } 2531*906Sgm89044 } 2532*906Sgm89044 2533*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2534*906Sgm89044 /* 2535*906Sgm89044 * If waiting to drain, signal on the waiter. 
2536*906Sgm89044 */ 2537*906Sgm89044 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2538*906Sgm89044 cv_signal(&wlp->dwl_cv); 2539*906Sgm89044 } 2540*906Sgm89044 2541*906Sgm89044 /* 2542*906Sgm89044 * Return the work and request structures to 2543*906Sgm89044 * the free pool. 2544*906Sgm89044 */ 2545*906Sgm89044 dca_freework(workp); 2546*906Sgm89044 if (have_mutex) 2547*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2548*906Sgm89044 } 2549*906Sgm89044 } 2550*906Sgm89044 2551*906Sgm89044 } 2552*906Sgm89044 2553*906Sgm89044 #ifdef SCHEDDELAY 2554*906Sgm89044 /* 2555*906Sgm89044 * Reschedule worklist as needed. 2556*906Sgm89044 */ 2557*906Sgm89044 void 2558*906Sgm89044 dca_schedtimeout(void *arg) 2559*906Sgm89044 { 2560*906Sgm89044 dca_worklist_t *wlp = (dca_worklist_t *)arg; 2561*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2562*906Sgm89044 wlp->dwl_schedtid = 0; 2563*906Sgm89044 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr); 2564*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2565*906Sgm89044 } 2566*906Sgm89044 #endif 2567*906Sgm89044 2568*906Sgm89044 /* 2569*906Sgm89044 * Check for stalled jobs. 
2570*906Sgm89044 */ 2571*906Sgm89044 void 2572*906Sgm89044 dca_jobtimeout(void *arg) 2573*906Sgm89044 { 2574*906Sgm89044 int mcr; 2575*906Sgm89044 dca_t *dca = (dca_t *)arg; 2576*906Sgm89044 int hung = 0; 2577*906Sgm89044 2578*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2579*906Sgm89044 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2580*906Sgm89044 dca_work_t *workp; 2581*906Sgm89044 clock_t when; 2582*906Sgm89044 2583*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2584*906Sgm89044 when = ddi_get_lbolt(); 2585*906Sgm89044 2586*906Sgm89044 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2587*906Sgm89044 if (workp == NULL) { 2588*906Sgm89044 /* nothing sitting in the queue */ 2589*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2590*906Sgm89044 continue; 2591*906Sgm89044 } 2592*906Sgm89044 2593*906Sgm89044 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) { 2594*906Sgm89044 /* request has been queued for less than STALETIME */ 2595*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2596*906Sgm89044 continue; 2597*906Sgm89044 } 2598*906Sgm89044 2599*906Sgm89044 /* job has been sitting around for over 1 second, badness */ 2600*906Sgm89044 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp, 2601*906Sgm89044 mcr); 2602*906Sgm89044 2603*906Sgm89044 /* put it back in the queue, until we reset the chip */ 2604*906Sgm89044 hung++; 2605*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2606*906Sgm89044 } 2607*906Sgm89044 2608*906Sgm89044 if (hung) { 2609*906Sgm89044 dca_failure(dca, DDI_DEVICE_FAULT, 2610*906Sgm89044 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR, 2611*906Sgm89044 "timeout processing job.)"); 2612*906Sgm89044 } 2613*906Sgm89044 2614*906Sgm89044 /* reschedule ourself */ 2615*906Sgm89044 mutex_enter(&dca->dca_intrlock); 2616*906Sgm89044 if (dca->dca_jobtid == 0) { 2617*906Sgm89044 /* timeout has been canceled, prior to DR */ 2618*906Sgm89044 mutex_exit(&dca->dca_intrlock); 2619*906Sgm89044 return; 2620*906Sgm89044 } 2621*906Sgm89044 2622*906Sgm89044 /* check again 
in 1 second */ 2623*906Sgm89044 dca->dca_jobtid = timeout(dca_jobtimeout, arg, 2624*906Sgm89044 drv_usectohz(SECOND)); 2625*906Sgm89044 mutex_exit(&dca->dca_intrlock); 2626*906Sgm89044 } 2627*906Sgm89044 2628*906Sgm89044 /* 2629*906Sgm89044 * This returns all jobs back to kCF. It assumes that processing 2630*906Sgm89044 * on the worklist has halted. 2631*906Sgm89044 */ 2632*906Sgm89044 void 2633*906Sgm89044 dca_rejectjobs(dca_t *dca) 2634*906Sgm89044 { 2635*906Sgm89044 int mcr; 2636*906Sgm89044 int have_mutex; 2637*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2638*906Sgm89044 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2639*906Sgm89044 dca_request_t *reqp; 2640*906Sgm89044 2641*906Sgm89044 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2642*906Sgm89044 continue; 2643*906Sgm89044 } 2644*906Sgm89044 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2645*906Sgm89044 for (;;) { 2646*906Sgm89044 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq); 2647*906Sgm89044 if (reqp == NULL) { 2648*906Sgm89044 break; 2649*906Sgm89044 } 2650*906Sgm89044 /* update flow control */ 2651*906Sgm89044 wlp->dwl_count--; 2652*906Sgm89044 if ((wlp->dwl_count == wlp->dwl_lowater) && 2653*906Sgm89044 (wlp->dwl_busy)) { 2654*906Sgm89044 wlp->dwl_busy = 0; 2655*906Sgm89044 crypto_prov_notify(wlp->dwl_prov, 2656*906Sgm89044 CRYPTO_PROVIDER_READY); 2657*906Sgm89044 } 2658*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2659*906Sgm89044 2660*906Sgm89044 (void) dca_unbindchains(reqp); 2661*906Sgm89044 reqp->dr_callback(reqp, EAGAIN); 2662*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2663*906Sgm89044 } 2664*906Sgm89044 if (have_mutex) 2665*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2666*906Sgm89044 } 2667*906Sgm89044 } 2668*906Sgm89044 2669*906Sgm89044 int 2670*906Sgm89044 dca_drain(dca_t *dca) 2671*906Sgm89044 { 2672*906Sgm89044 int mcr; 2673*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2674*906Sgm89044 #ifdef SCHEDDELAY 2675*906Sgm89044 timeout_id_t tid; 2676*906Sgm89044 #endif 2677*906Sgm89044 
dca_worklist_t *wlp = WORKLIST(dca, mcr); 2678*906Sgm89044 2679*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2680*906Sgm89044 wlp->dwl_drain = 1; 2681*906Sgm89044 2682*906Sgm89044 /* give it up to a second to drain from the chip */ 2683*906Sgm89044 if (!QEMPTY(&wlp->dwl_runq)) { 2684*906Sgm89044 (void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock, 2685*906Sgm89044 ddi_get_time() + drv_usectohz(STALETIME)); 2686*906Sgm89044 2687*906Sgm89044 if (!QEMPTY(&wlp->dwl_runq)) { 2688*906Sgm89044 dca_error(dca, "unable to drain device"); 2689*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2690*906Sgm89044 dca_undrain(dca); 2691*906Sgm89044 return (EBUSY); 2692*906Sgm89044 } 2693*906Sgm89044 } 2694*906Sgm89044 2695*906Sgm89044 #ifdef SCHEDDELAY 2696*906Sgm89044 tid = wlp->dwl_schedtid; 2697*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2698*906Sgm89044 2699*906Sgm89044 /* 2700*906Sgm89044 * untimeout outside the lock -- this is safe because we 2701*906Sgm89044 * have set the drain flag, so dca_schedule() will not 2702*906Sgm89044 * reschedule another timeout 2703*906Sgm89044 */ 2704*906Sgm89044 if (tid) { 2705*906Sgm89044 untimeout(tid); 2706*906Sgm89044 } 2707*906Sgm89044 #else 2708*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2709*906Sgm89044 #endif 2710*906Sgm89044 } 2711*906Sgm89044 return (0); 2712*906Sgm89044 } 2713*906Sgm89044 2714*906Sgm89044 void 2715*906Sgm89044 dca_undrain(dca_t *dca) 2716*906Sgm89044 { 2717*906Sgm89044 int mcr; 2718*906Sgm89044 2719*906Sgm89044 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2720*906Sgm89044 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2721*906Sgm89044 mutex_enter(&wlp->dwl_lock); 2722*906Sgm89044 wlp->dwl_drain = 0; 2723*906Sgm89044 dca_schedule(dca, mcr); 2724*906Sgm89044 mutex_exit(&wlp->dwl_lock); 2725*906Sgm89044 } 2726*906Sgm89044 } 2727*906Sgm89044 2728*906Sgm89044 /* 2729*906Sgm89044 * Duplicate the crypto_data_t structure, but point to the original 2730*906Sgm89044 * buffers. 
2731*906Sgm89044 */ 2732*906Sgm89044 int 2733*906Sgm89044 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput) 2734*906Sgm89044 { 2735*906Sgm89044 ninput->cd_format = input->cd_format; 2736*906Sgm89044 ninput->cd_offset = input->cd_offset; 2737*906Sgm89044 ninput->cd_length = input->cd_length; 2738*906Sgm89044 ninput->cd_miscdata = input->cd_miscdata; 2739*906Sgm89044 2740*906Sgm89044 switch (input->cd_format) { 2741*906Sgm89044 case CRYPTO_DATA_RAW: 2742*906Sgm89044 ninput->cd_raw.iov_base = input->cd_raw.iov_base; 2743*906Sgm89044 ninput->cd_raw.iov_len = input->cd_raw.iov_len; 2744*906Sgm89044 break; 2745*906Sgm89044 2746*906Sgm89044 case CRYPTO_DATA_UIO: 2747*906Sgm89044 ninput->cd_uio = input->cd_uio; 2748*906Sgm89044 break; 2749*906Sgm89044 2750*906Sgm89044 case CRYPTO_DATA_MBLK: 2751*906Sgm89044 ninput->cd_mp = input->cd_mp; 2752*906Sgm89044 break; 2753*906Sgm89044 2754*906Sgm89044 default: 2755*906Sgm89044 DBG(NULL, DWARN, 2756*906Sgm89044 "dca_dupcrypto: unrecognised crypto data format"); 2757*906Sgm89044 return (CRYPTO_FAILED); 2758*906Sgm89044 } 2759*906Sgm89044 2760*906Sgm89044 return (CRYPTO_SUCCESS); 2761*906Sgm89044 } 2762*906Sgm89044 2763*906Sgm89044 /* 2764*906Sgm89044 * Performs validation checks on the input and output data structures. 
2765*906Sgm89044 */ 2766*906Sgm89044 int 2767*906Sgm89044 dca_verifyio(crypto_data_t *input, crypto_data_t *output) 2768*906Sgm89044 { 2769*906Sgm89044 int rv = CRYPTO_SUCCESS; 2770*906Sgm89044 2771*906Sgm89044 switch (input->cd_format) { 2772*906Sgm89044 case CRYPTO_DATA_RAW: 2773*906Sgm89044 break; 2774*906Sgm89044 2775*906Sgm89044 case CRYPTO_DATA_UIO: 2776*906Sgm89044 /* we support only kernel buffer */ 2777*906Sgm89044 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) { 2778*906Sgm89044 DBG(NULL, DWARN, "non kernel input uio buffer"); 2779*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 2780*906Sgm89044 } 2781*906Sgm89044 break; 2782*906Sgm89044 2783*906Sgm89044 case CRYPTO_DATA_MBLK: 2784*906Sgm89044 break; 2785*906Sgm89044 2786*906Sgm89044 default: 2787*906Sgm89044 DBG(NULL, DWARN, "unrecognised input crypto data format"); 2788*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 2789*906Sgm89044 } 2790*906Sgm89044 2791*906Sgm89044 switch (output->cd_format) { 2792*906Sgm89044 case CRYPTO_DATA_RAW: 2793*906Sgm89044 break; 2794*906Sgm89044 2795*906Sgm89044 case CRYPTO_DATA_UIO: 2796*906Sgm89044 /* we support only kernel buffer */ 2797*906Sgm89044 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) { 2798*906Sgm89044 DBG(NULL, DWARN, "non kernel output uio buffer"); 2799*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 2800*906Sgm89044 } 2801*906Sgm89044 break; 2802*906Sgm89044 2803*906Sgm89044 case CRYPTO_DATA_MBLK: 2804*906Sgm89044 break; 2805*906Sgm89044 2806*906Sgm89044 default: 2807*906Sgm89044 DBG(NULL, DWARN, "unrecognised output crypto data format"); 2808*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 2809*906Sgm89044 } 2810*906Sgm89044 2811*906Sgm89044 return (rv); 2812*906Sgm89044 } 2813*906Sgm89044 2814*906Sgm89044 /* 2815*906Sgm89044 * data: source crypto_data_t struct 2816*906Sgm89044 * off: offset into the source before commencing copy 2817*906Sgm89044 * count: the amount of data to copy 2818*906Sgm89044 * dest: destination buffer 2819*906Sgm89044 */ 2820*906Sgm89044 int 2821*906Sgm89044 
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest) 2822*906Sgm89044 { 2823*906Sgm89044 int rv = CRYPTO_SUCCESS; 2824*906Sgm89044 uio_t *uiop; 2825*906Sgm89044 uint_t vec_idx; 2826*906Sgm89044 size_t cur_len; 2827*906Sgm89044 mblk_t *mp; 2828*906Sgm89044 2829*906Sgm89044 if (count == 0) { 2830*906Sgm89044 /* We don't want anything so we're done. */ 2831*906Sgm89044 return (rv); 2832*906Sgm89044 } 2833*906Sgm89044 2834*906Sgm89044 /* 2835*906Sgm89044 * Sanity check that we haven't specified a length greater than the 2836*906Sgm89044 * offset adjusted size of the buffer. 2837*906Sgm89044 */ 2838*906Sgm89044 if (count > (data->cd_length - off)) { 2839*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 2840*906Sgm89044 } 2841*906Sgm89044 2842*906Sgm89044 /* Add the internal crypto_data offset to the requested offset. */ 2843*906Sgm89044 off += data->cd_offset; 2844*906Sgm89044 2845*906Sgm89044 switch (data->cd_format) { 2846*906Sgm89044 case CRYPTO_DATA_RAW: 2847*906Sgm89044 bcopy(data->cd_raw.iov_base + off, dest, count); 2848*906Sgm89044 break; 2849*906Sgm89044 2850*906Sgm89044 case CRYPTO_DATA_UIO: 2851*906Sgm89044 /* 2852*906Sgm89044 * Jump to the first iovec containing data to be 2853*906Sgm89044 * processed. 2854*906Sgm89044 */ 2855*906Sgm89044 uiop = data->cd_uio; 2856*906Sgm89044 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 2857*906Sgm89044 off >= uiop->uio_iov[vec_idx].iov_len; 2858*906Sgm89044 off -= uiop->uio_iov[vec_idx++].iov_len); 2859*906Sgm89044 if (vec_idx == uiop->uio_iovcnt) { 2860*906Sgm89044 /* 2861*906Sgm89044 * The caller specified an offset that is larger than 2862*906Sgm89044 * the total size of the buffers it provided. 2863*906Sgm89044 */ 2864*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 2865*906Sgm89044 } 2866*906Sgm89044 2867*906Sgm89044 /* 2868*906Sgm89044 * Now process the iovecs. 
2869*906Sgm89044 */ 2870*906Sgm89044 while (vec_idx < uiop->uio_iovcnt && count > 0) { 2871*906Sgm89044 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 2872*906Sgm89044 off, count); 2873*906Sgm89044 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 2874*906Sgm89044 cur_len); 2875*906Sgm89044 count -= cur_len; 2876*906Sgm89044 dest += cur_len; 2877*906Sgm89044 vec_idx++; 2878*906Sgm89044 off = 0; 2879*906Sgm89044 } 2880*906Sgm89044 2881*906Sgm89044 if (vec_idx == uiop->uio_iovcnt && count > 0) { 2882*906Sgm89044 /* 2883*906Sgm89044 * The end of the specified iovec's was reached but 2884*906Sgm89044 * the length requested could not be processed 2885*906Sgm89044 * (requested to digest more data than it provided). 2886*906Sgm89044 */ 2887*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 2888*906Sgm89044 } 2889*906Sgm89044 break; 2890*906Sgm89044 2891*906Sgm89044 case CRYPTO_DATA_MBLK: 2892*906Sgm89044 /* 2893*906Sgm89044 * Jump to the first mblk_t containing data to be processed. 2894*906Sgm89044 */ 2895*906Sgm89044 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp); 2896*906Sgm89044 off -= MBLKL(mp), mp = mp->b_cont); 2897*906Sgm89044 if (mp == NULL) { 2898*906Sgm89044 /* 2899*906Sgm89044 * The caller specified an offset that is larger than 2900*906Sgm89044 * the total size of the buffers it provided. 2901*906Sgm89044 */ 2902*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 2903*906Sgm89044 } 2904*906Sgm89044 2905*906Sgm89044 /* 2906*906Sgm89044 * Now do the processing on the mblk chain. 
2907*906Sgm89044 */ 2908*906Sgm89044 while (mp != NULL && count > 0) { 2909*906Sgm89044 cur_len = min(MBLKL(mp) - off, count); 2910*906Sgm89044 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 2911*906Sgm89044 count -= cur_len; 2912*906Sgm89044 dest += cur_len; 2913*906Sgm89044 mp = mp->b_cont; 2914*906Sgm89044 off = 0; 2915*906Sgm89044 } 2916*906Sgm89044 2917*906Sgm89044 if (mp == NULL && count > 0) { 2918*906Sgm89044 /* 2919*906Sgm89044 * The end of the mblk was reached but the length 2920*906Sgm89044 * requested could not be processed, (requested to 2921*906Sgm89044 * digest more data than it provided). 2922*906Sgm89044 */ 2923*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 2924*906Sgm89044 } 2925*906Sgm89044 break; 2926*906Sgm89044 2927*906Sgm89044 default: 2928*906Sgm89044 DBG(NULL, DWARN, "unrecognised crypto data format"); 2929*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 2930*906Sgm89044 } 2931*906Sgm89044 return (rv); 2932*906Sgm89044 } 2933*906Sgm89044 2934*906Sgm89044 2935*906Sgm89044 /* 2936*906Sgm89044 * Performs the input, output or hard scatter/gather checks on the specified 2937*906Sgm89044 * crypto_data_t struct. Returns true if the data is scatter/gather in nature 2938*906Sgm89044 * ie fails the test. 2939*906Sgm89044 */ 2940*906Sgm89044 int 2941*906Sgm89044 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val) 2942*906Sgm89044 { 2943*906Sgm89044 uio_t *uiop; 2944*906Sgm89044 mblk_t *mp; 2945*906Sgm89044 int rv = FALSE; 2946*906Sgm89044 2947*906Sgm89044 switch (val) { 2948*906Sgm89044 case DCA_SG_CONTIG: 2949*906Sgm89044 /* 2950*906Sgm89044 * Check for a contiguous data buffer. 
2951*906Sgm89044 */ 2952*906Sgm89044 switch (data->cd_format) { 2953*906Sgm89044 case CRYPTO_DATA_RAW: 2954*906Sgm89044 /* Contiguous in nature */ 2955*906Sgm89044 break; 2956*906Sgm89044 2957*906Sgm89044 case CRYPTO_DATA_UIO: 2958*906Sgm89044 if (data->cd_uio->uio_iovcnt > 1) 2959*906Sgm89044 rv = TRUE; 2960*906Sgm89044 break; 2961*906Sgm89044 2962*906Sgm89044 case CRYPTO_DATA_MBLK: 2963*906Sgm89044 mp = data->cd_mp; 2964*906Sgm89044 if (mp->b_cont != NULL) 2965*906Sgm89044 rv = TRUE; 2966*906Sgm89044 break; 2967*906Sgm89044 2968*906Sgm89044 default: 2969*906Sgm89044 DBG(NULL, DWARN, "unrecognised crypto data format"); 2970*906Sgm89044 } 2971*906Sgm89044 break; 2972*906Sgm89044 2973*906Sgm89044 case DCA_SG_WALIGN: 2974*906Sgm89044 /* 2975*906Sgm89044 * Check for a contiguous data buffer that is 32-bit word 2976*906Sgm89044 * aligned and is of word multiples in size. 2977*906Sgm89044 */ 2978*906Sgm89044 switch (data->cd_format) { 2979*906Sgm89044 case CRYPTO_DATA_RAW: 2980*906Sgm89044 if ((data->cd_raw.iov_len % sizeof (uint32_t)) || 2981*906Sgm89044 ((uintptr_t)data->cd_raw.iov_base % 2982*906Sgm89044 sizeof (uint32_t))) { 2983*906Sgm89044 rv = TRUE; 2984*906Sgm89044 } 2985*906Sgm89044 break; 2986*906Sgm89044 2987*906Sgm89044 case CRYPTO_DATA_UIO: 2988*906Sgm89044 uiop = data->cd_uio; 2989*906Sgm89044 if (uiop->uio_iovcnt > 1) { 2990*906Sgm89044 return (TRUE); 2991*906Sgm89044 } 2992*906Sgm89044 /* So there is only one iovec */ 2993*906Sgm89044 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) || 2994*906Sgm89044 ((uintptr_t)uiop->uio_iov[0].iov_base % 2995*906Sgm89044 sizeof (uint32_t))) { 2996*906Sgm89044 rv = TRUE; 2997*906Sgm89044 } 2998*906Sgm89044 break; 2999*906Sgm89044 3000*906Sgm89044 case CRYPTO_DATA_MBLK: 3001*906Sgm89044 mp = data->cd_mp; 3002*906Sgm89044 if (mp->b_cont != NULL) { 3003*906Sgm89044 return (TRUE); 3004*906Sgm89044 } 3005*906Sgm89044 /* So there is only one mblk in the chain */ 3006*906Sgm89044 if ((MBLKL(mp) % sizeof (uint32_t)) || 
3007*906Sgm89044 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) { 3008*906Sgm89044 rv = TRUE; 3009*906Sgm89044 } 3010*906Sgm89044 break; 3011*906Sgm89044 3012*906Sgm89044 default: 3013*906Sgm89044 DBG(NULL, DWARN, "unrecognised crypto data format"); 3014*906Sgm89044 } 3015*906Sgm89044 break; 3016*906Sgm89044 3017*906Sgm89044 case DCA_SG_PALIGN: 3018*906Sgm89044 /* 3019*906Sgm89044 * Check that the data buffer is page aligned and is of 3020*906Sgm89044 * page multiples in size. 3021*906Sgm89044 */ 3022*906Sgm89044 switch (data->cd_format) { 3023*906Sgm89044 case CRYPTO_DATA_RAW: 3024*906Sgm89044 if ((data->cd_length % dca->dca_pagesize) || 3025*906Sgm89044 ((uintptr_t)data->cd_raw.iov_base % 3026*906Sgm89044 dca->dca_pagesize)) { 3027*906Sgm89044 rv = TRUE; 3028*906Sgm89044 } 3029*906Sgm89044 break; 3030*906Sgm89044 3031*906Sgm89044 case CRYPTO_DATA_UIO: 3032*906Sgm89044 uiop = data->cd_uio; 3033*906Sgm89044 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) || 3034*906Sgm89044 ((uintptr_t)uiop->uio_iov[0].iov_base % 3035*906Sgm89044 dca->dca_pagesize)) { 3036*906Sgm89044 rv = TRUE; 3037*906Sgm89044 } 3038*906Sgm89044 break; 3039*906Sgm89044 3040*906Sgm89044 case CRYPTO_DATA_MBLK: 3041*906Sgm89044 mp = data->cd_mp; 3042*906Sgm89044 if ((MBLKL(mp) % dca->dca_pagesize) || 3043*906Sgm89044 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) { 3044*906Sgm89044 rv = TRUE; 3045*906Sgm89044 } 3046*906Sgm89044 break; 3047*906Sgm89044 3048*906Sgm89044 default: 3049*906Sgm89044 DBG(NULL, DWARN, "unrecognised crypto data format"); 3050*906Sgm89044 } 3051*906Sgm89044 break; 3052*906Sgm89044 3053*906Sgm89044 default: 3054*906Sgm89044 DBG(NULL, DWARN, "unrecognised scatter/gather param type"); 3055*906Sgm89044 } 3056*906Sgm89044 3057*906Sgm89044 return (rv); 3058*906Sgm89044 } 3059*906Sgm89044 3060*906Sgm89044 /* 3061*906Sgm89044 * Increments the cd_offset and decrements the cd_length as the data is 3062*906Sgm89044 * gathered from the crypto_data_t struct. 
3063*906Sgm89044 * The data is reverse-copied into the dest buffer if the flag is true. 3064*906Sgm89044 */ 3065*906Sgm89044 int 3066*906Sgm89044 dca_gather(crypto_data_t *in, char *dest, int count, int reverse) 3067*906Sgm89044 { 3068*906Sgm89044 int rv = CRYPTO_SUCCESS; 3069*906Sgm89044 uint_t vec_idx; 3070*906Sgm89044 uio_t *uiop; 3071*906Sgm89044 off_t off = in->cd_offset; 3072*906Sgm89044 size_t cur_len; 3073*906Sgm89044 mblk_t *mp; 3074*906Sgm89044 3075*906Sgm89044 switch (in->cd_format) { 3076*906Sgm89044 case CRYPTO_DATA_RAW: 3077*906Sgm89044 if (count > in->cd_length) { 3078*906Sgm89044 /* 3079*906Sgm89044 * The caller specified a length greater than the 3080*906Sgm89044 * size of the buffer. 3081*906Sgm89044 */ 3082*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 3083*906Sgm89044 } 3084*906Sgm89044 if (reverse) 3085*906Sgm89044 dca_reverse(in->cd_raw.iov_base + off, dest, count, 3086*906Sgm89044 count); 3087*906Sgm89044 else 3088*906Sgm89044 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3089*906Sgm89044 in->cd_offset += count; 3090*906Sgm89044 in->cd_length -= count; 3091*906Sgm89044 break; 3092*906Sgm89044 3093*906Sgm89044 case CRYPTO_DATA_UIO: 3094*906Sgm89044 /* 3095*906Sgm89044 * Jump to the first iovec containing data to be processed. 3096*906Sgm89044 */ 3097*906Sgm89044 uiop = in->cd_uio; 3098*906Sgm89044 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3099*906Sgm89044 off >= uiop->uio_iov[vec_idx].iov_len; 3100*906Sgm89044 off -= uiop->uio_iov[vec_idx++].iov_len); 3101*906Sgm89044 if (vec_idx == uiop->uio_iovcnt) { 3102*906Sgm89044 /* 3103*906Sgm89044 * The caller specified an offset that is larger than 3104*906Sgm89044 * the total size of the buffers it provided. 3105*906Sgm89044 */ 3106*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 3107*906Sgm89044 } 3108*906Sgm89044 3109*906Sgm89044 /* 3110*906Sgm89044 * Now process the iovecs. 
3111*906Sgm89044 */ 3112*906Sgm89044 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3113*906Sgm89044 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3114*906Sgm89044 off, count); 3115*906Sgm89044 count -= cur_len; 3116*906Sgm89044 if (reverse) { 3117*906Sgm89044 /* Fill the dest buffer from the end */ 3118*906Sgm89044 dca_reverse(uiop->uio_iov[vec_idx].iov_base + 3119*906Sgm89044 off, dest+count, cur_len, cur_len); 3120*906Sgm89044 } else { 3121*906Sgm89044 bcopy(uiop->uio_iov[vec_idx].iov_base + off, 3122*906Sgm89044 dest, cur_len); 3123*906Sgm89044 dest += cur_len; 3124*906Sgm89044 } 3125*906Sgm89044 in->cd_offset += cur_len; 3126*906Sgm89044 in->cd_length -= cur_len; 3127*906Sgm89044 vec_idx++; 3128*906Sgm89044 off = 0; 3129*906Sgm89044 } 3130*906Sgm89044 3131*906Sgm89044 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3132*906Sgm89044 /* 3133*906Sgm89044 * The end of the specified iovec's was reached but 3134*906Sgm89044 * the length requested could not be processed 3135*906Sgm89044 * (requested to digest more data than it provided). 3136*906Sgm89044 */ 3137*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 3138*906Sgm89044 } 3139*906Sgm89044 break; 3140*906Sgm89044 3141*906Sgm89044 case CRYPTO_DATA_MBLK: 3142*906Sgm89044 /* 3143*906Sgm89044 * Jump to the first mblk_t containing data to be processed. 3144*906Sgm89044 */ 3145*906Sgm89044 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3146*906Sgm89044 off -= MBLKL(mp), mp = mp->b_cont); 3147*906Sgm89044 if (mp == NULL) { 3148*906Sgm89044 /* 3149*906Sgm89044 * The caller specified an offset that is larger than 3150*906Sgm89044 * the total size of the buffers it provided. 3151*906Sgm89044 */ 3152*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 3153*906Sgm89044 } 3154*906Sgm89044 3155*906Sgm89044 /* 3156*906Sgm89044 * Now do the processing on the mblk chain. 
3157*906Sgm89044 */ 3158*906Sgm89044 while (mp != NULL && count > 0) { 3159*906Sgm89044 cur_len = min(MBLKL(mp) - off, count); 3160*906Sgm89044 count -= cur_len; 3161*906Sgm89044 if (reverse) { 3162*906Sgm89044 /* Fill the dest buffer from the end */ 3163*906Sgm89044 dca_reverse((char *)(mp->b_rptr + off), 3164*906Sgm89044 dest+count, cur_len, cur_len); 3165*906Sgm89044 } else { 3166*906Sgm89044 bcopy((char *)(mp->b_rptr + off), dest, 3167*906Sgm89044 cur_len); 3168*906Sgm89044 dest += cur_len; 3169*906Sgm89044 } 3170*906Sgm89044 in->cd_offset += cur_len; 3171*906Sgm89044 in->cd_length -= cur_len; 3172*906Sgm89044 mp = mp->b_cont; 3173*906Sgm89044 off = 0; 3174*906Sgm89044 } 3175*906Sgm89044 3176*906Sgm89044 if (mp == NULL && count > 0) { 3177*906Sgm89044 /* 3178*906Sgm89044 * The end of the mblk was reached but the length 3179*906Sgm89044 * requested could not be processed, (requested to 3180*906Sgm89044 * digest more data than it provided). 3181*906Sgm89044 */ 3182*906Sgm89044 return (CRYPTO_DATA_LEN_RANGE); 3183*906Sgm89044 } 3184*906Sgm89044 break; 3185*906Sgm89044 3186*906Sgm89044 default: 3187*906Sgm89044 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format"); 3188*906Sgm89044 rv = CRYPTO_ARGUMENTS_BAD; 3189*906Sgm89044 } 3190*906Sgm89044 return (rv); 3191*906Sgm89044 } 3192*906Sgm89044 3193*906Sgm89044 /* 3194*906Sgm89044 * Increments the cd_offset and decrements the cd_length as the data is 3195*906Sgm89044 * gathered from the crypto_data_t struct. 
 */
int
dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
    int count)
{
	int		rv = CRYPTO_SUCCESS;
	caddr_t		baddr;
	uint_t		vec_idx;
	uio_t		*uiop;
	off_t		off = in->cd_offset;	/* remaining skip into the data */
	size_t		cur_len;
	mblk_t		*mp;

	/* Process the residual first */
	if (*residlen > 0) {
		uint_t num = min(count, *residlen);
		bcopy(resid, dest, num);
		*residlen -= num;
		if (*residlen > 0) {
			/*
			 * Requested amount 'count' is less than what's in
			 * the residual, so shuffle any remaining resid to
			 * the front.
			 */
			baddr = resid + num;
			bcopy(baddr, resid, *residlen);
		}
		dest += num;
		count -= num;
	}

	/* Now process what's in the crypto_data_t structs */
	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len);
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.  cd_offset/cd_length are advanced
		 * in step as each chunk is consumed.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;	/* only the first iovec is offset */
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont);
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;	/* only the first mblk is offset */
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_resid_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}

/*
 * Appends the data to the crypto_data_t struct increasing cd_length.
 * cd_offset is left unchanged.
 * Data is reverse-copied if the flag is TRUE.
 */
int
dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
{
	int	rv = CRYPTO_SUCCESS;
	off_t	offset = out->cd_offset + out->cd_length;	/* append point */
	uint_t	vec_idx;
	uio_t	*uiop;
	size_t	cur_len;
	mblk_t	*mp;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW:
		if (out->cd_raw.iov_len - offset < count) {
			/* Trying to write out more than space available. */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse((void*) src, out->cd_raw.iov_base + offset,
			    count, count);
		else
			bcopy(src, out->cd_raw.iov_base + offset, count);
		out->cd_length += count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec that can be written to.
		 */
		uiop = out->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    offset >= uiop->uio_iov[vec_idx].iov_len;
		    offset -= uiop->uio_iov[vec_idx++].iov_len);
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.  When reversing, src is consumed
		 * from its tail: count is decremented first so src+count
		 * points at the chunk that lands in this iovec.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    offset, count);
			count -= cur_len;
			if (reverse) {
				dca_reverse((void*) (src+count),
				    uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len, cur_len);
			} else {
				bcopy(src, uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			vec_idx++;
			offset = 0;	/* only the first iovec is offset */
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to write more data than space provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t that can be written to.
		 */
		for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
		    offset -= MBLKL(mp), mp = mp->b_cont);
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain; same tail-first
		 * consumption of src as the UIO case when reversing.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - offset, count);
			count -= cur_len;
			if (reverse) {
				dca_reverse((void*) (src+count),
				    (char *)(mp->b_rptr + offset), cur_len,
				    cur_len);
			} else {
				bcopy(src, (char *)(mp->b_rptr + offset),
				    cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			mp = mp->b_cont;
			offset = 0;	/* only the first mblk is offset */
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}

/*
 * Compare two byte arrays in reverse order.
 * Return 0 if they are identical, 1 otherwise.
3460*906Sgm89044 */ 3461*906Sgm89044 int 3462*906Sgm89044 dca_bcmp_reverse(const void *s1, const void *s2, size_t n) 3463*906Sgm89044 { 3464*906Sgm89044 int i; 3465*906Sgm89044 caddr_t src, dst; 3466*906Sgm89044 3467*906Sgm89044 if (!n) 3468*906Sgm89044 return (0); 3469*906Sgm89044 3470*906Sgm89044 src = ((caddr_t)s1) + n - 1; 3471*906Sgm89044 dst = (caddr_t)s2; 3472*906Sgm89044 for (i = 0; i < n; i++) { 3473*906Sgm89044 if (*src != *dst) 3474*906Sgm89044 return (1); 3475*906Sgm89044 src--; 3476*906Sgm89044 dst++; 3477*906Sgm89044 } 3478*906Sgm89044 3479*906Sgm89044 return (0); 3480*906Sgm89044 } 3481*906Sgm89044 3482*906Sgm89044 3483*906Sgm89044 /* 3484*906Sgm89044 * This calculates the size of a bignum in bits, specifically not counting 3485*906Sgm89044 * leading zero bits. This size calculation must be done *before* any 3486*906Sgm89044 * endian reversal takes place (i.e. the numbers are in absolute big-endian 3487*906Sgm89044 * order.) 3488*906Sgm89044 */ 3489*906Sgm89044 int 3490*906Sgm89044 dca_bitlen(unsigned char *bignum, int bytelen) 3491*906Sgm89044 { 3492*906Sgm89044 unsigned char msbyte; 3493*906Sgm89044 int i, j; 3494*906Sgm89044 3495*906Sgm89044 for (i = 0; i < bytelen - 1; i++) { 3496*906Sgm89044 if (bignum[i] != 0) { 3497*906Sgm89044 break; 3498*906Sgm89044 } 3499*906Sgm89044 } 3500*906Sgm89044 msbyte = bignum[i]; 3501*906Sgm89044 for (j = 8; j > 1; j--) { 3502*906Sgm89044 if (msbyte & 0x80) { 3503*906Sgm89044 break; 3504*906Sgm89044 } 3505*906Sgm89044 msbyte <<= 1; 3506*906Sgm89044 } 3507*906Sgm89044 return ((8 * (bytelen - i - 1)) + j); 3508*906Sgm89044 } 3509*906Sgm89044 3510*906Sgm89044 /* 3511*906Sgm89044 * This compares to bignums (in big-endian order). It ignores leading 3512*906Sgm89044 * null bytes. The result semantics follow bcmp, mempcmp, strcmp, etc. 
3513*906Sgm89044 */ 3514*906Sgm89044 int 3515*906Sgm89044 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len) 3516*906Sgm89044 { 3517*906Sgm89044 while ((n1len > 1) && (*n1 == 0)) { 3518*906Sgm89044 n1len--; 3519*906Sgm89044 n1++; 3520*906Sgm89044 } 3521*906Sgm89044 while ((n2len > 1) && (*n2 == 0)) { 3522*906Sgm89044 n2len--; 3523*906Sgm89044 n2++; 3524*906Sgm89044 } 3525*906Sgm89044 if (n1len != n2len) { 3526*906Sgm89044 return (n1len - n2len); 3527*906Sgm89044 } 3528*906Sgm89044 while ((n1len > 1) && (*n1 == *n2)) { 3529*906Sgm89044 n1++; 3530*906Sgm89044 n2++; 3531*906Sgm89044 n1len--; 3532*906Sgm89044 } 3533*906Sgm89044 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2)); 3534*906Sgm89044 } 3535*906Sgm89044 3536*906Sgm89044 /* 3537*906Sgm89044 * Return array of key attributes. 3538*906Sgm89044 */ 3539*906Sgm89044 crypto_object_attribute_t * 3540*906Sgm89044 dca_get_key_attr(crypto_key_t *key) 3541*906Sgm89044 { 3542*906Sgm89044 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) || 3543*906Sgm89044 (key->ck_count == 0)) { 3544*906Sgm89044 return (NULL); 3545*906Sgm89044 } 3546*906Sgm89044 3547*906Sgm89044 return (key->ck_attrs); 3548*906Sgm89044 } 3549*906Sgm89044 3550*906Sgm89044 /* 3551*906Sgm89044 * If attribute type exists valp points to it's 32-bit value. 
 */
int
dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, uint32_t *valp)
{
	crypto_object_attribute_t	*bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	/*
	 * NOTE(review): oa_value is dereferenced directly here.  If
	 * oa_value is declared as a caddr_t (char *), this stores only the
	 * first byte of the attribute value in *valp rather than a full
	 * 32-bit quantity — confirm the declared type of oa_value in
	 * crypto_object_attribute_t before relying on this for multi-byte
	 * attributes.
	 */
	*valp = *bap->oa_value;

	return (CRYPTO_SUCCESS);
}

/*
 * If attribute type exists data contains the start address of the value,
 * and numelems contains its length.
 */
int
dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, void **data, unsigned int *numelems)
{
	crypto_object_attribute_t	*bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	/* No copy is made: *data aliases the attribute's own storage. */
	*data = bap->oa_value;
	*numelems = bap->oa_value_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Finds entry of specified name. If it is not found dca_find_attribute
 * returns NULL.
 */
crypto_object_attribute_t *
dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype)
{
	/* Linear scan of the atnum-element attribute array. */
	while (atnum) {
		if (attrp->oa_type == atype)
			return (attrp);
		atnum--;
		attrp++;
	}
	return (NULL);
}

/*
 * Return the address of the first data buffer. If the data format is
 * unrecognised return NULL.
 */
caddr_t
dca_bufdaddr(crypto_data_t *data)
{
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		return (data->cd_raw.iov_base + data->cd_offset);
	case CRYPTO_DATA_UIO:
		/* Only the first iovec is considered. */
		return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
	case CRYPTO_DATA_MBLK:
		/* Only the first mblk of the chain is considered. */
		return ((char *)data->cd_mp->b_rptr + data->cd_offset);
	default:
		DBG(NULL, DWARN,
		    "dca_bufdaddr: unrecognised crypto data format");
		return (NULL);
	}
}

/*
 * Return the address just past the existing data (cd_offset + cd_length)
 * in the first output buffer, i.e. where new output is appended.
 */
static caddr_t
dca_bufdaddr_out(crypto_data_t *data)
{
	size_t offset = data->cd_offset + data->cd_length;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		return (data->cd_raw.iov_base + offset);
	case CRYPTO_DATA_UIO:
		return (data->cd_uio->uio_iov[0].iov_base + offset);
	case CRYPTO_DATA_MBLK:
		return ((char *)data->cd_mp->b_rptr + offset);
	default:
		DBG(NULL, DWARN,
		    "dca_bufdaddr_out: unrecognised crypto data format");
		return (NULL);
	}
}

/*
 * Control entry points.
 */

/* ARGSUSED */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* This provider always reports itself ready. */
	*status = CRYPTO_PROVIDER_READY;
}

/*
 * Cipher (encrypt/decrypt) entry points.
 */

/*
 * Initialize an encryption context for DES, 3DES or RSA and register it
 * on the per-instance context list for cleanup.
 */
/* ARGSUSED */
static int
dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);

	/* Track the live context so it can be reclaimed at detach time. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

/*
 * Single-part encrypt: dispatch to 3DES or RSA based on the mechanism
 * recorded in the context.
 */
/* ARGSUSED */
static int
dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, plaintext, ciphertext, req,
		    DCA_RSA_ENC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	/*
	 * On hard failure report zero output length; QUEUED, SUCCESS and
	 * BUFFER_TOO_SMALL all leave cd_length meaningful for the caller.
	 */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);

	return (error);
}

/*
 * Multi-part encrypt continuation: only the symmetric (3DES) mechanisms
 * support update operations.
 */
/* ARGSUSED */
static int
dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_update: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);

	return (error);
}

/*
 * Multi-part encrypt completion (3DES only).
 */
/* ARGSUSED */
static int
dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);

	return (error);
}

/*
 * Atomic (single-call, context-free) encrypt.  Templates are not
 * supported by this provider.
 */
/* ARGSUSED */
static int
dca_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_encrypt_atomic: started");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* On any non-queued failure report zero output length. */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Initialize a decryption context for DES, 3DES or RSA and register it
 * on the per-instance context list for cleanup.
 */
/* ARGSUSED */
static int
dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);

	/* Track the live context so it can be reclaimed at detach time. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

/*
 * Single-part decrypt: dispatch to 3DES or RSA based on the mechanism
 * recorded in the context.
 */
/* ARGSUSED */
static int
dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, ciphertext, plaintext, req,
		    DCA_RSA_DEC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	/*
	 * On hard failure report zero output length (plaintext may be
	 * NULL here, unlike the encrypt path).
	 */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		if (plaintext)
			plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);

	return (error);
}

/*
 * Multi-part decrypt continuation: only the symmetric (3DES) mechanisms
 * support update operations.
 */
/* ARGSUSED */
static int
dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_update: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);

	return (error);
}

/*
 * Multi-part decrypt completion (3DES only).
 */
/* ARGSUSED */
static int
dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);

	return (error);
}

/*
 * Atomic (single-call, context-free) decrypt.  Templates are not
 * supported by this provider.
 */
/* ARGSUSED */
static int
dca_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_decrypt_atomic: started");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* On any non-queued failure report zero output length. */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Sign entry points.
4095*906Sgm89044 */ 4096*906Sgm89044 4097*906Sgm89044 /* ARGSUSED */ 4098*906Sgm89044 static int 4099*906Sgm89044 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4100*906Sgm89044 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4101*906Sgm89044 crypto_req_handle_t req) 4102*906Sgm89044 { 4103*906Sgm89044 int error = CRYPTO_FAILED; 4104*906Sgm89044 dca_t *softc; 4105*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4106*906Sgm89044 int instance; 4107*906Sgm89044 4108*906Sgm89044 /* extract softc and instance number from context */ 4109*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4110*906Sgm89044 DBG(softc, DENTRY, "dca_sign_init: started\n"); 4111*906Sgm89044 4112*906Sgm89044 if (ctx_template != NULL) 4113*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4114*906Sgm89044 4115*906Sgm89044 /* check mechanism */ 4116*906Sgm89044 switch (mechanism->cm_type) { 4117*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4118*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4119*906Sgm89044 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4120*906Sgm89044 break; 4121*906Sgm89044 case DSA_MECH_INFO_TYPE: 4122*906Sgm89044 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP, 4123*906Sgm89044 DCA_DSA_SIGN); 4124*906Sgm89044 break; 4125*906Sgm89044 default: 4126*906Sgm89044 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type " 4127*906Sgm89044 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4128*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4129*906Sgm89044 } 4130*906Sgm89044 4131*906Sgm89044 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error); 4132*906Sgm89044 4133*906Sgm89044 if (error == CRYPTO_SUCCESS) 4134*906Sgm89044 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4135*906Sgm89044 &softc->dca_ctx_list_lock); 4136*906Sgm89044 4137*906Sgm89044 return (error); 4138*906Sgm89044 } 4139*906Sgm89044 4140*906Sgm89044 static int 4141*906Sgm89044 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data, 4142*906Sgm89044 crypto_data_t *signature, 
crypto_req_handle_t req) 4143*906Sgm89044 { 4144*906Sgm89044 int error = CRYPTO_FAILED; 4145*906Sgm89044 dca_t *softc; 4146*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4147*906Sgm89044 int instance; 4148*906Sgm89044 4149*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4150*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4151*906Sgm89044 4152*906Sgm89044 /* extract softc and instance number from context */ 4153*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4154*906Sgm89044 DBG(softc, DENTRY, "dca_sign: started\n"); 4155*906Sgm89044 4156*906Sgm89044 /* check mechanism */ 4157*906Sgm89044 switch (DCA_MECH_FROM_CTX(ctx)) { 4158*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4159*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4160*906Sgm89044 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN); 4161*906Sgm89044 break; 4162*906Sgm89044 case DSA_MECH_INFO_TYPE: 4163*906Sgm89044 error = dca_dsa_sign(ctx, data, signature, req); 4164*906Sgm89044 break; 4165*906Sgm89044 default: 4166*906Sgm89044 cmn_err(CE_WARN, "dca_sign: unexpected mech type " 4167*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4168*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4169*906Sgm89044 } 4170*906Sgm89044 4171*906Sgm89044 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error); 4172*906Sgm89044 4173*906Sgm89044 return (error); 4174*906Sgm89044 } 4175*906Sgm89044 4176*906Sgm89044 /* ARGSUSED */ 4177*906Sgm89044 static int 4178*906Sgm89044 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data, 4179*906Sgm89044 crypto_req_handle_t req) 4180*906Sgm89044 { 4181*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4182*906Sgm89044 dca_t *softc; 4183*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4184*906Sgm89044 int instance; 4185*906Sgm89044 4186*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4187*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4188*906Sgm89044 4189*906Sgm89044 /* extract softc and instance number 
from context */ 4190*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4191*906Sgm89044 DBG(softc, DENTRY, "dca_sign_update: started\n"); 4192*906Sgm89044 4193*906Sgm89044 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type " 4194*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4195*906Sgm89044 4196*906Sgm89044 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error); 4197*906Sgm89044 4198*906Sgm89044 return (error); 4199*906Sgm89044 } 4200*906Sgm89044 4201*906Sgm89044 /* ARGSUSED */ 4202*906Sgm89044 static int 4203*906Sgm89044 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature, 4204*906Sgm89044 crypto_req_handle_t req) 4205*906Sgm89044 { 4206*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4207*906Sgm89044 dca_t *softc; 4208*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4209*906Sgm89044 int instance; 4210*906Sgm89044 4211*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4212*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4213*906Sgm89044 4214*906Sgm89044 /* extract softc and instance number from context */ 4215*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4216*906Sgm89044 DBG(softc, DENTRY, "dca_sign_final: started\n"); 4217*906Sgm89044 4218*906Sgm89044 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type " 4219*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4220*906Sgm89044 4221*906Sgm89044 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error); 4222*906Sgm89044 4223*906Sgm89044 return (error); 4224*906Sgm89044 } 4225*906Sgm89044 4226*906Sgm89044 static int 4227*906Sgm89044 dca_sign_atomic(crypto_provider_handle_t provider, 4228*906Sgm89044 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4229*906Sgm89044 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4230*906Sgm89044 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4231*906Sgm89044 { 4232*906Sgm89044 int error = CRYPTO_FAILED; 4233*906Sgm89044 dca_t 
*softc = (dca_t *)provider; 4234*906Sgm89044 4235*906Sgm89044 DBG(softc, DENTRY, "dca_sign_atomic: started\n"); 4236*906Sgm89044 4237*906Sgm89044 if (ctx_template != NULL) 4238*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4239*906Sgm89044 4240*906Sgm89044 /* check mechanism */ 4241*906Sgm89044 switch (mechanism->cm_type) { 4242*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4243*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4244*906Sgm89044 error = dca_rsaatomic(provider, session_id, mechanism, key, 4245*906Sgm89044 data, signature, KM_SLEEP, req, DCA_RSA_SIGN); 4246*906Sgm89044 break; 4247*906Sgm89044 case DSA_MECH_INFO_TYPE: 4248*906Sgm89044 error = dca_dsaatomic(provider, session_id, mechanism, key, 4249*906Sgm89044 data, signature, KM_SLEEP, req, DCA_DSA_SIGN); 4250*906Sgm89044 break; 4251*906Sgm89044 default: 4252*906Sgm89044 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type " 4253*906Sgm89044 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4254*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4255*906Sgm89044 } 4256*906Sgm89044 4257*906Sgm89044 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error); 4258*906Sgm89044 4259*906Sgm89044 return (error); 4260*906Sgm89044 } 4261*906Sgm89044 4262*906Sgm89044 /* ARGSUSED */ 4263*906Sgm89044 static int 4264*906Sgm89044 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4265*906Sgm89044 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4266*906Sgm89044 crypto_req_handle_t req) 4267*906Sgm89044 { 4268*906Sgm89044 int error = CRYPTO_FAILED; 4269*906Sgm89044 dca_t *softc; 4270*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4271*906Sgm89044 int instance; 4272*906Sgm89044 4273*906Sgm89044 /* extract softc and instance number from context */ 4274*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4275*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover_init: started\n"); 4276*906Sgm89044 4277*906Sgm89044 if (ctx_template != NULL) 4278*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 
4279*906Sgm89044 4280*906Sgm89044 /* check mechanism */ 4281*906Sgm89044 switch (mechanism->cm_type) { 4282*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4283*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4284*906Sgm89044 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4285*906Sgm89044 break; 4286*906Sgm89044 default: 4287*906Sgm89044 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type " 4288*906Sgm89044 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4289*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4290*906Sgm89044 } 4291*906Sgm89044 4292*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error); 4293*906Sgm89044 4294*906Sgm89044 if (error == CRYPTO_SUCCESS) 4295*906Sgm89044 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4296*906Sgm89044 &softc->dca_ctx_list_lock); 4297*906Sgm89044 4298*906Sgm89044 return (error); 4299*906Sgm89044 } 4300*906Sgm89044 4301*906Sgm89044 static int 4302*906Sgm89044 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data, 4303*906Sgm89044 crypto_data_t *signature, crypto_req_handle_t req) 4304*906Sgm89044 { 4305*906Sgm89044 int error = CRYPTO_FAILED; 4306*906Sgm89044 dca_t *softc; 4307*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4308*906Sgm89044 int instance; 4309*906Sgm89044 4310*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4311*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4312*906Sgm89044 4313*906Sgm89044 /* extract softc and instance number from context */ 4314*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4315*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover: started\n"); 4316*906Sgm89044 4317*906Sgm89044 /* check mechanism */ 4318*906Sgm89044 switch (DCA_MECH_FROM_CTX(ctx)) { 4319*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4320*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4321*906Sgm89044 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR); 4322*906Sgm89044 break; 4323*906Sgm89044 default: 4324*906Sgm89044 cmn_err(CE_WARN, 
"dca_sign_recover: unexpected mech type " 4325*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4326*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4327*906Sgm89044 } 4328*906Sgm89044 4329*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error); 4330*906Sgm89044 4331*906Sgm89044 return (error); 4332*906Sgm89044 } 4333*906Sgm89044 4334*906Sgm89044 static int 4335*906Sgm89044 dca_sign_recover_atomic(crypto_provider_handle_t provider, 4336*906Sgm89044 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4337*906Sgm89044 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4338*906Sgm89044 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4339*906Sgm89044 { 4340*906Sgm89044 int error = CRYPTO_FAILED; 4341*906Sgm89044 dca_t *softc = (dca_t *)provider; 4342*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4343*906Sgm89044 int instance; 4344*906Sgm89044 4345*906Sgm89044 instance = ddi_get_instance(softc->dca_dip); 4346*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n"); 4347*906Sgm89044 4348*906Sgm89044 if (ctx_template != NULL) 4349*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4350*906Sgm89044 4351*906Sgm89044 /* check mechanism */ 4352*906Sgm89044 switch (mechanism->cm_type) { 4353*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4354*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4355*906Sgm89044 error = dca_rsaatomic(provider, session_id, mechanism, key, 4356*906Sgm89044 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR); 4357*906Sgm89044 break; 4358*906Sgm89044 default: 4359*906Sgm89044 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type" 4360*906Sgm89044 " 0x%llx\n", (unsigned long long)mechanism->cm_type); 4361*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4362*906Sgm89044 } 4363*906Sgm89044 4364*906Sgm89044 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error); 4365*906Sgm89044 4366*906Sgm89044 return (error); 4367*906Sgm89044 } 4368*906Sgm89044 4369*906Sgm89044 
/* 4370*906Sgm89044 * Verify entry points. 4371*906Sgm89044 */ 4372*906Sgm89044 4373*906Sgm89044 /* ARGSUSED */ 4374*906Sgm89044 static int 4375*906Sgm89044 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4376*906Sgm89044 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4377*906Sgm89044 crypto_req_handle_t req) 4378*906Sgm89044 { 4379*906Sgm89044 int error = CRYPTO_FAILED; 4380*906Sgm89044 dca_t *softc; 4381*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4382*906Sgm89044 int instance; 4383*906Sgm89044 4384*906Sgm89044 /* extract softc and instance number from context */ 4385*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4386*906Sgm89044 DBG(softc, DENTRY, "dca_verify_init: started\n"); 4387*906Sgm89044 4388*906Sgm89044 if (ctx_template != NULL) 4389*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4390*906Sgm89044 4391*906Sgm89044 /* check mechanism */ 4392*906Sgm89044 switch (mechanism->cm_type) { 4393*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4394*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4395*906Sgm89044 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4396*906Sgm89044 break; 4397*906Sgm89044 case DSA_MECH_INFO_TYPE: 4398*906Sgm89044 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP, 4399*906Sgm89044 DCA_DSA_VRFY); 4400*906Sgm89044 break; 4401*906Sgm89044 default: 4402*906Sgm89044 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type " 4403*906Sgm89044 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4404*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4405*906Sgm89044 } 4406*906Sgm89044 4407*906Sgm89044 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error); 4408*906Sgm89044 4409*906Sgm89044 if (error == CRYPTO_SUCCESS) 4410*906Sgm89044 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4411*906Sgm89044 &softc->dca_ctx_list_lock); 4412*906Sgm89044 4413*906Sgm89044 return (error); 4414*906Sgm89044 } 4415*906Sgm89044 4416*906Sgm89044 static int 4417*906Sgm89044 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, 
crypto_data_t *signature, 4418*906Sgm89044 crypto_req_handle_t req) 4419*906Sgm89044 { 4420*906Sgm89044 int error = CRYPTO_FAILED; 4421*906Sgm89044 dca_t *softc; 4422*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4423*906Sgm89044 int instance; 4424*906Sgm89044 4425*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4426*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4427*906Sgm89044 4428*906Sgm89044 /* extract softc and instance number from context */ 4429*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4430*906Sgm89044 DBG(softc, DENTRY, "dca_verify: started\n"); 4431*906Sgm89044 4432*906Sgm89044 /* check mechanism */ 4433*906Sgm89044 switch (DCA_MECH_FROM_CTX(ctx)) { 4434*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4435*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4436*906Sgm89044 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY); 4437*906Sgm89044 break; 4438*906Sgm89044 case DSA_MECH_INFO_TYPE: 4439*906Sgm89044 error = dca_dsa_verify(ctx, data, signature, req); 4440*906Sgm89044 break; 4441*906Sgm89044 default: 4442*906Sgm89044 cmn_err(CE_WARN, "dca_verify: unexpected mech type " 4443*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4444*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4445*906Sgm89044 } 4446*906Sgm89044 4447*906Sgm89044 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error); 4448*906Sgm89044 4449*906Sgm89044 return (error); 4450*906Sgm89044 } 4451*906Sgm89044 4452*906Sgm89044 /* ARGSUSED */ 4453*906Sgm89044 static int 4454*906Sgm89044 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data, 4455*906Sgm89044 crypto_req_handle_t req) 4456*906Sgm89044 { 4457*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4458*906Sgm89044 dca_t *softc; 4459*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4460*906Sgm89044 int instance; 4461*906Sgm89044 4462*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4463*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4464*906Sgm89044 
4465*906Sgm89044 /* extract softc and instance number from context */ 4466*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4467*906Sgm89044 DBG(softc, DENTRY, "dca_verify_update: started\n"); 4468*906Sgm89044 4469*906Sgm89044 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type " 4470*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4471*906Sgm89044 4472*906Sgm89044 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error); 4473*906Sgm89044 4474*906Sgm89044 return (error); 4475*906Sgm89044 } 4476*906Sgm89044 4477*906Sgm89044 /* ARGSUSED */ 4478*906Sgm89044 static int 4479*906Sgm89044 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature, 4480*906Sgm89044 crypto_req_handle_t req) 4481*906Sgm89044 { 4482*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4483*906Sgm89044 dca_t *softc; 4484*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4485*906Sgm89044 int instance; 4486*906Sgm89044 4487*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4488*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4489*906Sgm89044 4490*906Sgm89044 /* extract softc and instance number from context */ 4491*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4492*906Sgm89044 DBG(softc, DENTRY, "dca_verify_final: started\n"); 4493*906Sgm89044 4494*906Sgm89044 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type " 4495*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4496*906Sgm89044 4497*906Sgm89044 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error); 4498*906Sgm89044 4499*906Sgm89044 return (error); 4500*906Sgm89044 } 4501*906Sgm89044 4502*906Sgm89044 static int 4503*906Sgm89044 dca_verify_atomic(crypto_provider_handle_t provider, 4504*906Sgm89044 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4505*906Sgm89044 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4506*906Sgm89044 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4507*906Sgm89044 
{ 4508*906Sgm89044 int error = CRYPTO_FAILED; 4509*906Sgm89044 dca_t *softc = (dca_t *)provider; 4510*906Sgm89044 4511*906Sgm89044 DBG(softc, DENTRY, "dca_verify_atomic: started\n"); 4512*906Sgm89044 4513*906Sgm89044 if (ctx_template != NULL) 4514*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4515*906Sgm89044 4516*906Sgm89044 /* check mechanism */ 4517*906Sgm89044 switch (mechanism->cm_type) { 4518*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4519*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4520*906Sgm89044 error = dca_rsaatomic(provider, session_id, mechanism, key, 4521*906Sgm89044 signature, data, KM_SLEEP, req, DCA_RSA_VRFY); 4522*906Sgm89044 break; 4523*906Sgm89044 case DSA_MECH_INFO_TYPE: 4524*906Sgm89044 error = dca_dsaatomic(provider, session_id, mechanism, key, 4525*906Sgm89044 data, signature, KM_SLEEP, req, DCA_DSA_VRFY); 4526*906Sgm89044 break; 4527*906Sgm89044 default: 4528*906Sgm89044 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type " 4529*906Sgm89044 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4530*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4531*906Sgm89044 } 4532*906Sgm89044 4533*906Sgm89044 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error); 4534*906Sgm89044 4535*906Sgm89044 return (error); 4536*906Sgm89044 } 4537*906Sgm89044 4538*906Sgm89044 /* ARGSUSED */ 4539*906Sgm89044 static int 4540*906Sgm89044 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4541*906Sgm89044 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4542*906Sgm89044 crypto_req_handle_t req) 4543*906Sgm89044 { 4544*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4545*906Sgm89044 dca_t *softc; 4546*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4547*906Sgm89044 int instance; 4548*906Sgm89044 4549*906Sgm89044 /* extract softc and instance number from context */ 4550*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4551*906Sgm89044 DBG(softc, DENTRY, "dca_verify_recover_init: started\n"); 4552*906Sgm89044 4553*906Sgm89044 
if (ctx_template != NULL) 4554*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4555*906Sgm89044 4556*906Sgm89044 /* check mechanism */ 4557*906Sgm89044 switch (mechanism->cm_type) { 4558*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4559*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4560*906Sgm89044 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4561*906Sgm89044 break; 4562*906Sgm89044 default: 4563*906Sgm89044 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type" 4564*906Sgm89044 " 0x%llx\n", (unsigned long long)mechanism->cm_type); 4565*906Sgm89044 } 4566*906Sgm89044 4567*906Sgm89044 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error); 4568*906Sgm89044 4569*906Sgm89044 if (error == CRYPTO_SUCCESS) 4570*906Sgm89044 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4571*906Sgm89044 &softc->dca_ctx_list_lock); 4572*906Sgm89044 4573*906Sgm89044 return (error); 4574*906Sgm89044 } 4575*906Sgm89044 4576*906Sgm89044 static int 4577*906Sgm89044 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature, 4578*906Sgm89044 crypto_data_t *data, crypto_req_handle_t req) 4579*906Sgm89044 { 4580*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4581*906Sgm89044 dca_t *softc; 4582*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4583*906Sgm89044 int instance; 4584*906Sgm89044 4585*906Sgm89044 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4586*906Sgm89044 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4587*906Sgm89044 4588*906Sgm89044 /* extract softc and instance number from context */ 4589*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4590*906Sgm89044 DBG(softc, DENTRY, "dca_verify_recover: started\n"); 4591*906Sgm89044 4592*906Sgm89044 /* check mechanism */ 4593*906Sgm89044 switch (DCA_MECH_FROM_CTX(ctx)) { 4594*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4595*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4596*906Sgm89044 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR); 4597*906Sgm89044 break; 4598*906Sgm89044 
default: 4599*906Sgm89044 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type " 4600*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4601*906Sgm89044 } 4602*906Sgm89044 4603*906Sgm89044 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error); 4604*906Sgm89044 4605*906Sgm89044 return (error); 4606*906Sgm89044 } 4607*906Sgm89044 4608*906Sgm89044 static int 4609*906Sgm89044 dca_verify_recover_atomic(crypto_provider_handle_t provider, 4610*906Sgm89044 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4611*906Sgm89044 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4612*906Sgm89044 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4613*906Sgm89044 { 4614*906Sgm89044 int error = CRYPTO_MECHANISM_INVALID; 4615*906Sgm89044 dca_t *softc = (dca_t *)provider; 4616*906Sgm89044 4617*906Sgm89044 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n"); 4618*906Sgm89044 4619*906Sgm89044 if (ctx_template != NULL) 4620*906Sgm89044 return (CRYPTO_ARGUMENTS_BAD); 4621*906Sgm89044 4622*906Sgm89044 /* check mechanism */ 4623*906Sgm89044 switch (mechanism->cm_type) { 4624*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4625*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4626*906Sgm89044 error = dca_rsaatomic(provider, session_id, mechanism, key, 4627*906Sgm89044 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR); 4628*906Sgm89044 break; 4629*906Sgm89044 default: 4630*906Sgm89044 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech " 4631*906Sgm89044 "type 0x%llx\n", (unsigned long long)mechanism->cm_type); 4632*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4633*906Sgm89044 } 4634*906Sgm89044 4635*906Sgm89044 DBG(softc, DENTRY, 4636*906Sgm89044 "dca_verify_recover_atomic: done, err = 0x%x", error); 4637*906Sgm89044 4638*906Sgm89044 return (error); 4639*906Sgm89044 } 4640*906Sgm89044 4641*906Sgm89044 /* 4642*906Sgm89044 * Random number entry points. 
4643*906Sgm89044 */ 4644*906Sgm89044 4645*906Sgm89044 /* ARGSUSED */ 4646*906Sgm89044 static int 4647*906Sgm89044 dca_generate_random(crypto_provider_handle_t provider, 4648*906Sgm89044 crypto_session_id_t session_id, 4649*906Sgm89044 uchar_t *buf, size_t len, crypto_req_handle_t req) 4650*906Sgm89044 { 4651*906Sgm89044 int error = CRYPTO_FAILED; 4652*906Sgm89044 dca_t *softc = (dca_t *)provider; 4653*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4654*906Sgm89044 int instance; 4655*906Sgm89044 4656*906Sgm89044 instance = ddi_get_instance(softc->dca_dip); 4657*906Sgm89044 DBG(softc, DENTRY, "dca_generate_random: started"); 4658*906Sgm89044 4659*906Sgm89044 error = dca_rng(softc, buf, len, req); 4660*906Sgm89044 4661*906Sgm89044 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error); 4662*906Sgm89044 4663*906Sgm89044 return (error); 4664*906Sgm89044 } 4665*906Sgm89044 4666*906Sgm89044 /* 4667*906Sgm89044 * Context management entry points. 4668*906Sgm89044 */ 4669*906Sgm89044 4670*906Sgm89044 int 4671*906Sgm89044 dca_free_context(crypto_ctx_t *ctx) 4672*906Sgm89044 { 4673*906Sgm89044 int error = CRYPTO_SUCCESS; 4674*906Sgm89044 dca_t *softc; 4675*906Sgm89044 /* LINTED E_FUNC_SET_NOT_USED */ 4676*906Sgm89044 int instance; 4677*906Sgm89044 4678*906Sgm89044 /* extract softc and instance number from context */ 4679*906Sgm89044 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4680*906Sgm89044 DBG(softc, DENTRY, "dca_free_context: entered"); 4681*906Sgm89044 4682*906Sgm89044 if (ctx->cc_provider_private == NULL) 4683*906Sgm89044 return (error); 4684*906Sgm89044 4685*906Sgm89044 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock); 4686*906Sgm89044 4687*906Sgm89044 error = dca_free_context_low(ctx); 4688*906Sgm89044 4689*906Sgm89044 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error); 4690*906Sgm89044 4691*906Sgm89044 return (error); 4692*906Sgm89044 } 4693*906Sgm89044 4694*906Sgm89044 static int 4695*906Sgm89044 dca_free_context_low(crypto_ctx_t 
*ctx) 4696*906Sgm89044 { 4697*906Sgm89044 int error = CRYPTO_SUCCESS; 4698*906Sgm89044 4699*906Sgm89044 /* check mechanism */ 4700*906Sgm89044 switch (DCA_MECH_FROM_CTX(ctx)) { 4701*906Sgm89044 case DES_CBC_MECH_INFO_TYPE: 4702*906Sgm89044 case DES3_CBC_MECH_INFO_TYPE: 4703*906Sgm89044 dca_3desctxfree(ctx); 4704*906Sgm89044 break; 4705*906Sgm89044 case RSA_PKCS_MECH_INFO_TYPE: 4706*906Sgm89044 case RSA_X_509_MECH_INFO_TYPE: 4707*906Sgm89044 dca_rsactxfree(ctx); 4708*906Sgm89044 break; 4709*906Sgm89044 case DSA_MECH_INFO_TYPE: 4710*906Sgm89044 dca_dsactxfree(ctx); 4711*906Sgm89044 break; 4712*906Sgm89044 default: 4713*906Sgm89044 /* Should never reach here */ 4714*906Sgm89044 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type " 4715*906Sgm89044 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4716*906Sgm89044 error = CRYPTO_MECHANISM_INVALID; 4717*906Sgm89044 } 4718*906Sgm89044 4719*906Sgm89044 return (error); 4720*906Sgm89044 } 4721*906Sgm89044 4722*906Sgm89044 4723*906Sgm89044 /* Free any unfreed private context. It is called in detach. 
*/ 4724*906Sgm89044 static void 4725*906Sgm89044 dca_free_context_list(dca_t *dca) 4726*906Sgm89044 { 4727*906Sgm89044 dca_listnode_t *node; 4728*906Sgm89044 crypto_ctx_t ctx; 4729*906Sgm89044 4730*906Sgm89044 (void) memset(&ctx, 0, sizeof (ctx)); 4731*906Sgm89044 ctx.cc_provider = dca; 4732*906Sgm89044 4733*906Sgm89044 while ((node = dca_delist2(&dca->dca_ctx_list, 4734*906Sgm89044 &dca->dca_ctx_list_lock)) != NULL) { 4735*906Sgm89044 ctx.cc_provider_private = node; 4736*906Sgm89044 (void) dca_free_context_low(&ctx); 4737*906Sgm89044 } 4738*906Sgm89044 } 4739*906Sgm89044 4740*906Sgm89044 static int 4741*906Sgm89044 ext_info_sym(crypto_provider_handle_t prov, 4742*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq) 4743*906Sgm89044 { 4744*906Sgm89044 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM)); 4745*906Sgm89044 } 4746*906Sgm89044 4747*906Sgm89044 static int 4748*906Sgm89044 ext_info_asym(crypto_provider_handle_t prov, 4749*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq) 4750*906Sgm89044 { 4751*906Sgm89044 int rv; 4752*906Sgm89044 4753*906Sgm89044 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM); 4754*906Sgm89044 /* The asymmetric cipher slot supports random */ 4755*906Sgm89044 ext_info->ei_flags |= CRYPTO_EXTF_RNG; 4756*906Sgm89044 4757*906Sgm89044 return (rv); 4758*906Sgm89044 } 4759*906Sgm89044 4760*906Sgm89044 /* ARGSUSED */ 4761*906Sgm89044 static int 4762*906Sgm89044 ext_info_base(crypto_provider_handle_t prov, 4763*906Sgm89044 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id) 4764*906Sgm89044 { 4765*906Sgm89044 dca_t *dca = (dca_t *)prov; 4766*906Sgm89044 int len; 4767*906Sgm89044 4768*906Sgm89044 /* Label */ 4769*906Sgm89044 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s", 4770*906Sgm89044 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id); 4771*906Sgm89044 len = strlen((char *)ext_info->ei_label); 4772*906Sgm89044 (void) 
memset(ext_info->ei_label + len, ' ', 4773*906Sgm89044 CRYPTO_EXT_SIZE_LABEL - len); 4774*906Sgm89044 4775*906Sgm89044 /* Manufacturer ID */ 4776*906Sgm89044 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s", 4777*906Sgm89044 DCA_MANUFACTURER_ID); 4778*906Sgm89044 len = strlen((char *)ext_info->ei_manufacturerID); 4779*906Sgm89044 (void) memset(ext_info->ei_manufacturerID + len, ' ', 4780*906Sgm89044 CRYPTO_EXT_SIZE_MANUF - len); 4781*906Sgm89044 4782*906Sgm89044 /* Model */ 4783*906Sgm89044 (void) sprintf((char *)ext_info->ei_model, dca->dca_model); 4784*906Sgm89044 4785*906Sgm89044 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model); 4786*906Sgm89044 4787*906Sgm89044 len = strlen((char *)ext_info->ei_model); 4788*906Sgm89044 (void) memset(ext_info->ei_model + len, ' ', 4789*906Sgm89044 CRYPTO_EXT_SIZE_MODEL - len); 4790*906Sgm89044 4791*906Sgm89044 /* Serial Number. Blank for Deimos */ 4792*906Sgm89044 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL); 4793*906Sgm89044 4794*906Sgm89044 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED; 4795*906Sgm89044 4796*906Sgm89044 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO; 4797*906Sgm89044 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO; 4798*906Sgm89044 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO; 4799*906Sgm89044 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO; 4800*906Sgm89044 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO; 4801*906Sgm89044 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO; 4802*906Sgm89044 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO; 4803*906Sgm89044 ext_info->ei_hardware_version.cv_major = 0; 4804*906Sgm89044 ext_info->ei_hardware_version.cv_minor = 0; 4805*906Sgm89044 ext_info->ei_firmware_version.cv_major = 0; 4806*906Sgm89044 ext_info->ei_firmware_version.cv_minor = 0; 4807*906Sgm89044 4808*906Sgm89044 /* Time. 
No need to be supplied for token without a clock */ 4809*906Sgm89044 ext_info->ei_time[0] = '\000'; 4810*906Sgm89044 4811*906Sgm89044 return (CRYPTO_SUCCESS); 4812*906Sgm89044 } 4813*906Sgm89044 4814*906Sgm89044 static void 4815*906Sgm89044 dca_fma_init(dca_t *dca) 4816*906Sgm89044 { 4817*906Sgm89044 ddi_iblock_cookie_t fm_ibc; 4818*906Sgm89044 int fm_capabilities = DDI_FM_EREPORT_CAPABLE | 4819*906Sgm89044 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE | 4820*906Sgm89044 DDI_FM_ERRCB_CAPABLE; 4821*906Sgm89044 4822*906Sgm89044 /* Read FMA capabilities from dca.conf file (if present) */ 4823*906Sgm89044 dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip, 4824*906Sgm89044 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 4825*906Sgm89044 fm_capabilities); 4826*906Sgm89044 4827*906Sgm89044 DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities); 4828*906Sgm89044 4829*906Sgm89044 /* Only register with IO Fault Services if we have some capability */ 4830*906Sgm89044 if (dca->fm_capabilities) { 4831*906Sgm89044 dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC; 4832*906Sgm89044 dca_devattr.devacc_attr_access = DDI_FLAGERR_ACC; 4833*906Sgm89044 dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR; 4834*906Sgm89044 4835*906Sgm89044 /* Register capabilities with IO Fault Services */ 4836*906Sgm89044 ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc); 4837*906Sgm89044 DBG(dca, DWARN, "fm_capable() = 0x%x", 4838*906Sgm89044 ddi_fm_capable(dca->dca_dip)); 4839*906Sgm89044 4840*906Sgm89044 /* 4841*906Sgm89044 * Initialize pci ereport capabilities if ereport capable 4842*906Sgm89044 */ 4843*906Sgm89044 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) 4844*906Sgm89044 pci_ereport_setup(dca->dca_dip); 4845*906Sgm89044 4846*906Sgm89044 /* 4847*906Sgm89044 * Initialize callback mutex and register error callback if 4848*906Sgm89044 * error callback capable. 
4849*906Sgm89044 */ 4850*906Sgm89044 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) { 4851*906Sgm89044 ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb, 4852*906Sgm89044 (void *)dca); 4853*906Sgm89044 } 4854*906Sgm89044 } else { 4855*906Sgm89044 /* 4856*906Sgm89044 * These fields have to be cleared of FMA if there are no 4857*906Sgm89044 * FMA capabilities at runtime. 4858*906Sgm89044 */ 4859*906Sgm89044 dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC; 4860*906Sgm89044 dca_devattr.devacc_attr_access = DDI_DEFAULT_ACC; 4861*906Sgm89044 dca_dmaattr.dma_attr_flags = 0; 4862*906Sgm89044 } 4863*906Sgm89044 } 4864*906Sgm89044 4865*906Sgm89044 4866*906Sgm89044 static void 4867*906Sgm89044 dca_fma_fini(dca_t *dca) 4868*906Sgm89044 { 4869*906Sgm89044 /* Only unregister FMA capabilities if we registered some */ 4870*906Sgm89044 if (dca->fm_capabilities) { 4871*906Sgm89044 4872*906Sgm89044 /* 4873*906Sgm89044 * Release any resources allocated by pci_ereport_setup() 4874*906Sgm89044 */ 4875*906Sgm89044 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) { 4876*906Sgm89044 pci_ereport_teardown(dca->dca_dip); 4877*906Sgm89044 } 4878*906Sgm89044 4879*906Sgm89044 /* 4880*906Sgm89044 * Free callback mutex and un-register error callback if 4881*906Sgm89044 * error callback capable. 
4882*906Sgm89044 */ 4883*906Sgm89044 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) { 4884*906Sgm89044 ddi_fm_handler_unregister(dca->dca_dip); 4885*906Sgm89044 } 4886*906Sgm89044 4887*906Sgm89044 /* Unregister from IO Fault Services */ 4888*906Sgm89044 ddi_fm_fini(dca->dca_dip); 4889*906Sgm89044 DBG(dca, DWARN, "fm_capable() = 0x%x", 4890*906Sgm89044 ddi_fm_capable(dca->dca_dip)); 4891*906Sgm89044 } 4892*906Sgm89044 } 4893*906Sgm89044 4894*906Sgm89044 4895*906Sgm89044 /* 4896*906Sgm89044 * The IO fault service error handling callback function 4897*906Sgm89044 */ 4898*906Sgm89044 static int 4899*906Sgm89044 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4900*906Sgm89044 { 4901*906Sgm89044 int rv; 4902*906Sgm89044 dca_t *dca = (dca_t *)impl_data; 4903*906Sgm89044 uint16_t pci_status; 4904*906Sgm89044 ddi_fm_error_t dca_err; 4905*906Sgm89044 4906*906Sgm89044 rv = err->fme_status = DDI_FM_OK; 4907*906Sgm89044 if (err->fme_flag == DDI_FM_ERR_EXPECTED) { 4908*906Sgm89044 /* 4909*906Sgm89044 * dca never perfrom DDI_ACC_CAUTIOUS protected operations 4910*906Sgm89044 * but if it did. we would handle it here 4911*906Sgm89044 */ 4912*906Sgm89044 return (rv); 4913*906Sgm89044 } 4914*906Sgm89044 4915*906Sgm89044 /* 4916*906Sgm89044 * See if there is a pci error as well 4917*906Sgm89044 * The updated pci_ereport_post function requires a reinitialized 4918*906Sgm89044 * ddi_fm_error_t structure with a zero ena field. 
4919*906Sgm89044 */ 4920*906Sgm89044 bzero(&dca_err, sizeof (ddi_fm_error_t)); 4921*906Sgm89044 dca_err.fme_version = DDI_FME_VERSION; 4922*906Sgm89044 dca_err.fme_flag = DDI_FM_ERR_UNEXPECTED; 4923*906Sgm89044 pci_ereport_post(dip, &dca_err, &pci_status); 4924*906Sgm89044 if (pci_status != 0) { 4925*906Sgm89044 dca_failure(dca, DDI_DATAPATH_FAULT, 4926*906Sgm89044 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR, 4927*906Sgm89044 "fault PCI in FMA callback."); 4928*906Sgm89044 4929*906Sgm89044 rv = err->fme_status = DDI_FM_FATAL; 4930*906Sgm89044 return (rv); 4931*906Sgm89044 } 4932*906Sgm89044 4933*906Sgm89044 return (rv); 4934*906Sgm89044 } 4935*906Sgm89044 4936*906Sgm89044 4937*906Sgm89044 static int 4938*906Sgm89044 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle, 4939*906Sgm89044 dca_fma_eclass_t eclass_index) 4940*906Sgm89044 { 4941*906Sgm89044 ddi_fm_error_t de; 4942*906Sgm89044 int version = 0; 4943*906Sgm89044 uint16_t pci_status; 4944*906Sgm89044 4945*906Sgm89044 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) { 4946*906Sgm89044 ddi_fm_acc_err_get(handle, &de, version); 4947*906Sgm89044 if (de.fme_status != DDI_FM_OK) { 4948*906Sgm89044 pci_ereport_post(dca->dca_dip, &de, &pci_status); 4949*906Sgm89044 dca_failure(dca, DDI_DATAPATH_FAULT, 4950*906Sgm89044 eclass_index, fm_ena_increment(de.fme_ena), 4951*906Sgm89044 CRYPTO_DEVICE_ERROR, ""); 4952*906Sgm89044 return (DDI_FAILURE); 4953*906Sgm89044 } 4954*906Sgm89044 } 4955*906Sgm89044 4956*906Sgm89044 return (DDI_SUCCESS); 4957*906Sgm89044 } 4958*906Sgm89044 4959*906Sgm89044 int 4960*906Sgm89044 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle, 4961*906Sgm89044 dca_fma_eclass_t eclass_index) 4962*906Sgm89044 { 4963*906Sgm89044 ddi_fm_error_t de; 4964*906Sgm89044 int version = 0; 4965*906Sgm89044 uint16_t pci_status; 4966*906Sgm89044 4967*906Sgm89044 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) { 4968*906Sgm89044 ddi_fm_dma_err_get(handle, &de, version); 4969*906Sgm89044 if (de.fme_status 
!= DDI_FM_OK) { 4970*906Sgm89044 pci_ereport_post(dca->dca_dip, &de, &pci_status); 4971*906Sgm89044 dca_failure(dca, DDI_DATAPATH_FAULT, 4972*906Sgm89044 eclass_index, fm_ena_increment(de.fme_ena), 4973*906Sgm89044 CRYPTO_DEVICE_ERROR, ""); 4974*906Sgm89044 return (DDI_FAILURE); 4975*906Sgm89044 } 4976*906Sgm89044 } 4977*906Sgm89044 4978*906Sgm89044 return (DDI_SUCCESS); 4979*906Sgm89044 } 4980*906Sgm89044 4981*906Sgm89044 static uint64_t 4982*906Sgm89044 dca_ena(uint64_t ena) 4983*906Sgm89044 { 4984*906Sgm89044 if (ena == 0) 4985*906Sgm89044 ena = fm_ena_generate(0, FM_ENA_FMT1); 4986*906Sgm89044 else 4987*906Sgm89044 ena = fm_ena_increment(ena); 4988*906Sgm89044 return (ena); 4989*906Sgm89044 } 4990*906Sgm89044 4991*906Sgm89044 static char * 4992*906Sgm89044 dca_fma_eclass_string(char *model, dca_fma_eclass_t index) 4993*906Sgm89044 { 4994*906Sgm89044 if (strstr(model, "500")) 4995*906Sgm89044 return (dca_fma_eclass_sca500[index]); 4996*906Sgm89044 else 4997*906Sgm89044 return (dca_fma_eclass_sca1000[index]); 4998*906Sgm89044 } 4999