1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * This file contains the core framework routines for the
27 * kernel cryptographic framework. These routines are at the
 28  * middle layer, between the kernel API/ioctls and the SPI.
29 */
30
31 #include <sys/types.h>
32 #include <sys/errno.h>
33 #include <sys/kmem.h>
34 #include <sys/proc.h>
35 #include <sys/cpuvar.h>
36 #include <sys/cpupart.h>
37 #include <sys/ksynch.h>
38 #include <sys/callb.h>
39 #include <sys/cmn_err.h>
40 #include <sys/systm.h>
41 #include <sys/sysmacros.h>
42 #include <sys/kstat.h>
43 #include <sys/crypto/common.h>
44 #include <sys/crypto/impl.h>
45 #include <sys/crypto/sched_impl.h>
46 #include <sys/crypto/api.h>
47 #include <sys/crypto/spi.h>
48 #include <sys/taskq_impl.h>
49 #include <sys/ddi.h>
50 #include <sys/sunddi.h>
51
52
53 kcf_global_swq_t *gswq; /* Global software queue */
54
55 /* Thread pool related variables */
56 static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */
57 int kcf_maxthreads = 2;
58 int kcf_minthreads = 1;
59 int kcf_thr_multiple = 2; /* Boot-time tunable for experimentation */
60 static ulong_t kcf_idlethr_timeout;
61 static boolean_t kcf_sched_running = B_FALSE;
62 #define KCF_DEFAULT_THRTIMEOUT 60000000 /* 60 seconds */
63
64 /* kmem caches used by the scheduler */
65 static struct kmem_cache *kcf_sreq_cache;
66 static struct kmem_cache *kcf_areq_cache;
67 static struct kmem_cache *kcf_context_cache;
68
69 /* Global request ID table */
70 static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
71
72 /* KCF stats. Not protected. */
73 static kcf_stats_t kcf_ksdata = {
74 { "total threads in pool", KSTAT_DATA_UINT32},
75 { "idle threads in pool", KSTAT_DATA_UINT32},
76 { "min threads in pool", KSTAT_DATA_UINT32},
77 { "max threads in pool", KSTAT_DATA_UINT32},
78 { "requests in gswq", KSTAT_DATA_UINT32},
79 { "max requests in gswq", KSTAT_DATA_UINT32},
80 { "threads for HW taskq", KSTAT_DATA_UINT32},
81 { "minalloc for HW taskq", KSTAT_DATA_UINT32},
82 { "maxalloc for HW taskq", KSTAT_DATA_UINT32}
83 };
84
85 static kstat_t *kcf_misc_kstat = NULL;
86 ulong_t kcf_swprov_hndl = 0;
87
88 static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
89 kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
90 static int kcf_disp_sw_request(kcf_areq_node_t *);
91 static void process_req_hwp(void *);
92 static kcf_areq_node_t *kcf_dequeue();
93 static int kcf_enqueue(kcf_areq_node_t *);
94 static void kcf_failover_thread();
95 static void kcfpool_alloc();
96 static void kcf_reqid_delete(kcf_areq_node_t *areq);
97 static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
98 static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
99 static void compute_min_max_threads();
100
101
102 /*
103 * Create a new context.
104 */
105 crypto_ctx_t *
106 kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
107 crypto_session_id_t sid)
108 {
109 crypto_ctx_t *ctx;
110 kcf_context_t *kcf_ctx;
111
112 kcf_ctx = kmem_cache_alloc(kcf_context_cache,
113 (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
114 if (kcf_ctx == NULL)
115 return (NULL);
116
117 /* initialize the context for the consumer */
118 kcf_ctx->kc_refcnt = 1;
119 kcf_ctx->kc_req_chain_first = NULL;
120 kcf_ctx->kc_req_chain_last = NULL;
121 kcf_ctx->kc_secondctx = NULL;
122 KCF_PROV_REFHOLD(pd);
123 kcf_ctx->kc_prov_desc = pd;
124 kcf_ctx->kc_sw_prov_desc = NULL;
125 kcf_ctx->kc_mech = NULL;
126
127 ctx = &kcf_ctx->kc_glbl_ctx;
128 ctx->cc_provider = pd->pd_prov_handle;
129 ctx->cc_session = sid;
130 ctx->cc_provider_private = NULL;
131 ctx->cc_framework_private = (void *)kcf_ctx;
132 ctx->cc_flags = 0;
133 ctx->cc_opstate = NULL;
134
135 return (ctx);
136 }
137
138 /*
139 * Allocate a new async request node.
140 *
141 * ictx - Framework private context pointer
142  * crq - Has callback function and argument. Should be non-NULL.
143 * req - The parameters to pass to the SPI
144 */
145 static kcf_areq_node_t *
146 kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
147 crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
148 {
149 kcf_areq_node_t *arptr, *areq;
150
151 ASSERT(crq != NULL);
152 arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
153 if (arptr == NULL)
154 return (NULL);
155
156 arptr->an_state = REQ_ALLOCATED;
157 arptr->an_reqarg = *crq;
158 arptr->an_params = *req;
159 arptr->an_context = ictx;
160 arptr->an_isdual = isdual;
161
162 arptr->an_next = arptr->an_prev = NULL;
163 KCF_PROV_REFHOLD(pd);
164 arptr->an_provider = pd;
165 arptr->an_tried_plist = NULL;
166 arptr->an_refcnt = 1;
167 arptr->an_idnext = arptr->an_idprev = NULL;
168
169 /*
170 	 * Requests for context-less operations do not use the
171 	 * an_is_my_turn and an_ctxchain_next fields.
172 */
173 if (ictx == NULL)
174 return (arptr);
175
176 KCF_CONTEXT_REFHOLD(ictx);
177 /*
178 * Chain this request to the context.
179 */
180 mutex_enter(&ictx->kc_in_use_lock);
181 arptr->an_ctxchain_next = NULL;
182 if ((areq = ictx->kc_req_chain_last) == NULL) {
183 arptr->an_is_my_turn = B_TRUE;
184 ictx->kc_req_chain_last =
185 ictx->kc_req_chain_first = arptr;
186 } else {
187 ASSERT(ictx->kc_req_chain_first != NULL);
188 arptr->an_is_my_turn = B_FALSE;
189 /* Insert the new request to the end of the chain. */
190 areq->an_ctxchain_next = arptr;
191 ictx->kc_req_chain_last = arptr;
192 }
193 mutex_exit(&ictx->kc_in_use_lock);
194
195 return (arptr);
196 }
197
198 /*
199 * Queue the request node and do one of the following:
200 * - If there is an idle thread signal it to run.
201 * - If there is no idle thread and max running threads is not
202 * reached, signal the creator thread for more threads.
203 *
204  * If neither condition above is met, we don't need to do
205  * anything. The request will be picked up by one of the
206  * worker threads when one becomes available.
207 */
208 static int
209 kcf_disp_sw_request(kcf_areq_node_t *areq)
210 {
211 int err;
212 int cnt = 0;
213
214 if ((err = kcf_enqueue(areq)) != 0)
215 return (err);
216
217 if (kcfpool->kp_idlethreads > 0) {
218 /* Signal an idle thread to run */
219 mutex_enter(&gswq->gs_lock);
220 cv_signal(&gswq->gs_cv);
221 mutex_exit(&gswq->gs_lock);
222
223 return (CRYPTO_QUEUED);
224 }
225
226 /*
227 	 * We try to keep the number of running threads at
228 	 * kcf_minthreads to reduce gs_lock contention.
229 */
230 cnt = kcf_minthreads -
231 (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
232 if (cnt > 0) {
233 /*
234 * The following ensures the number of threads in pool
235 * does not exceed kcf_maxthreads.
236 */
237 cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
238 if (cnt > 0) {
239 /* Signal the creator thread for more threads */
240 mutex_enter(&kcfpool->kp_user_lock);
241 if (!kcfpool->kp_signal_create_thread) {
242 kcfpool->kp_signal_create_thread = B_TRUE;
243 kcfpool->kp_nthrs = cnt;
244 cv_signal(&kcfpool->kp_user_cv);
245 }
246 mutex_exit(&kcfpool->kp_user_lock);
247 }
248 }
249
250 return (CRYPTO_QUEUED);
251 }
252
253 /*
254 * This routine is called by the taskq associated with
255 * each hardware provider. We notify the kernel consumer
256 * via the callback routine in case of CRYPTO_SUCCESS or
257 * a failure.
258 *
259 * A request can be of type kcf_areq_node_t or of type
260 * kcf_sreq_node_t.
261 */
262 static void
263 process_req_hwp(void *ireq)
264 {
265 int error = 0;
266 crypto_ctx_t *ctx;
267 kcf_call_type_t ctype;
268 kcf_provider_desc_t *pd;
269 kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
270 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
271 kcf_prov_cpu_t *mp;
272
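	/*
	 * GET_REQ_TYPE() looks at the common call-type field (sn_type or
	 * an_type, set in the kmem cache constructors below) to tell a
	 * synchronous request apart from an asynchronous one.
	 */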
273 pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
274 sreq->sn_provider : areq->an_provider;
275
276 /*
277 * Wait if flow control is in effect for the provider. A
278 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
279 * notification will signal us. We also get signaled if
280 * the provider is unregistering.
281 */
282 if (pd->pd_state == KCF_PROV_BUSY) {
283 mutex_enter(&pd->pd_lock);
284 while (pd->pd_state == KCF_PROV_BUSY)
285 cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
286 mutex_exit(&pd->pd_lock);
287 }
288
289 /*
290 * Bump the internal reference count while the request is being
291 * processed. This is how we know when it's safe to unregister
292 * a provider. This step must precede the pd_state check below.
293 */
294 mp = &(pd->pd_percpu_bins[CPU_SEQID]);
295 KCF_PROV_JOB_HOLD(mp);
296
297 /*
298 * Fail the request if the provider has failed. We return a
299 * recoverable error and the notified clients attempt any
300 * recovery. For async clients this is done in kcf_aop_done()
301 * and for sync clients it is done in the k-api routines.
302 */
303 if (pd->pd_state >= KCF_PROV_FAILED) {
304 error = CRYPTO_DEVICE_ERROR;
305 goto bail;
306 }
307
308 if (ctype == CRYPTO_SYNCH) {
309 mutex_enter(&sreq->sn_lock);
310 sreq->sn_state = REQ_INPROGRESS;
311 sreq->sn_mp = mp;
312 mutex_exit(&sreq->sn_lock);
313
314 ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
315 error = common_submit_request(sreq->sn_provider, ctx,
316 sreq->sn_params, sreq);
317 } else {
318 kcf_context_t *ictx;
319 ASSERT(ctype == CRYPTO_ASYNCH);
320
321 /*
322 * We are in the per-hardware provider thread context and
323 * hence can sleep. Note that the caller would have done
324 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
325 */
326 ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
327
328 mutex_enter(&areq->an_lock);
329 /*
330 * We need to maintain ordering for multi-part requests.
331 * an_is_my_turn is set to B_TRUE initially for a request
332 * when it is enqueued and there are no other requests
333 * for that context. It is set later from kcf_aop_done() when
334 * the request before us in the chain of requests for the
335 * context completes. We get signaled at that point.
336 */
337 if (ictx != NULL) {
338 ASSERT(ictx->kc_prov_desc == areq->an_provider);
339
340 while (areq->an_is_my_turn == B_FALSE) {
341 cv_wait(&areq->an_turn_cv, &areq->an_lock);
342 }
343 }
344 areq->an_state = REQ_INPROGRESS;
345 areq->an_mp = mp;
346 mutex_exit(&areq->an_lock);
347
348 error = common_submit_request(areq->an_provider, ctx,
349 &areq->an_params, areq);
350 }
351
352 bail:
353 if (error == CRYPTO_QUEUED) {
354 /*
355 * The request is queued by the provider and we should
356 * get a crypto_op_notification() from the provider later.
357 * We notify the consumer at that time.
358 */
359 return;
360 } else { /* CRYPTO_SUCCESS or other failure */
361 KCF_PROV_JOB_RELE(mp);
362 if (ctype == CRYPTO_SYNCH)
363 kcf_sop_done(sreq, error);
364 else
365 kcf_aop_done(areq, error);
366 }
367 }
368
369 /*
370 * This routine checks if a request can be retried on another
371  * provider. If so, mech1 is initialized to point to the mechanism
372  * structure. mech2 is also initialized in case of a dual operation. fg
373  * is initialized to the correct crypto_func_group_t bit flag. All three
374  * are initialized by this routine, so that the caller can pass them to
375  * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
376 *
377  * We check that the request is for an init or atomic routine and that
378  * it is for one of the operation groups used by the k-api.
379 */
380 static boolean_t
381 can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
382 crypto_mechanism_t **mech2, crypto_func_group_t *fg)
383 {
384 kcf_req_params_t *params;
385 kcf_op_type_t optype;
386
387 params = &areq->an_params;
388 optype = params->rp_optype;
389
390 if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
391 return (B_FALSE);
392
393 switch (params->rp_opgrp) {
394 case KCF_OG_DIGEST: {
395 kcf_digest_ops_params_t *dops = ¶ms->rp_u.digest_params;
396
397 dops->do_mech.cm_type = dops->do_framework_mechtype;
398 *mech1 = &dops->do_mech;
399 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
400 CRYPTO_FG_DIGEST_ATOMIC;
401 break;
402 }
403
404 case KCF_OG_MAC: {
405 kcf_mac_ops_params_t *mops = ¶ms->rp_u.mac_params;
406
407 mops->mo_mech.cm_type = mops->mo_framework_mechtype;
408 *mech1 = &mops->mo_mech;
409 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
410 CRYPTO_FG_MAC_ATOMIC;
411 break;
412 }
413
414 case KCF_OG_SIGN: {
415 kcf_sign_ops_params_t *sops = ¶ms->rp_u.sign_params;
416
417 sops->so_mech.cm_type = sops->so_framework_mechtype;
418 *mech1 = &sops->so_mech;
419 switch (optype) {
420 case KCF_OP_INIT:
421 *fg = CRYPTO_FG_SIGN;
422 break;
423 case KCF_OP_ATOMIC:
424 *fg = CRYPTO_FG_SIGN_ATOMIC;
425 break;
426 default:
427 ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
428 *fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
429 }
430 break;
431 }
432
433 case KCF_OG_VERIFY: {
434 kcf_verify_ops_params_t *vops = ¶ms->rp_u.verify_params;
435
436 vops->vo_mech.cm_type = vops->vo_framework_mechtype;
437 *mech1 = &vops->vo_mech;
438 switch (optype) {
439 case KCF_OP_INIT:
440 *fg = CRYPTO_FG_VERIFY;
441 break;
442 case KCF_OP_ATOMIC:
443 *fg = CRYPTO_FG_VERIFY_ATOMIC;
444 break;
445 default:
446 ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
447 *fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
448 }
449 break;
450 }
451
452 case KCF_OG_ENCRYPT: {
453 kcf_encrypt_ops_params_t *eops = ¶ms->rp_u.encrypt_params;
454
455 eops->eo_mech.cm_type = eops->eo_framework_mechtype;
456 *mech1 = &eops->eo_mech;
457 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
458 CRYPTO_FG_ENCRYPT_ATOMIC;
459 break;
460 }
461
462 case KCF_OG_DECRYPT: {
463 kcf_decrypt_ops_params_t *dcrops = ¶ms->rp_u.decrypt_params;
464
465 dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
466 *mech1 = &dcrops->dop_mech;
467 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
468 CRYPTO_FG_DECRYPT_ATOMIC;
469 break;
470 }
471
472 case KCF_OG_ENCRYPT_MAC: {
473 kcf_encrypt_mac_ops_params_t *eops =
474 ¶ms->rp_u.encrypt_mac_params;
475
476 eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
477 *mech1 = &eops->em_encr_mech;
478 eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
479 *mech2 = &eops->em_mac_mech;
480 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
481 CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
482 break;
483 }
484
485 case KCF_OG_MAC_DECRYPT: {
486 kcf_mac_decrypt_ops_params_t *dops =
487 ¶ms->rp_u.mac_decrypt_params;
488
489 dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
490 *mech1 = &dops->md_mac_mech;
491 dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
492 *mech2 = &dops->md_decr_mech;
493 *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
494 CRYPTO_FG_MAC_DECRYPT_ATOMIC;
495 break;
496 }
497
498 default:
499 return (B_FALSE);
500 }
501
502 return (B_TRUE);
503 }
504
505 /*
506 * This routine is called when a request to a provider has failed
507 * with a recoverable error. This routine tries to find another provider
508 * and dispatches the request to the new provider, if one is available.
509 * We reuse the request structure.
510 *
511 * A return value of NULL from kcf_get_mech_provider() indicates
512 * we have tried the last provider.
513 */
514 static int
515 kcf_resubmit_request(kcf_areq_node_t *areq)
516 {
517 int error = CRYPTO_FAILED;
518 kcf_context_t *ictx;
519 kcf_provider_desc_t *old_pd;
520 kcf_provider_desc_t *new_pd;
521 crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
522 crypto_mech_type_t prov_mt1, prov_mt2;
523 crypto_func_group_t fg;
524
525 if (!can_resubmit(areq, &mech1, &mech2, &fg))
526 return (error);
527
528 old_pd = areq->an_provider;
529 /*
530 * Add old_pd to the list of providers already tried.
531 * We release the new hold on old_pd in kcf_free_triedlist().
532 */
533 if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
534 KM_NOSLEEP | KCF_HOLD_PROV) == NULL)
535 return (error);
536
537 if (mech1 && !mech2) {
538 new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, NULL,
539 &error, areq->an_tried_plist, fg, 0);
540 } else {
541 ASSERT(mech1 != NULL && mech2 != NULL);
542
543 new_pd = kcf_get_dual_provider(mech1, NULL, mech2, NULL,
544 NULL, &prov_mt1,
545 &prov_mt2, &error, areq->an_tried_plist, fg, fg, 0);
546 }
547
548 if (new_pd == NULL)
549 return (error);
550
551 /*
552 * We reuse the old context by resetting provider specific
553 * fields in it.
554 */
555 if ((ictx = areq->an_context) != NULL) {
556 crypto_ctx_t *ctx;
557
558 ASSERT(old_pd == ictx->kc_prov_desc);
559 KCF_PROV_REFRELE(ictx->kc_prov_desc);
560 KCF_PROV_REFHOLD(new_pd);
561 ictx->kc_prov_desc = new_pd;
562
563 ctx = &ictx->kc_glbl_ctx;
564 ctx->cc_provider = new_pd->pd_prov_handle;
565 ctx->cc_session = new_pd->pd_sid;
566 ctx->cc_provider_private = NULL;
567 }
568
569 	/* We reuse areq by resetting the provider and context fields. */
570 KCF_PROV_REFRELE(old_pd);
571 KCF_PROV_REFHOLD(new_pd);
572 areq->an_provider = new_pd;
573 mutex_enter(&areq->an_lock);
574 areq->an_state = REQ_WAITING;
575 mutex_exit(&areq->an_lock);
576
577 switch (new_pd->pd_prov_type) {
578 case CRYPTO_SW_PROVIDER:
579 error = kcf_disp_sw_request(areq);
580 break;
581
582 case CRYPTO_HW_PROVIDER: {
583 taskq_t *taskq = new_pd->pd_taskq;
584
585 if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
586 (taskqid_t)0) {
587 error = CRYPTO_HOST_MEMORY;
588 } else {
589 error = CRYPTO_QUEUED;
590 }
591
592 break;
593 }
594 }
595
596 KCF_PROV_REFRELE(new_pd);
597 return (error);
598 }
599
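/*
 * A taskq keeps its pending entries on a circular list headed by tq_task;
 * the queue is empty when the head's tqent_next points back at the head.
 */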
600 #define EMPTY_TASKQ(tq) ((tq)->tq_task.tqent_next == &(tq)->tq_task)
601
602 /*
603 * Routine called by both ioctl and k-api. The consumer should
604 * bundle the parameters into a kcf_req_params_t structure. A bunch
605 * of macros are available in ops_impl.h for this bundling. They are:
606 *
607 * KCF_WRAP_DIGEST_OPS_PARAMS()
608 * KCF_WRAP_MAC_OPS_PARAMS()
609 * KCF_WRAP_ENCRYPT_OPS_PARAMS()
610 * KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
611 *
612 * It is the caller's responsibility to free the ctx argument when
613 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
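 *
 * A minimal usage sketch (hypothetical consumer code; the macro arguments
 * shown are indicative only, see ops_impl.h for the authoritative
 * definitions):
 *
 *	kcf_req_params_t params;
 *	int error;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid,
 *	    &mech, NULL, data, digest);
 *	error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);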
614 */
615 int
616 kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
617 crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
618 {
619 int error;
620 kcf_areq_node_t *areq;
621 kcf_sreq_node_t *sreq;
622 kcf_context_t *kcf_ctx;
623 taskq_t *taskq;
624 kcf_prov_cpu_t *mp;
625
626 kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
627
628 /* Synchronous cases */
629 if (crq == NULL) {
630 switch (pd->pd_prov_type) {
631 case CRYPTO_SW_PROVIDER:
632 error = common_submit_request(pd, ctx, params,
633 KCF_RHNDL(KM_SLEEP));
634 break;
635
636 case CRYPTO_HW_PROVIDER:
637 taskq = pd->pd_taskq;
638
639 /*
640 * Special case for CRYPTO_SYNCHRONOUS providers that
641 * never return a CRYPTO_QUEUED error. We skip any
642 * request allocation and call the SPI directly.
643 */
644 if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
645 EMPTY_TASKQ(taskq)) {
646 mp = &(pd->pd_percpu_bins[CPU_SEQID]);
647 KCF_PROV_JOB_HOLD(mp);
648
649 if (pd->pd_state == KCF_PROV_READY) {
650 error = common_submit_request(pd, ctx,
651 params, KCF_RHNDL(KM_SLEEP));
652 KCF_PROV_JOB_RELE(mp);
653 ASSERT(error != CRYPTO_QUEUED);
654 break;
655 }
656 KCF_PROV_JOB_RELE(mp);
657 }
658
659 sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
660 sreq->sn_state = REQ_ALLOCATED;
661 sreq->sn_rv = CRYPTO_FAILED;
662 sreq->sn_params = params;
663
664 /*
665 * Note that we do not need to hold the context
666 			 * for the synchronous case as the context will never
667 * become invalid underneath us. We do not need to hold
668 * the provider here either as the caller has a hold.
669 */
670 sreq->sn_context = kcf_ctx;
671 ASSERT(KCF_PROV_REFHELD(pd));
672 sreq->sn_provider = pd;
673
674 ASSERT(taskq != NULL);
675 /*
676 * Call the SPI directly if the taskq is empty and the
677 * provider is not busy, else dispatch to the taskq.
678 * Calling directly is fine as this is the synchronous
679 * case. This is unlike the asynchronous case where we
680 * must always dispatch to the taskq.
681 */
682 if (EMPTY_TASKQ(taskq) &&
683 pd->pd_state == KCF_PROV_READY) {
684 process_req_hwp(sreq);
685 } else {
686 /*
687 * We can not tell from taskq_dispatch() return
688 * value if we exceeded maxalloc. Hence the
689 * check here. Since we are allowed to wait in
690 * the synchronous case, we wait for the taskq
691 * to become empty.
692 */
693 if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
694 taskq_wait(taskq);
695 }
696
697 (void) taskq_dispatch(taskq, process_req_hwp,
698 sreq, TQ_SLEEP);
699 }
700
701 /*
702 * Wait for the notification to arrive,
703 * if the operation is not done yet.
704 * Bug# 4722589 will make the wait a cv_wait_sig().
705 */
706 mutex_enter(&sreq->sn_lock);
707 while (sreq->sn_state < REQ_DONE)
708 cv_wait(&sreq->sn_cv, &sreq->sn_lock);
709 mutex_exit(&sreq->sn_lock);
710
711 error = sreq->sn_rv;
712 kmem_cache_free(kcf_sreq_cache, sreq);
713
714 break;
715
716 default:
717 error = CRYPTO_FAILED;
718 break;
719 }
720
721 } else { /* Asynchronous cases */
722 switch (pd->pd_prov_type) {
723 case CRYPTO_SW_PROVIDER:
724 if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
725 /*
726 * This case has less overhead since there is
727 * no switching of context.
728 */
729 error = common_submit_request(pd, ctx, params,
730 KCF_RHNDL(KM_NOSLEEP));
731 } else {
732 /*
733 * CRYPTO_ALWAYS_QUEUE is set. We need to
734 * queue the request and return.
735 */
736 areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
737 params, cont);
738 if (areq == NULL)
739 error = CRYPTO_HOST_MEMORY;
740 else {
741 if (!(crq->cr_flag
742 & CRYPTO_SKIP_REQID)) {
743 /*
744 * Set the request handle. This handle
745 * is used for any crypto_cancel_req(9f)
746 * calls from the consumer. We have to
747 * do this before dispatching the
748 * request.
749 */
750 crq->cr_reqid = kcf_reqid_insert(areq);
751 }
752
753 error = kcf_disp_sw_request(areq);
754 /*
755 				 * If there was an error processing this
756 				 * request, remove the handle and
757 				 * release the request structure.
758 */
759 if (error != CRYPTO_QUEUED) {
760 if (!(crq->cr_flag
761 & CRYPTO_SKIP_REQID))
762 kcf_reqid_delete(areq);
763 KCF_AREQ_REFRELE(areq);
764 }
765 }
766 }
767 break;
768
769 case CRYPTO_HW_PROVIDER:
770 /*
771 * We need to queue the request and return.
772 */
773 areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
774 cont);
775 if (areq == NULL) {
776 error = CRYPTO_HOST_MEMORY;
777 goto done;
778 }
779
780 taskq = pd->pd_taskq;
781 ASSERT(taskq != NULL);
782 /*
783 * We can not tell from taskq_dispatch() return
784 * value if we exceeded maxalloc. Hence the check
785 * here.
786 */
787 if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
788 error = CRYPTO_BUSY;
789 KCF_AREQ_REFRELE(areq);
790 goto done;
791 }
792
793 if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
794 /*
795 * Set the request handle. This handle is used
796 * for any crypto_cancel_req(9f) calls from the
797 * consumer. We have to do this before dispatching
798 * the request.
799 */
800 crq->cr_reqid = kcf_reqid_insert(areq);
801 }
802
803 if (taskq_dispatch(taskq,
804 process_req_hwp, areq, TQ_NOSLEEP) ==
805 (taskqid_t)0) {
806 error = CRYPTO_HOST_MEMORY;
807 if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
808 kcf_reqid_delete(areq);
809 KCF_AREQ_REFRELE(areq);
810 } else {
811 error = CRYPTO_QUEUED;
812 }
813 break;
814
815 default:
816 error = CRYPTO_FAILED;
817 break;
818 }
819 }
820
821 done:
822 return (error);
823 }
824
825 /*
826 * We're done with this framework context, so free it. Note that freeing
827  * the framework context (kcf_context) frees the global context (crypto_ctx).
828 *
829 * The provider is responsible for freeing provider private context after a
830 * final or single operation and resetting the cc_provider_private field
831 * to NULL. It should do this before it notifies the framework of the
832 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
833 * like crypto_cancel_ctx(9f).
834 */
835 void
836 kcf_free_context(kcf_context_t *kcf_ctx)
837 {
838 kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
839 crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
840 kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
841 kcf_prov_cpu_t *mp;
842
843 /* Release the second context, if any */
844
845 if (kcf_secondctx != NULL)
846 KCF_CONTEXT_REFRELE(kcf_secondctx);
847
848 if (gctx->cc_provider_private != NULL) {
849 mutex_enter(&pd->pd_lock);
850 if (!KCF_IS_PROV_REMOVED(pd)) {
851 /*
852 * Increment the provider's internal refcnt so it
853 * doesn't unregister from the framework while
854 * we're calling the entry point.
855 */
856 mp = &(pd->pd_percpu_bins[CPU_SEQID]);
857 KCF_PROV_JOB_HOLD(mp);
858 mutex_exit(&pd->pd_lock);
859 (void) KCF_PROV_FREE_CONTEXT(pd, gctx);
860 KCF_PROV_JOB_RELE(mp);
861 } else {
862 mutex_exit(&pd->pd_lock);
863 }
864 }
865
866 /* kcf_ctx->kc_prov_desc has a hold on pd */
867 KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
868
869 /* check if this context is shared with a software provider */
870 if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
871 kcf_ctx->kc_sw_prov_desc != NULL) {
872 KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
873 }
874
875 kmem_cache_free(kcf_context_cache, kcf_ctx);
876 }
877
878 /*
879 * Free the request after releasing all the holds.
880 */
881 void
882 kcf_free_req(kcf_areq_node_t *areq)
883 {
884 KCF_PROV_REFRELE(areq->an_provider);
885 if (areq->an_context != NULL)
886 KCF_CONTEXT_REFRELE(areq->an_context);
887
888 if (areq->an_tried_plist != NULL)
889 kcf_free_triedlist(areq->an_tried_plist);
890 kmem_cache_free(kcf_areq_cache, areq);
891 }
892
893 /*
894 * Utility routine to remove a request from the chain of requests
895 * hanging off a context.
896 */
897 void
898 kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
899 {
900 kcf_areq_node_t *cur, *prev;
901
902 /*
903 * Get context lock, search for areq in the chain and remove it.
904 */
905 ASSERT(ictx != NULL);
906 mutex_enter(&ictx->kc_in_use_lock);
907 prev = cur = ictx->kc_req_chain_first;
908
909 while (cur != NULL) {
910 if (cur == areq) {
911 if (prev == cur) {
912 if ((ictx->kc_req_chain_first =
913 cur->an_ctxchain_next) == NULL)
914 ictx->kc_req_chain_last = NULL;
915 } else {
916 if (cur == ictx->kc_req_chain_last)
917 ictx->kc_req_chain_last = prev;
918 prev->an_ctxchain_next = cur->an_ctxchain_next;
919 }
920
921 break;
922 }
923 prev = cur;
924 cur = cur->an_ctxchain_next;
925 }
926 mutex_exit(&ictx->kc_in_use_lock);
927 }
928
929 /*
930 * Remove the specified node from the global software queue.
931 *
932 * The caller must hold the queue lock and request lock (an_lock).
933 */
934 void
935 kcf_remove_node(kcf_areq_node_t *node)
936 {
937 kcf_areq_node_t *nextp = node->an_next;
938 kcf_areq_node_t *prevp = node->an_prev;
939
940 ASSERT(mutex_owned(&gswq->gs_lock));
941
942 if (nextp != NULL)
943 nextp->an_prev = prevp;
944 else
945 gswq->gs_last = prevp;
946
947 if (prevp != NULL)
948 prevp->an_next = nextp;
949 else
950 gswq->gs_first = nextp;
951
952 ASSERT(mutex_owned(&node->an_lock));
953 node->an_state = REQ_CANCELED;
954 }
955
956 /*
957 * Remove and return the first node in the global software queue.
958 *
959 * The caller must hold the queue lock.
960 */
961 static kcf_areq_node_t *
962 kcf_dequeue()
963 {
964 kcf_areq_node_t *tnode = NULL;
965
966 ASSERT(mutex_owned(&gswq->gs_lock));
967 if ((tnode = gswq->gs_first) == NULL) {
968 return (NULL);
969 } else {
970 ASSERT(gswq->gs_first->an_prev == NULL);
971 gswq->gs_first = tnode->an_next;
972 if (tnode->an_next == NULL)
973 gswq->gs_last = NULL;
974 else
975 tnode->an_next->an_prev = NULL;
976 }
977
978 gswq->gs_njobs--;
979 return (tnode);
980 }
981
982 /*
983 * Add the request node to the end of the global software queue.
984 *
985 * The caller should not hold the queue lock. Returns 0 if the
986 * request is successfully queued. Returns CRYPTO_BUSY if the limit
987 * on the number of jobs is exceeded.
988 */
989 static int
990 kcf_enqueue(kcf_areq_node_t *node)
991 {
992 kcf_areq_node_t *tnode;
993
994 mutex_enter(&gswq->gs_lock);
995
996 if (gswq->gs_njobs >= gswq->gs_maxjobs) {
997 mutex_exit(&gswq->gs_lock);
998 return (CRYPTO_BUSY);
999 }
1000
1001 if (gswq->gs_last == NULL) {
1002 gswq->gs_first = gswq->gs_last = node;
1003 } else {
1004 ASSERT(gswq->gs_last->an_next == NULL);
1005 tnode = gswq->gs_last;
1006 tnode->an_next = node;
1007 gswq->gs_last = node;
1008 node->an_prev = tnode;
1009 }
1010
1011 gswq->gs_njobs++;
1012
1013 /* an_lock not needed here as we hold gs_lock */
1014 node->an_state = REQ_WAITING;
1015
1016 mutex_exit(&gswq->gs_lock);
1017
1018 return (0);
1019 }
1020
1021 /*
1022 * Decrement the thread pool count and signal the failover
1023 * thread if we are the last one out.
1024 */
1025 static void
1026 kcf_decrcnt_andsignal()
1027 {
1028 KCF_ATOMIC_DECR(kcfpool->kp_threads);
1029
1030 mutex_enter(&kcfpool->kp_thread_lock);
1031 if (kcfpool->kp_threads == 0)
1032 cv_signal(&kcfpool->kp_nothr_cv);
1033 mutex_exit(&kcfpool->kp_thread_lock);
1034 }
1035
1036 /*
1037 * Function run by a thread from kcfpool to work on global software queue.
1038 * It is called from ioctl(CRYPTO_POOL_RUN, ...).
1039 */
1040 int
1041 kcf_svc_do_run(void)
1042 {
1043 int error = 0;
1044 clock_t rv;
1045 clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
1046 kcf_areq_node_t *req;
1047 kcf_context_t *ictx;
1048 kcf_provider_desc_t *pd;
1049
1050 KCF_ATOMIC_INCR(kcfpool->kp_threads);
1051
1052 for (;;) {
1053 mutex_enter(&gswq->gs_lock);
1054
1055 while ((req = kcf_dequeue()) == NULL) {
1056 KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1057 rv = cv_reltimedwait_sig(&gswq->gs_cv,
1058 &gswq->gs_lock, timeout_val, TR_CLOCK_TICK);
1059 KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1060
1061 switch (rv) {
1062 case 0:
1063 /*
1064 * A signal (as in kill(2)) is pending. We did
1065 * not get any cv_signal().
1066 */
1067 kcf_decrcnt_andsignal();
1068 mutex_exit(&gswq->gs_lock);
1069 return (EINTR);
1070
1071 case -1:
1072 /*
1073 * Timed out and we are not signaled. Let us
1074 * see if this thread should exit. We should
1075 * keep at least kcf_minthreads.
1076 */
1077 if (kcfpool->kp_threads > kcf_minthreads) {
1078 kcf_decrcnt_andsignal();
1079 mutex_exit(&gswq->gs_lock);
1080 return (0);
1081 }
1082
1083 /* Resume the wait for work */
1084 break;
1085
1086 default:
1087 /*
1088 * We are signaled to work on the queue.
1089 */
1090 break;
1091 }
1092 }
1093
1094 mutex_exit(&gswq->gs_lock);
1095
1096 ictx = req->an_context;
1097 if (ictx == NULL) { /* Context-less operation */
1098 pd = req->an_provider;
1099 error = common_submit_request(pd, NULL,
1100 &req->an_params, req);
1101 kcf_aop_done(req, error);
1102 continue;
1103 }
1104
1105 /*
1106 * We check if we can work on the request now.
1107 * Solaris does not guarantee any order on how the threads
1108 * are scheduled or how the waiters on a mutex are chosen.
1109 * So, we need to maintain our own order.
1110 *
1111 * is_my_turn is set to B_TRUE initially for a request when
1112 * it is enqueued and there are no other requests
1113 * for that context. Note that a thread sleeping on
1114 * an_turn_cv is not counted as an idle thread. This is
1115 * because we define an idle thread as one that sleeps on the
1116 * global queue waiting for new requests.
1117 */
1118 mutex_enter(&req->an_lock);
1119 while (req->an_is_my_turn == B_FALSE) {
1120 KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
1121 cv_wait(&req->an_turn_cv, &req->an_lock);
1122 KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
1123 }
1124
1125 req->an_state = REQ_INPROGRESS;
1126 mutex_exit(&req->an_lock);
1127
1128 pd = ictx->kc_prov_desc;
1129 ASSERT(pd == req->an_provider);
1130 error = common_submit_request(pd, &ictx->kc_glbl_ctx,
1131 &req->an_params, req);
1132
1133 kcf_aop_done(req, error);
1134 }
1135 }
1136
1137 /*
1138 * kmem_cache_alloc constructor for sync request structure.
1139 */
1140 /* ARGSUSED */
1141 static int
1142 kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1143 {
1144 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1145
1146 sreq->sn_type = CRYPTO_SYNCH;
1147 cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
1148 mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);
1149
1150 return (0);
1151 }
1152
1153 /* ARGSUSED */
1154 static void
1155 kcf_sreq_cache_destructor(void *buf, void *cdrarg)
1156 {
1157 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1158
1159 mutex_destroy(&sreq->sn_lock);
1160 cv_destroy(&sreq->sn_cv);
1161 }
1162
1163 /*
1164 * kmem_cache_alloc constructor for async request structure.
1165 */
1166 /* ARGSUSED */
1167 static int
1168 kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1169 {
1170 kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1171
1172 areq->an_type = CRYPTO_ASYNCH;
1173 areq->an_refcnt = 0;
1174 mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
1175 cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
1176 cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);
1177
1178 return (0);
1179 }
1180
1181 /* ARGSUSED */
1182 static void
1183 kcf_areq_cache_destructor(void *buf, void *cdrarg)
1184 {
1185 kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1186
1187 ASSERT(areq->an_refcnt == 0);
1188 mutex_destroy(&areq->an_lock);
1189 cv_destroy(&areq->an_done);
1190 cv_destroy(&areq->an_turn_cv);
1191 }
1192
1193 /*
1194 * kmem_cache_alloc constructor for kcf_context structure.
1195 */
1196 /* ARGSUSED */
1197 static int
1198 kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
1199 {
1200 kcf_context_t *kctx = (kcf_context_t *)buf;
1201
1202 kctx->kc_refcnt = 0;
1203 mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
1204
1205 return (0);
1206 }
1207
1208 /* ARGSUSED */
1209 static void
1210 kcf_context_cache_destructor(void *buf, void *cdrarg)
1211 {
1212 kcf_context_t *kctx = (kcf_context_t *)buf;
1213
1214 ASSERT(kctx->kc_refcnt == 0);
1215 mutex_destroy(&kctx->kc_in_use_lock);
1216 }
1217
1218 /*
1219 * Creates and initializes all the structures needed by the framework.
1220 */
1221 void
1222 kcf_sched_init(void)
1223 {
1224 int i;
1225 kcf_reqid_table_t *rt;
1226
1227 /*
1228 * Create all the kmem caches needed by the framework. We set the
1229 	 * align argument to 64 so that each slab is 64-byte aligned and the
1230 	 * object size (cache_chunksize) is a multiple of 64 bytes.
1231 	 * This helps to avoid false sharing, as 64 bytes is the size of the
1232 	 * CPU cache line.
1233 */
1234 kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
1235 sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
1236 kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);
1237
1238 kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
1239 sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
1240 kcf_areq_cache_destructor, NULL, NULL, NULL, 0);
1241
1242 kcf_context_cache = kmem_cache_create("kcf_context_cache",
1243 sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
1244 kcf_context_cache_destructor, NULL, NULL, NULL, 0);
1245
1246 mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);
1247
1248 gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);
1249
1250 mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
1251 cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
1252 gswq->gs_njobs = 0;
1253 gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1254 gswq->gs_first = gswq->gs_last = NULL;
1255
1256 /* Initialize the global reqid table */
1257 for (i = 0; i < REQID_TABLES; i++) {
1258 rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
1259 kcf_reqid_table[i] = rt;
1260 mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
1261 rt->rt_curid = i;
1262 }
1263
1264 /* Allocate and initialize the thread pool */
1265 kcfpool_alloc();
1266
1267 /* Initialize the event notification list variables */
1268 mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
1269 cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);
1270
1271 /* Initialize the crypto_bufcall list variables */
1272 mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
1273 cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);
1274
1275 /* Create the kcf kstat */
1276 kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
1277 KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
1278 KSTAT_FLAG_VIRTUAL);
1279
1280 if (kcf_misc_kstat != NULL) {
1281 kcf_misc_kstat->ks_data = &kcf_ksdata;
1282 kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
1283 kstat_install(kcf_misc_kstat);
1284 }
1285 }
1286
1287 /*
1288 * This routine should only be called by drv/cryptoadm.
1289 *
1290 * kcf_sched_running flag isn't protected by a lock. But, we are safe because
1291 * the first thread ("cryptoadm refresh") calling this routine during
1292 * boot time completes before any other thread that can call this routine.
1293 */
1294 void
1295 kcf_sched_start(void)
1296 {
1297 if (kcf_sched_running)
1298 return;
1299
1300 /* Start the failover kernel thread for now */
1301 (void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
1302 TS_RUN, minclsyspri);
1303
1304 /* Start the background processing thread. */
1305 (void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
1306 TS_RUN, minclsyspri);
1307
1308 kcf_sched_running = B_TRUE;
1309 }
1310
1311 /*
1312 * Signal the waiting sync client.
1313 */
1314 void
1315 kcf_sop_done(kcf_sreq_node_t *sreq, int error)
1316 {
1317 mutex_enter(&sreq->sn_lock);
1318 sreq->sn_state = REQ_DONE;
1319 sreq->sn_rv = error;
1320 cv_signal(&sreq->sn_cv);
1321 mutex_exit(&sreq->sn_lock);
1322 }
1323
1324 /*
1325 * Callback the async client with the operation status.
1326 * We free the async request node and possibly the context.
1327 * We also handle any chain of requests hanging off of
1328 * the context.
1329 */
1330 void
1331 kcf_aop_done(kcf_areq_node_t *areq, int error)
1332 {
1333 kcf_op_type_t optype;
1334 boolean_t skip_notify = B_FALSE;
1335 kcf_context_t *ictx;
1336 kcf_areq_node_t *nextreq;
1337
1338 /*
1339 	 * Handle recoverable errors. This has to be done first,
1340 	 * before doing anything else in this routine, so that
1341 * we do not change the state of the request.
1342 */
1343 if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
1344 /*
1345 * We try another provider, if one is available. Else
1346 * we continue with the failure notification to the
1347 * client.
1348 */
1349 if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
1350 return;
1351 }
1352
1353 mutex_enter(&areq->an_lock);
1354 areq->an_state = REQ_DONE;
1355 mutex_exit(&areq->an_lock);
1356
1357 optype = (&areq->an_params)->rp_optype;
1358 if ((ictx = areq->an_context) != NULL) {
1359 /*
1360 		 * After a request is removed from the request
1361 		 * queue, it still stays on a chain of requests hanging
1362 		 * off its context structure. It needs to be removed
1363 * from this chain at this point.
1364 */
1365 mutex_enter(&ictx->kc_in_use_lock);
1366 nextreq = areq->an_ctxchain_next;
1367 if (nextreq != NULL) {
1368 mutex_enter(&nextreq->an_lock);
1369 nextreq->an_is_my_turn = B_TRUE;
1370 cv_signal(&nextreq->an_turn_cv);
1371 mutex_exit(&nextreq->an_lock);
1372 }
1373
1374 ictx->kc_req_chain_first = nextreq;
1375 if (nextreq == NULL)
1376 ictx->kc_req_chain_last = NULL;
1377 mutex_exit(&ictx->kc_in_use_lock);
1378
1379 if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
1380 ASSERT(nextreq == NULL);
1381 KCF_CONTEXT_REFRELE(ictx);
1382 } else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
1383 /*
1384 * NOTE - We do not release the context in case of update
1385 * operations. We require the consumer to free it explicitly,
1386 * in case it wants to abandon an update operation. This is done
1387 * as there may be mechanisms in ECB mode that can continue
1388 * even if an operation on a block fails.
1389 */
1390 KCF_CONTEXT_REFRELE(ictx);
1391 }
1392 }
1393
1394 /* Deal with the internal continuation to this request first */
1395
1396 if (areq->an_isdual) {
1397 kcf_dual_req_t *next_arg;
1398 next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
1399 next_arg->kr_areq = areq;
1400 KCF_AREQ_REFHOLD(areq);
1401 areq->an_isdual = B_FALSE;
1402
1403 NOTIFY_CLIENT(areq, error);
1404 return;
1405 }
1406
1407 /*
1408 	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always
1409 	 * notify the client. If this flag is clear, we skip the notification
1410 * provided there are no errors. We check this flag for only
1411 * init or update operations. It is ignored for single, final or
1412 * atomic operations.
1413 */
1414 skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
1415 (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
1416 (error == CRYPTO_SUCCESS);
1417
1418 if (!skip_notify) {
1419 NOTIFY_CLIENT(areq, error);
1420 }
1421
1422 if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
1423 kcf_reqid_delete(areq);
1424
1425 KCF_AREQ_REFRELE(areq);
1426 }
1427
1428 /*
1429 * Allocate the thread pool and initialize all the fields.
1430 */
1431 static void
1432 kcfpool_alloc()
1433 {
1434 kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);
1435
1436 kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
1437 kcfpool->kp_blockedthreads = 0;
1438 kcfpool->kp_signal_create_thread = B_FALSE;
1439 kcfpool->kp_nthrs = 0;
1440 kcfpool->kp_user_waiting = B_FALSE;
1441
1442 mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
1443 cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);
1444
1445 mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
1446 cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
1447
1448 kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
1449 }
1450
1451 /*
1452 * This function is run by the 'creator' thread in the pool.
1453 * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
1454 */
1455 int
1456 kcf_svc_wait(int *nthrs)
1457 {
1458 clock_t rv;
1459 clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
1460
1461 if (kcfpool == NULL)
1462 return (ENOENT);
1463
1464 mutex_enter(&kcfpool->kp_user_lock);
1465 /* Check if there's already a user thread waiting on this kcfpool */
1466 if (kcfpool->kp_user_waiting) {
1467 mutex_exit(&kcfpool->kp_user_lock);
1468 *nthrs = 0;
1469 return (EBUSY);
1470 }
1471
1472 kcfpool->kp_user_waiting = B_TRUE;
1473
1474 /* Go to sleep, waiting for the signaled flag. */
1475 while (!kcfpool->kp_signal_create_thread) {
1476 rv = cv_reltimedwait_sig(&kcfpool->kp_user_cv,
1477 &kcfpool->kp_user_lock, timeout_val, TR_CLOCK_TICK);
1478 switch (rv) {
1479 case 0:
1480 /* Interrupted, return to handle exit or signal */
1481 kcfpool->kp_user_waiting = B_FALSE;
1482 kcfpool->kp_signal_create_thread = B_FALSE;
1483 mutex_exit(&kcfpool->kp_user_lock);
1484 /*
1485 * kcfd is exiting. Release the door and
1486 * invalidate it.
1487 */
1488 mutex_enter(&kcf_dh_lock);
1489 if (kcf_dh != NULL) {
1490 door_ki_rele(kcf_dh);
1491 kcf_dh = NULL;
1492 }
1493 mutex_exit(&kcf_dh_lock);
1494 return (EINTR);
1495
1496 case -1:
1497 /* Timed out. Recalculate the min/max threads */
1498 compute_min_max_threads();
1499 break;
1500
1501 default:
1502 /* Worker thread did a cv_signal() */
1503 break;
1504 }
1505 }
1506
1507 kcfpool->kp_signal_create_thread = B_FALSE;
1508 kcfpool->kp_user_waiting = B_FALSE;
1509
1510 *nthrs = kcfpool->kp_nthrs;
1511 mutex_exit(&kcfpool->kp_user_lock);
1512
1513 /* Return to userland for possible thread creation. */
1514 return (0);
1515 }
1516
1517
1518 /*
1519 * This routine introduces a locking order for gswq->gs_lock followed
1520 * by cpu_lock.
1521 * This means that no consumer of the k-api should hold cpu_lock when calling
1522 * k-api routines.
1523 */
1524 static void
1525 compute_min_max_threads()
1526 {
1527 mutex_enter(&gswq->gs_lock);
1528 mutex_enter(&cpu_lock);
1529 kcf_minthreads = curthread->t_cpupart->cp_ncpus;
1530 mutex_exit(&cpu_lock);
1531 kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
1532 gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1533 mutex_exit(&gswq->gs_lock);
1534 }
1535
1536 /*
1537 * This is the main routine of the failover kernel thread.
1538 * If there are any threads in the pool we sleep. The last thread in the
1539 * pool to exit will signal us to get to work. We get back to sleep
1540 * once we detect that the pool has threads.
1541 *
1542 * Note that in the hand-off from us to a pool thread we get to run once.
1543 * Since this hand-off is a rare event this should be fine.
1544 */
1545 static void
1546 kcf_failover_thread()
1547 {
1548 int error = 0;
1549 kcf_context_t *ictx;
1550 kcf_areq_node_t *req;
1551 callb_cpr_t cpr_info;
1552 kmutex_t cpr_lock;
1553 static boolean_t is_logged = B_FALSE;
1554
1555 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
1556 CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
1557 "kcf_failover_thread");
1558
1559 for (;;) {
1560 /*
1561 		 * Wait if there are any threads in the pool.
1562 */
1563 if (kcfpool->kp_threads > 0) {
1564 mutex_enter(&cpr_lock);
1565 CALLB_CPR_SAFE_BEGIN(&cpr_info);
1566 mutex_exit(&cpr_lock);
1567
1568 mutex_enter(&kcfpool->kp_thread_lock);
1569 cv_wait(&kcfpool->kp_nothr_cv,
1570 &kcfpool->kp_thread_lock);
1571 mutex_exit(&kcfpool->kp_thread_lock);
1572
1573 mutex_enter(&cpr_lock);
1574 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1575 mutex_exit(&cpr_lock);
1576 is_logged = B_FALSE;
1577 }
1578
1579 /*
1580 * Get the requests from the queue and wait if needed.
1581 */
1582 mutex_enter(&gswq->gs_lock);
1583
1584 while ((req = kcf_dequeue()) == NULL) {
1585 mutex_enter(&cpr_lock);
1586 CALLB_CPR_SAFE_BEGIN(&cpr_info);
1587 mutex_exit(&cpr_lock);
1588
1589 KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1590 cv_wait(&gswq->gs_cv, &gswq->gs_lock);
1591 KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1592
1593 mutex_enter(&cpr_lock);
1594 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1595 mutex_exit(&cpr_lock);
1596 }
1597
1598 mutex_exit(&gswq->gs_lock);
1599
1600 /*
1601 * We check the kp_threads since kcfd could have started
1602 * while we are waiting on the global software queue.
1603 */
1604 if ((kcfpool->kp_threads == 0) && !is_logged) {
1605 cmn_err(CE_WARN, "kcfd is not running. Please check "
1606 "and restart kcfd. Using the failover kernel "
1607 "thread for now.\n");
1608 is_logged = B_TRUE;
1609 }
1610
1611 /*
1612 * Get to work on the request.
1613 */
1614 ictx = req->an_context;
1615 mutex_enter(&req->an_lock);
1616 req->an_state = REQ_INPROGRESS;
1617 mutex_exit(&req->an_lock);
1618
1619 error = common_submit_request(req->an_provider, ictx ?
1620 &ictx->kc_glbl_ctx : NULL, &req->an_params, req);
1621
1622 kcf_aop_done(req, error);
1623 }
1624 }
1625
1626 /*
1627 * Insert the async request in the hash table after assigning it
1628 * an ID. Returns the ID.
1629 *
1630 * The ID is used by the caller to pass as an argument to a
1631 * cancel_req() routine later.
1632 */
1633 static crypto_req_id_t
1634 kcf_reqid_insert(kcf_areq_node_t *areq)
1635 {
1636 int indx;
1637 crypto_req_id_t id;
1638 kcf_areq_node_t *headp;
1639 kcf_reqid_table_t *rt =
1640 kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];
1641
1642 mutex_enter(&rt->rt_lock);
1643
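	/*
	 * rt_curid was seeded with this table's index in kcf_sched_init()
	 * and only the counter portion of the id changes below, so the low
	 * bits of every id issued here still identify the table. This lets
	 * kcf_reqid_delete() and crypto_cancel_req() find the table with
	 * (id & REQID_TABLE_MASK).
	 */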
1644 rt->rt_curid = id =
1645 (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
1646 SET_REQID(areq, id);
1647 indx = REQID_HASH(id);
1648 headp = areq->an_idnext = rt->rt_idhash[indx];
1649 areq->an_idprev = NULL;
1650 if (headp != NULL)
1651 headp->an_idprev = areq;
1652
1653 rt->rt_idhash[indx] = areq;
1654 mutex_exit(&rt->rt_lock);
1655
1656 return (id);
1657 }
1658
1659 /*
1660 * Delete the async request from the hash table.
1661 */
1662 static void
1663 kcf_reqid_delete(kcf_areq_node_t *areq)
1664 {
1665 int indx;
1666 kcf_areq_node_t *nextp, *prevp;
1667 crypto_req_id_t id = GET_REQID(areq);
1668 kcf_reqid_table_t *rt;
1669
1670 rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1671 indx = REQID_HASH(id);
1672
1673 mutex_enter(&rt->rt_lock);
1674
1675 nextp = areq->an_idnext;
1676 prevp = areq->an_idprev;
1677 if (nextp != NULL)
1678 nextp->an_idprev = prevp;
1679 if (prevp != NULL)
1680 prevp->an_idnext = nextp;
1681 else
1682 rt->rt_idhash[indx] = nextp;
1683
1684 SET_REQID(areq, 0);
1685 cv_broadcast(&areq->an_done);
1686
1687 mutex_exit(&rt->rt_lock);
1688 }
1689
1690 /*
1691 * Cancel a single asynchronous request.
1692 *
1693 * We guarantee that no problems will result from calling
1694 * crypto_cancel_req() for a request which is either running, or
1695 * has already completed. We remove the request from any queues
1696 * if it is possible. We wait for request completion if the
1697 * request is dispatched to a provider.
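 *
 * A minimal consumer sketch (hypothetical; assumes the request was
 * submitted asynchronously without CRYPTO_SKIP_REQID, so the framework
 * filled in cr_reqid):
 *
 *	crypto_call_req_t cr;
 *
 *	... submit an asynchronous crypto_*(9F) request passing &cr ...
 *	if (rv == CRYPTO_QUEUED)
 *		crypto_cancel_req(cr.cr_reqid);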
1698 *
1699 * Calling context:
1700 * Can be called from user context only.
1701 *
1702 * NOTE: We acquire the following locks in this routine (in order):
1703 * - rt_lock (kcf_reqid_table_t)
1704 * - gswq->gs_lock
1705 * - areq->an_lock
1706 * - ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
1707 *
1708  * This locking order MUST be maintained everywhere else in the code.
1709 */
1710 void
1711 crypto_cancel_req(crypto_req_id_t id)
1712 {
1713 int indx;
1714 kcf_areq_node_t *areq;
1715 kcf_provider_desc_t *pd;
1716 kcf_context_t *ictx;
1717 kcf_reqid_table_t *rt;
1718
1719 rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1720 indx = REQID_HASH(id);
1721
1722 mutex_enter(&rt->rt_lock);
1723 for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
1724 if (GET_REQID(areq) == id) {
1725 /*
1726 * We found the request. It is either still waiting
1727 * in the framework queues or running at the provider.
1728 */
1729 pd = areq->an_provider;
1730 ASSERT(pd != NULL);
1731
1732 switch (pd->pd_prov_type) {
1733 case CRYPTO_SW_PROVIDER:
1734 mutex_enter(&gswq->gs_lock);
1735 mutex_enter(&areq->an_lock);
1736
1737 /* This request can be safely canceled. */
1738 if (areq->an_state <= REQ_WAITING) {
1739 /* Remove from gswq, global software queue. */
1740 kcf_remove_node(areq);
1741 if ((ictx = areq->an_context) != NULL)
1742 kcf_removereq_in_ctxchain(ictx, areq);
1743
1744 mutex_exit(&areq->an_lock);
1745 mutex_exit(&gswq->gs_lock);
1746 mutex_exit(&rt->rt_lock);
1747
1748 /* Remove areq from hash table and free it. */
1749 kcf_reqid_delete(areq);
1750 KCF_AREQ_REFRELE(areq);
1751 return;
1752 }
1753
1754 mutex_exit(&areq->an_lock);
1755 mutex_exit(&gswq->gs_lock);
1756 break;
1757
1758 case CRYPTO_HW_PROVIDER:
1759 /*
1760 * There is no interface to remove an entry
1761 * once it is on the taskq. So, we do not do
1762 * any thing for a hardware provider.
1763 */
1764 break;
1765 }
1766
1767 /*
1768 * The request is running. Wait for the request completion
1769 * to notify us.
1770 */
1771 KCF_AREQ_REFHOLD(areq);
1772 while (GET_REQID(areq) == id)
1773 cv_wait(&areq->an_done, &rt->rt_lock);
1774 KCF_AREQ_REFRELE(areq);
1775 break;
1776 }
1777 }
1778
1779 mutex_exit(&rt->rt_lock);
1780 }
1781
1782 /*
1783 * Cancel all asynchronous requests associated with the
1784 * passed in crypto context and free it.
1785 *
1786 * A client SHOULD NOT call this routine after calling a crypto_*_final
1787 * routine. This routine is called only during intermediate operations.
1788 * The client should not use the crypto context after this function returns
1789 * since we destroy it.
1790 *
1791 * Calling context:
1792 * Can be called from user context only.
1793 */
1794 void
1795 crypto_cancel_ctx(crypto_context_t ctx)
1796 {
1797 kcf_context_t *ictx;
1798 kcf_areq_node_t *areq;
1799
1800 if (ctx == NULL)
1801 return;
1802
1803 ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
1804
1805 mutex_enter(&ictx->kc_in_use_lock);
1806
1807 /* Walk the chain and cancel each request */
1808 while ((areq = ictx->kc_req_chain_first) != NULL) {
1809 /*
1810 * We have to drop the lock here as we may have
1811 * to wait for request completion. We hold the
1812 * request before dropping the lock though, so that it
1813 * won't be freed underneath us.
1814 */
1815 KCF_AREQ_REFHOLD(areq);
1816 mutex_exit(&ictx->kc_in_use_lock);
1817
1818 crypto_cancel_req(GET_REQID(areq));
1819 KCF_AREQ_REFRELE(areq);
1820
1821 mutex_enter(&ictx->kc_in_use_lock);
1822 }
1823
1824 mutex_exit(&ictx->kc_in_use_lock);
1825 KCF_CONTEXT_REFRELE(ictx);
1826 }
1827
1828 /*
1829 * Update kstats.
1830 */
1831 static int
1832 kcf_misc_kstat_update(kstat_t *ksp, int rw)
1833 {
1834 uint_t tcnt;
1835 kcf_stats_t *ks_data;
1836
1837 if (rw == KSTAT_WRITE)
1838 return (EACCES);
1839
1840 ks_data = ksp->ks_data;
1841
1842 ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
1843 /*
1844 * The failover thread is counted in kp_idlethreads in
1845 * some corner cases. This is done to avoid doing more checks
1846 * when submitting a request. We account for those cases below.
1847 */
1848 if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
1849 tcnt--;
1850 ks_data->ks_idle_thrs.value.ui32 = tcnt;
1851 ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
1852 ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
1853 ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
1854 ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
1855 ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
1856 ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
1857 ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
1858
1859 return (0);
1860 }
1861
1862 /*
1863  * Allocate and initialize a kcf_dual_req, used for saving the arguments of
1864 * a dual operation or an atomic operation that has to be internally
1865 * simulated with multiple single steps.
1866 * crq determines the memory allocation flags.
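 * (A NULL crq is assumed to mean the caller may block, so the allocation
 * can sleep; otherwise a no-sleep allocation is used. See the KCF_KMFLAG()
 * definition.)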
1867 */
1868
1869 kcf_dual_req_t *
1870 kcf_alloc_req(crypto_call_req_t *crq)
1871 {
1872 kcf_dual_req_t *kcr;
1873
1874 kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
1875
1876 if (kcr == NULL)
1877 return (NULL);
1878
1879 /* Copy the whole crypto_call_req struct, as it isn't persistent */
1880 if (crq != NULL)
1881 kcr->kr_callreq = *crq;
1882 else
1883 bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
1884 kcr->kr_areq = NULL;
1885 kcr->kr_saveoffset = 0;
1886 kcr->kr_savelen = 0;
1887
1888 return (kcr);
1889 }
1890
1891 /*
1892  * Callback routine for the next part of a simulated dual operation.
1893 * Schedules the next step.
1894 *
1895 * This routine can be called from interrupt context.
1896 */
1897 void
1898 kcf_next_req(void *next_req_arg, int status)
1899 {
1900 kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
1901 kcf_req_params_t *params = &(next_req->kr_params);
1902 kcf_areq_node_t *areq = next_req->kr_areq;
1903 int error = status;
1904 kcf_provider_desc_t *pd;
1905 crypto_dual_data_t *ct;
1906
1907 /* Stop the processing if an error occurred at this step */
1908 if (error != CRYPTO_SUCCESS) {
1909 out:
1910 areq->an_reqarg = next_req->kr_callreq;
1911 KCF_AREQ_REFRELE(areq);
1912 kmem_free(next_req, sizeof (kcf_dual_req_t));
1913 areq->an_isdual = B_FALSE;
1914 kcf_aop_done(areq, error);
1915 return;
1916 }
1917
1918 switch (params->rp_opgrp) {
1919 case KCF_OG_MAC: {
1920
1921 /*
1922 * The next req is submitted with the same reqid as the
1923 * first part. The consumer only got back that reqid, and
1924 * should still be able to cancel the operation during its
1925 * second step.
1926 */
1927 kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
1928 crypto_ctx_template_t mac_tmpl;
1929 kcf_mech_entry_t *me;
1930
1931 ct = (crypto_dual_data_t *)mops->mo_data;
1932 mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
1933
1934 /* No expected recoverable failures, so no retry list */
1935 pd = kcf_get_mech_provider(mops->mo_framework_mechtype, NULL,
1936 &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC, ct->dd_len2);
1937
1938 if (pd == NULL) {
1939 error = CRYPTO_MECH_NOT_SUPPORTED;
1940 goto out;
1941 }
1942 /* Validate the MAC context template here */
1943 if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
1944 (mac_tmpl != NULL)) {
1945 kcf_ctx_template_t *ctx_mac_tmpl;
1946
1947 ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
1948
1949 if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
1950 KCF_PROV_REFRELE(pd);
1951 error = CRYPTO_OLD_CTX_TEMPLATE;
1952 goto out;
1953 }
1954 mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
1955 }
1956
1957 break;
1958 }
1959 case KCF_OG_DECRYPT: {
1960 kcf_decrypt_ops_params_t *dcrops =
1961 &(params->rp_u.decrypt_params);
1962
1963 ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
1964 /* No expected recoverable failures, so no retry list */
1965 pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
1966 NULL, NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
1967 ct->dd_len1);
1968
1969 if (pd == NULL) {
1970 error = CRYPTO_MECH_NOT_SUPPORTED;
1971 goto out;
1972 }
1973 break;
1974 }
1975 }
1976
1977 /* The second step uses len2 and offset2 of the dual_data */
1978 next_req->kr_saveoffset = ct->dd_offset1;
1979 next_req->kr_savelen = ct->dd_len1;
1980 ct->dd_offset1 = ct->dd_offset2;
1981 ct->dd_len1 = ct->dd_len2;
1982
1983 areq->an_reqarg.cr_flag = 0;
1984
1985 areq->an_reqarg.cr_callback_func = kcf_last_req;
1986 areq->an_reqarg.cr_callback_arg = next_req;
1987 areq->an_isdual = B_TRUE;
1988
1989 /*
1990 * We would like to call kcf_submit_request() here. But,
1991 * that is not possible as that routine allocates a new
1992 * kcf_areq_node_t request structure, while we need to
1993 * reuse the existing request structure.
1994 */
1995 switch (pd->pd_prov_type) {
1996 case CRYPTO_SW_PROVIDER:
1997 error = common_submit_request(pd, NULL, params,
1998 KCF_RHNDL(KM_NOSLEEP));
1999 break;
2000
2001 case CRYPTO_HW_PROVIDER: {
2002 kcf_provider_desc_t *old_pd;
2003 taskq_t *taskq = pd->pd_taskq;
2004
2005 /*
2006 * Set the params for the second step in the
2007 * dual-ops.
2008 */
2009 areq->an_params = *params;
2010 old_pd = areq->an_provider;
2011 KCF_PROV_REFRELE(old_pd);
2012 KCF_PROV_REFHOLD(pd);
2013 areq->an_provider = pd;
2014
2015 /*
2016 * Note that we have to do a taskq_dispatch()
2017 * here as we may be in interrupt context.
2018 */
2019 if (taskq_dispatch(taskq, process_req_hwp, areq,
2020 TQ_NOSLEEP) == (taskqid_t)0) {
2021 error = CRYPTO_HOST_MEMORY;
2022 } else {
2023 error = CRYPTO_QUEUED;
2024 }
2025 break;
2026 }
2027 }
2028
2029 /*
2030 * We have to release the holds on the request and the provider
2031 * in all cases.
2032 */
2033 KCF_AREQ_REFRELE(areq);
2034 KCF_PROV_REFRELE(pd);
2035
2036 if (error != CRYPTO_QUEUED) {
2037 /* restore, clean up, and invoke the client's callback */
2038
2039 ct->dd_offset1 = next_req->kr_saveoffset;
2040 ct->dd_len1 = next_req->kr_savelen;
2041 areq->an_reqarg = next_req->kr_callreq;
2042 kmem_free(next_req, sizeof (kcf_dual_req_t));
2043 areq->an_isdual = B_FALSE;
2044 kcf_aop_done(areq, error);
2045 }
2046 }
2047
2048 /*
2049 * Last part of an emulated dual operation.
2050 * Clean up and restore ...
2051 */
2052 void
2053 kcf_last_req(void *last_req_arg, int status)
2054 {
2055 kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
2056
2057 kcf_req_params_t *params = &(last_req->kr_params);
2058 kcf_areq_node_t *areq = last_req->kr_areq;
2059 crypto_dual_data_t *ct;
2060
2061 switch (params->rp_opgrp) {
2062 case KCF_OG_MAC: {
2063 kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
2064
2065 ct = (crypto_dual_data_t *)mops->mo_data;
2066 break;
2067 }
2068 case KCF_OG_DECRYPT: {
2069 kcf_decrypt_ops_params_t *dcrops =
2070 &(params->rp_u.decrypt_params);
2071
2072 ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
2073 break;
2074 }
2075 }
2076 ct->dd_offset1 = last_req->kr_saveoffset;
2077 ct->dd_len1 = last_req->kr_savelen;
2078
2079 /* The submitter used kcf_last_req as its callback */
2080
2081 if (areq == NULL) {
2082 crypto_call_req_t *cr = &last_req->kr_callreq;
2083
2084 (*(cr->cr_callback_func))(cr->cr_callback_arg, status);
2085 kmem_free(last_req, sizeof (kcf_dual_req_t));
2086 return;
2087 }
2088 areq->an_reqarg = last_req->kr_callreq;
2089 KCF_AREQ_REFRELE(areq);
2090 kmem_free(last_req, sizeof (kcf_dual_req_t));
2091 areq->an_isdual = B_FALSE;
2092 kcf_aop_done(areq, status);
2093 }
2094