/* $OpenBSD: crypto.c,v 1.84 2021/07/21 11:11:41 bluhm Exp $ */
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <crypto/cryptodev.h>

/*
 * Locks used to protect struct members in this file:
 *	A	allocated during driver attach, no hotplug, no detach
 *	I	immutable after creation
 *	K	kernel lock
 */

struct cryptocap *crypto_drivers;       /* [A] array allocated by driver
                                           [K] driver data and session count */
int crypto_drivers_num = 0;             /* [A] attached drivers array size */

struct pool cryptop_pool;               /* [I] set of crypto descriptors */

struct taskq *crypto_taskq;             /* [I] run crypto_invoke() and callback
                                           with kernel lock */
struct taskq *crypto_taskq_mpsafe;      /* [I] run crypto_invoke()
                                           without kernel lock */

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
        u_int32_t hid, lid, hid2 = -1;
        struct cryptocap *cpc;
        struct cryptoini *cr;
        int err, s, turn = 0;

        if (crypto_drivers == NULL)
                return EINVAL;

        KERNEL_ASSERT_LOCKED();

        s = splvm();

        /*
         * The algorithm we use here is pretty stupid; just use the
         * first driver that supports all the algorithms we need. Do
         * a double-pass over all the drivers, ignoring software ones
         * at first, to deal with cases of drivers that register after
         * the software one(s) --- e.g., PCMCIA crypto cards.
         *
         * XXX We need more smarts here (in real life too, but that's
         * XXX another story altogether).
         */
        do {
                for (hid = 0; hid < crypto_drivers_num; hid++) {
                        cpc = &crypto_drivers[hid];

                        /*
                         * If it's not initialized or has remaining sessions
                         * referencing it, skip.
                         */
                        if (cpc->cc_newsession == NULL ||
                            (cpc->cc_flags & CRYPTOCAP_F_CLEANUP))
                                continue;

                        if (cpc->cc_flags & CRYPTOCAP_F_SOFTWARE) {
                                /*
                                 * First round of search, ignore
                                 * software drivers.
                                 */
                                if (turn == 0)
                                        continue;
                        } else { /* !CRYPTOCAP_F_SOFTWARE */
                                /* Second round of search, only software. */
                                if (turn == 1)
                                        continue;
                        }

                        /* See if all the algorithms are supported. */
                        for (cr = cri; cr; cr = cr->cri_next) {
                                if (cpc->cc_alg[cr->cri_alg] == 0)
                                        break;
                        }

                        /*
                         * If even one algorithm is not supported,
                         * keep searching.
                         */
                        if (cr != NULL)
                                continue;

                        /*
                         * If we had a previous match, see how it compares
                         * to this one. Keep "remembering" whichever is
                         * the best of the two.
                         */
                        if (hid2 != -1) {
                                /*
                                 * Compare session numbers, pick the one
                                 * with the lowest.
                                 * XXX Need better metrics, this will
                                 * XXX just do un-weighted round-robin.
                                 */
                                if (crypto_drivers[hid].cc_sessions <=
                                    crypto_drivers[hid2].cc_sessions)
                                        hid2 = hid;
                        } else {
                                /*
                                 * Remember this one, for future
                                 * comparisons.
                                 */
                                hid2 = hid;
                        }
                }

                /*
                 * If we found something worth remembering, leave. The
                 * side-effect is that we will always prefer a hardware
                 * driver over the software one.
                 */
                if (hid2 != -1)
                        break;

                turn++;

                /* If we only want hardware drivers, don't do second pass. */
        } while (turn <= 2 && hard == 0);

        hid = hid2;

        /*
         * Can't do everything in one session.
         *
         * XXX Fix this. We need to inject a "virtual" session
         * XXX layer right about here.
         */

        if (hid == -1) {
                splx(s);
                return EINVAL;
        }

        /* Call the driver initialization routine. */
        lid = hid;              /* Pass the driver ID. */
        err = crypto_drivers[hid].cc_newsession(&lid, cri);
        if (err == 0) {
                (*sid) = hid;
                (*sid) <<= 32;
                (*sid) |= (lid & 0xffffffff);
                crypto_drivers[hid].cc_sessions++;
        }

        splx(s);
        return err;
}
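
/*
 * The session ID handed back above packs the driver table index into the
 * upper 32 bits and the driver-local session ID into the lower 32 bits;
 * for example, driver 3 returning local session 7 yields
 * sid 0x0000000300000007.  crypto_freesession() and crypto_invoke()
 * recover the driver index again with (sid >> 32) & 0xffffffff.
 */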

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
        int err = 0, s;
        u_int32_t hid;

        if (crypto_drivers == NULL)
                return EINVAL;

        /* Determine two IDs. */
        hid = (sid >> 32) & 0xffffffff;

        if (hid >= crypto_drivers_num)
                return ENOENT;

        KERNEL_ASSERT_LOCKED();

        s = splvm();

        if (crypto_drivers[hid].cc_sessions)
                crypto_drivers[hid].cc_sessions--;

        /* Call the driver cleanup routine, if available. */
        if (crypto_drivers[hid].cc_freesession)
                err = crypto_drivers[hid].cc_freesession(sid);

        /*
         * If this was the last session of a driver marked as invalid,
         * make the entry available for reuse.
         */
        if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
            crypto_drivers[hid].cc_sessions == 0)
                explicit_bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

        splx(s);
        return err;
}

/*
 * Find an empty slot.
 */
int32_t
crypto_get_driverid(u_int8_t flags)
{
        struct cryptocap *newdrv;
        int i, s;

        /* called from attach routines */
        KERNEL_ASSERT_LOCKED();

        s = splvm();

        if (crypto_drivers_num == 0) {
                crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
                crypto_drivers = mallocarray(crypto_drivers_num,
                    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
                if (crypto_drivers == NULL) {
                        crypto_drivers_num = 0;
                        splx(s);
                        return -1;
                }
        }

        for (i = 0; i < crypto_drivers_num; i++) {
                if (crypto_drivers[i].cc_process == NULL &&
                    !(crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) &&
                    crypto_drivers[i].cc_sessions == 0) {
                        crypto_drivers[i].cc_sessions = 1; /* Mark */
                        crypto_drivers[i].cc_flags = flags;
                        splx(s);
                        return i;
                }
        }

        /* Out of entries, allocate some more. */
        if (crypto_drivers_num >= CRYPTO_DRIVERS_MAX) {
                splx(s);
                return -1;
        }

        newdrv = mallocarray(crypto_drivers_num,
            2 * sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT);
        if (newdrv == NULL) {
                splx(s);
                return -1;
        }

        memcpy(newdrv, crypto_drivers,
            crypto_drivers_num * sizeof(struct cryptocap));
        bzero(&newdrv[crypto_drivers_num],
            crypto_drivers_num * sizeof(struct cryptocap));

        newdrv[i].cc_sessions = 1; /* Mark */
        newdrv[i].cc_flags = flags;

        free(crypto_drivers, M_CRYPTO_DATA,
            crypto_drivers_num * sizeof(struct cryptocap));

        crypto_drivers_num *= 2;
        crypto_drivers = newdrv;
        splx(s);
        return i;
}
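
/*
 * Typical use from a driver attach routine (illustrative sketch only;
 * "sc" and the xxx_* handlers stand in for hypothetical driver code):
 *
 *	int algs[CRYPTO_ALGORITHM_MAX + 1];
 *
 *	bzero(algs, sizeof(algs));
 *	algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
 *	sc->sc_cid = crypto_get_driverid(0);
 *	if (sc->sc_cid != -1)
 *		crypto_register(sc->sc_cid, algs, xxx_newsession,
 *		    xxx_freesession, xxx_process);
 */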

/*
 * Register a crypto driver: record the algorithms it supports (given as
 * the alg array) along with its entry points in the driver's cryptocap
 * entry.
 */
int
crypto_register(u_int32_t driverid, int *alg,
    int (*newses)(u_int32_t *, struct cryptoini *),
    int (*freeses)(u_int64_t), int (*process)(struct cryptop *))
{
        int s, i;

        if (driverid >= crypto_drivers_num || alg == NULL ||
            crypto_drivers == NULL)
                return EINVAL;

        /* called from attach routines */
        KERNEL_ASSERT_LOCKED();

        s = splvm();

        for (i = 0; i <= CRYPTO_ALGORITHM_MAX; i++) {
                /*
                 * XXX Do some performance testing to determine
                 * placing. We probably need an auxiliary data
                 * structure that describes relative performances.
                 */

                crypto_drivers[driverid].cc_alg[i] = alg[i];
        }

        crypto_drivers[driverid].cc_newsession = newses;
        crypto_drivers[driverid].cc_process = process;
        crypto_drivers[driverid].cc_freesession = freeses;
        crypto_drivers[driverid].cc_sessions = 0; /* Unmark */

        splx(s);

        return 0;
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver being unregistered and reroute
 * the request.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
        int i = CRYPTO_ALGORITHM_MAX + 1, s;
        u_int32_t ses;

        /* may be called from detach routines, but not used */
        KERNEL_ASSERT_LOCKED();

        s = splvm();

        /* Sanity checks. */
        if (driverid >= crypto_drivers_num || crypto_drivers == NULL ||
            alg <= 0 || alg > (CRYPTO_ALGORITHM_MAX + 1)) {
                splx(s);
                return EINVAL;
        }

        if (alg != CRYPTO_ALGORITHM_MAX + 1) {
                if (crypto_drivers[driverid].cc_alg[alg] == 0) {
                        splx(s);
                        return EINVAL;
                }
                crypto_drivers[driverid].cc_alg[alg] = 0;

                /* Was this the last algorithm ? */
                for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
                        if (crypto_drivers[driverid].cc_alg[i] != 0)
                                break;
        }

        /*
         * If a driver unregistered its last algorithm or all of them
         * (alg == CRYPTO_ALGORITHM_MAX + 1), cleanup its entry.
         */
        if (i == CRYPTO_ALGORITHM_MAX + 1 || alg == CRYPTO_ALGORITHM_MAX + 1) {
                ses = crypto_drivers[driverid].cc_sessions;
                bzero(&crypto_drivers[driverid], sizeof(struct cryptocap));
                if (ses != 0) {
                        /*
                         * If there are pending sessions, just mark as invalid.
                         */
                        crypto_drivers[driverid].cc_flags |= CRYPTOCAP_F_CLEANUP;
                        crypto_drivers[driverid].cc_sessions = ses;
                }
        }
        splx(s);
        return 0;
}
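
/*
 * When crypto_unregister() has to leave CRYPTOCAP_F_CLEANUP set because
 * sessions are still outstanding, crypto_invoke() below notices the flag,
 * frees the stale session, sets up a replacement on another driver and
 * completes the request with EAGAIN so that the caller can retry.
 */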
386 */ 387 int 388 crypto_dispatch(struct cryptop *crp) 389 { 390 struct taskq *tq = crypto_taskq; 391 int error = 0, s; 392 u_int32_t hid; 393 394 s = splvm(); 395 hid = (crp->crp_sid >> 32) & 0xffffffff; 396 if (hid < crypto_drivers_num) { 397 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_MPSAFE) 398 tq = crypto_taskq_mpsafe; 399 } 400 splx(s); 401 402 if ((crp->crp_flags & CRYPTO_F_NOQUEUE) == 0) { 403 task_set(&crp->crp_task, (void (*))crypto_invoke, crp); 404 task_add(tq, &crp->crp_task); 405 } else { 406 error = crypto_invoke(crp); 407 } 408 409 return error; 410 } 411 412 /* 413 * Dispatch a crypto request to the appropriate crypto devices. 414 */ 415 int 416 crypto_invoke(struct cryptop *crp) 417 { 418 u_int64_t nid; 419 u_int32_t hid; 420 int error; 421 int s, i; 422 423 /* Sanity checks. */ 424 if (crp == NULL || crp->crp_callback == NULL) 425 return EINVAL; 426 427 s = splvm(); 428 if (crp->crp_ndesc < 1 || crypto_drivers == NULL) { 429 crp->crp_etype = EINVAL; 430 crypto_done(crp); 431 splx(s); 432 return 0; 433 } 434 435 hid = (crp->crp_sid >> 32) & 0xffffffff; 436 if (hid >= crypto_drivers_num) 437 goto migrate; 438 439 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) { 440 crypto_freesession(crp->crp_sid); 441 goto migrate; 442 } 443 444 if (crypto_drivers[hid].cc_process == NULL) 445 goto migrate; 446 447 crypto_drivers[hid].cc_operations++; 448 crypto_drivers[hid].cc_bytes += crp->crp_ilen; 449 450 error = crypto_drivers[hid].cc_process(crp); 451 if (error) { 452 if (error == ERESTART) { 453 /* Unregister driver and migrate session. */ 454 crypto_unregister(hid, CRYPTO_ALGORITHM_MAX + 1); 455 goto migrate; 456 } else { 457 crp->crp_etype = error; 458 } 459 } 460 461 splx(s); 462 return 0; 463 464 migrate: 465 /* Migrate session. */ 466 for (i = 0; i < crp->crp_ndesc - 1; i++) 467 crp->crp_desc[i].CRD_INI.cri_next = &crp->crp_desc[i+1].CRD_INI; 468 crp->crp_desc[crp->crp_ndesc].CRD_INI.cri_next = NULL; 469 470 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0) 471 crp->crp_sid = nid; 472 473 crp->crp_etype = EAGAIN; 474 crypto_done(crp); 475 splx(s); 476 return 0; 477 } 478 479 /* 480 * Release a set of crypto descriptors. 481 */ 482 void 483 crypto_freereq(struct cryptop *crp) 484 { 485 if (crp == NULL) 486 return; 487 488 if (crp->crp_ndescalloc > 2) 489 free(crp->crp_desc, M_CRYPTO_DATA, 490 crp->crp_ndescalloc * sizeof(struct cryptodesc)); 491 pool_put(&cryptop_pool, crp); 492 } 493 494 /* 495 * Acquire a set of crypto descriptors. 496 */ 497 struct cryptop * 498 crypto_getreq(int num) 499 { 500 struct cryptop *crp; 501 502 crp = pool_get(&cryptop_pool, PR_NOWAIT | PR_ZERO); 503 if (crp == NULL) 504 return NULL; 505 506 crp->crp_desc = crp->crp_sdesc; 507 crp->crp_ndescalloc = crp->crp_ndesc = num; 508 509 if (num > 2) { 510 crp->crp_desc = mallocarray(num, sizeof(struct cryptodesc), 511 M_CRYPTO_DATA, M_NOWAIT | M_ZERO); 512 if (crp->crp_desc == NULL) { 513 pool_put(&cryptop_pool, crp); 514 return NULL; 515 } 516 } 517 518 return crp; 519 } 520 521 void 522 crypto_init(void) 523 { 524 crypto_taskq = taskq_create("crypto", 1, IPL_VM, 0); 525 crypto_taskq_mpsafe = taskq_create("crynlk", 1, IPL_VM, TASKQ_MPSAFE); 526 527 pool_init(&cryptop_pool, sizeof(struct cryptop), 0, IPL_VM, 0, 528 "cryptop", NULL); 529 } 530 531 /* 532 * Invoke the callback on behalf of the driver. 

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
        crp->crp_flags |= CRYPTO_F_DONE;
        if (crp->crp_flags & CRYPTO_F_NOQUEUE) {
                /* not from the crypto queue, wakeup the userland process */
                crp->crp_callback(crp);
        } else {
                task_set(&crp->crp_task, (void (*))crp->crp_callback, crp);
                task_add(crypto_taskq, &crp->crp_task);
        }
}