/* $OpenBSD: softraid_crypto.c,v 1.40 2009/08/09 14:12:25 marco Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>

struct cryptop *sr_crypto_getcryptop(struct sr_workunit *, int);
int sr_crypto_create_keys(struct sr_discipline *);
void *sr_crypto_putcryptop(struct cryptop *);
int sr_crypto_get_kdf(struct bioc_createraid *,
    struct sr_discipline *);
int sr_crypto_decrypt_key(struct sr_discipline *);
int sr_crypto_alloc_resources(struct sr_discipline *);
int sr_crypto_free_resources(struct sr_discipline *);
int sr_crypto_write(struct cryptop *);
int sr_crypto_rw(struct sr_workunit *);
int sr_crypto_rw2(struct sr_workunit *, struct cryptop *);
void sr_crypto_intr(struct buf *);
int sr_crypto_read(struct cryptop *);
void sr_crypto_finish_io(struct sr_workunit *);
void sr_crypto_calculate_check_hmac_sha1(struct sr_discipline *,
    u_char[SHA1_DIGEST_LENGTH]);
void sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	/* Setup discipline pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_start_discipline = NULL;
	sd->sd_scsi_inquiry = sr_raid_inquiry;
	sd->sd_scsi_read_cap = sr_raid_read_cap;
	sd->sd_scsi_tur = sr_raid_tur;
	sd->sd_scsi_req_sense = sr_raid_request_sense;
	sd->sd_scsi_start_stop = sr_raid_start_stop;
	sd->sd_scsi_sync = sr_raid_sync;
	sd->sd_scsi_rw = sr_crypto_rw;
	/* XXX reuse raid 1 functions for now FIXME */
	sd->sd_set_chunk_state = sr_raid1_set_chunk_state;
	sd->sd_set_vol_state = sr_raid1_set_vol_state;
}
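
/*
 * Build a crypto request for a work unit: one crypto descriptor per
 * DEV_BSIZE block, each keyed for AES-XTS with the block number as
 * the explicit IV.  Writes are bounced through a private buffer so
 * the plaintext handed to us by the midlayer is never modified.
 */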
struct cryptop *
sr_crypto_getcryptop(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer *xs = wu->swu_xs;
	struct sr_discipline *sd = wu->swu_dis;
	struct cryptop *crp = NULL;
	struct cryptodesc *crd;
	struct uio *uio = NULL;
	int flags, i, n, s;
	daddr64_t blk = 0;
	u_int keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_getcryptop wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	s = splbio();
	uio = pool_get(&sd->mds.mdd_crypto.sr_uiopl, PR_ZERO);
	if (uio == NULL) {
		splx(s);	/* drop the raised IPL before unwinding */
		goto unwind;
	}
	uio->uio_iov = pool_get(&sd->mds.mdd_crypto.sr_iovpl, 0);
	if (uio->uio_iov == NULL) {
		splx(s);
		goto unwind;
	}
	splx(s);

	uio->uio_iovcnt = 1;
	uio->uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		uio->uio_iov->iov_base = malloc(xs->datalen, M_DEVBUF,
		    M_NOWAIT);
		if (uio->uio_iov->iov_base == NULL)	/* M_NOWAIT can fail */
			goto unwind;
		bcopy(xs->data, uio->uio_iov->iov_base, xs->datalen);
	} else
		uio->uio_iov->iov_base = xs->data;

	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	n = xs->datalen >> DEV_BSHIFT;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	crp = crypto_getreq(n);
	if (crp == NULL)
		goto unwind;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crp->crp_sid == (u_int64_t)-1)
		goto unwind;

	crp->crp_ilen = xs->datalen;
	crp->crp_alloctype = M_DEVBUF;
	crp->crp_buf = uio;
	for (i = 0, crd = crp->crp_desc; crd; i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta.scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;
			break;
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;
			break;
		default:
			goto unwind;
		}
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}

	return (crp);
unwind:
	if (crp)
		crypto_freereq(crp);
	if (uio && uio->uio_iov && uio->uio_iov->iov_base != NULL)
		if (wu->swu_xs->flags & SCSI_DATA_OUT)
			free(uio->uio_iov->iov_base, M_DEVBUF);

	s = splbio();
	if (uio && uio->uio_iov)
		pool_put(&sd->mds.mdd_crypto.sr_iovpl, uio->uio_iov);
	if (uio)
		pool_put(&sd->mds.mdd_crypto.sr_uiopl, uio);
	splx(s);

	return (NULL);
}
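
/*
 * Release a crypto request obtained via sr_crypto_getcryptop(),
 * including the bounce buffer allocated for writes.
 */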
void *
sr_crypto_putcryptop(struct cryptop *crp)
{
	struct uio *uio = crp->crp_buf;
	struct sr_workunit *wu = crp->crp_opaque;
	struct sr_discipline *sd = wu->swu_dis;
	int s;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_putcryptop crp: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crp);

	if (wu->swu_xs->flags & SCSI_DATA_OUT)
		free(uio->uio_iov->iov_base, M_DEVBUF);
	s = splbio();
	pool_put(&sd->mds.mdd_crypto.sr_iovpl, uio->uio_iov);
	pool_put(&sd->mds.mdd_crypto.sr_uiopl, uio);
	splx(s);
	crypto_freereq(crp);

	return (wu);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size < sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta.scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta.scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}
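
/*
 * Derive the key-check value: check_key = SHA1(mask key), then
 * HMAC-SHA1 under check_key over the plaintext disk keys.  Comparing
 * this MAC against the stored copy detects an incorrect mask key
 * without keeping any plaintext key material on disk.
 */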
void
sr_crypto_calculate_check_hmac_sha1(struct sr_discipline *sd,
    u_char check_digest[SHA1_DIGEST_LENGTH])
{
	u_char check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX hmacctx;
	SHA1_CTX shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	SHA1Final(check_key, &shactx);

	/* sch_mac = HMAC_SHA1_k(unencrypted scm_key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));
	HMAC_SHA1_Final(check_digest, &hmacctx);

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));
}

int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	rijndael_ctx ctx;
	u_char *p, *c;
	size_t ksz;
	int i, rv = 1;
	u_char check_digest[SHA1_DIGEST_LENGTH];

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta.scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta.scm_key;
	p = (u_char *)sd->mds.mdd_crypto.scr_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);

	switch (sd->mds.mdd_crypto.scr_meta.scm_mask_alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, sd->mds.mdd_crypto.scr_maskkey,
		    256) != 0)
			goto out;
		for (i = 0; i < ksz; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported scm_mask_alg %u\n",
		    DEVNAME(sd->sd_sc),
		    sd->mds.mdd_crypto.scr_meta.scm_mask_alg);
		goto out;
	}
#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly */
	sr_crypto_calculate_check_hmac_sha1(sd, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta.chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		bzero(check_digest, sizeof(check_digest));
		goto out;
	}
	bzero(check_digest, sizeof(check_digest));

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	bzero(&ctx, sizeof(ctx));
	return rv;
}
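
/*
 * Generate fresh disk keys, mask them with AES-ECB-256 under the
 * user-supplied mask key, and record the masked keys plus the
 * HMAC-SHA1 check value in the on-disk metadata.
 */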
int
sr_crypto_create_keys(struct sr_discipline *sd)
{
	rijndael_ctx ctx;
	u_char *p, *c;
	size_t ksz;
	int i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta.scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys */
	sd->mds.mdd_crypto.scr_meta.scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	if (rijndael_set_key_enc_only(&ctx, sd->mds.mdd_crypto.scr_maskkey,
	    256) != 0) {
		bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		bzero(&ctx, sizeof(ctx));
		return (1);
	}
	p = (u_char *)sd->mds.mdd_crypto.scr_key;
	c = (u_char *)sd->mds.mdd_crypto.scr_meta.scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	for (i = 0; i < ksz; i += RIJNDAEL128_BLOCK_LEN)
		rijndael_encrypt(&ctx, &p[i], &c[i]);
	bzero(&ctx, sizeof(ctx));

	/* Prepare key decryption check code */
	sd->mds.mdd_crypto.scr_meta.scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd,
	    sd->mds.mdd_crypto.scr_meta.chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	bzero(sd->mds.mdd_crypto.scr_key, sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta.scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini cri;
	u_int num_keys, i;

	if (!sd)
		return (EINVAL);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	pool_init(&sd->mds.mdd_crypto.sr_uiopl, sizeof(struct uio), 0, 0, 0,
	    "sr_uiopl", NULL);
	pool_init(&sd->mds.mdd_crypto.sr_iovpl, sizeof(struct iovec), 0, 0, 0,
	    "sr_iovpl", NULL);

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd))
		return (ENOMEM);
	if (sr_ccb_alloc(sd))
		return (ENOMEM);
	if (sr_crypto_decrypt_key(sd))
		return (EPERM);

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta.scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			/*
			 * Free the sessions allocated so far; the array
			 * may be fully populated, so bound the scan.
			 */
			for (i = 0; i < SR_CRYPTO_MAXKEYS &&
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
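
/*
 * Tear down everything sr_crypto_alloc_resources() set up: crypto
 * sessions, work units, ccbs and the uio/iovec pools.
 */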
int
sr_crypto_free_resources(struct sr_discipline *sd)
{
	int rv = EINVAL;
	u_int i;

	if (!sd)
		return (rv);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; i < SR_CRYPTO_MAXKEYS &&
	    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	sr_wu_free(sd);
	sr_ccb_free(sd);

	pool_destroy(&sd->mds.mdd_crypto.sr_uiopl);
	pool_destroy(&sd->mds.mdd_crypto.sr_iovpl);

	rv = 0;
	return (rv);
}

int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct cryptop *crp;
	int s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crp = sr_crypto_getcryptop(wu, 1);
		if (crp == NULL)	/* getcryptop can fail */
			return (1);
		crp->crp_callback = sr_crypto_write;
		crp->crp_opaque = wu;
		s = splvm();
		if (crypto_invoke(crp))
			rv = 1;
		else
			rv = crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_rw2(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	int s;
	struct sr_workunit *wu = crp->crp_opaque;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		((struct sr_workunit *)(crp->crp_opaque))->swu_xs->error =
		    XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(crp->crp_opaque);
		splx(s);
	}

	return (sr_crypto_rw2(wu, crp));
}

int
sr_crypto_rw2(struct sr_workunit *wu, struct cryptop *crp)
{
	struct sr_discipline *sd = wu->swu_dis;
	struct scsi_xfer *xs = wu->swu_xs;
	struct sr_ccb *ccb;
	struct uio *uio;
	int s;
	daddr64_t blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	blk += SR_META_SIZE + SR_META_OFFSET;

	wu->swu_io_count = 1;

	ccb = sr_ccb_get(sd);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}

	ccb->ccb_buf.b_flags = B_CALL | B_PHYS;
	ccb->ccb_buf.b_iodone = sr_crypto_intr;
	ccb->ccb_buf.b_blkno = blk;
	ccb->ccb_buf.b_bcount = xs->datalen;
	ccb->ccb_buf.b_bufsize = xs->datalen;
	ccb->ccb_buf.b_resid = xs->datalen;

	if (xs->flags & SCSI_DATA_IN) {
		ccb->ccb_buf.b_flags |= B_READ;
		ccb->ccb_buf.b_data = xs->data;
	} else {
		uio = crp->crp_buf;
		ccb->ccb_buf.b_flags |= B_WRITE;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crp;
	}

	ccb->ccb_buf.b_error = 0;
	ccb->ccb_buf.b_proc = curproc;
	ccb->ccb_wu = wu;
	ccb->ccb_target = 0;
	ccb->ccb_buf.b_dev = sd->sd_vol.sv_chunks[0]->src_dev_mm;
	ccb->ccb_buf.b_vp = sd->sd_vol.sv_chunks[0]->src_vn;
	if ((ccb->ccb_buf.b_flags & B_READ) == 0)
		ccb->ccb_buf.b_vp->v_numoutput++;

	LIST_INIT(&ccb->ccb_buf.b_dep);

	TAILQ_INSERT_TAIL(&wu->swu_ccb, ccb, ccb_link);

	DNPRINTF(SR_D_DIS, "%s: %s: sr_crypto_rw2: b_bcount: %d "
	    "b_blkno: %x b_flags 0x%0x b_data %p\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
	    ccb->ccb_buf.b_bcount, ccb->ccb_buf.b_blkno,
	    ccb->ccb_buf.b_flags, ccb->ccb_buf.b_data);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crp)
		crp->crp_etype = EINVAL;
	return (1);
}
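
/*
 * Block I/O completion handler.  Accounts for the finished ccb and,
 * for a successful read, hands the raw data to the crypto layer for
 * decryption before the scsi_xfer is completed.
 */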
void
sr_crypto_intr(struct buf *bp)
{
	struct sr_ccb *ccb = (struct sr_ccb *)bp;
	struct sr_workunit *wu = ccb->ccb_wu, *wup;
	struct sr_discipline *sd = wu->swu_dis;
	struct scsi_xfer *xs = wu->swu_xs;
	struct sr_softc *sc = sd->sd_sc;
	struct cryptop *crp;
	int s, s2, pend;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr bp: %x xs: %x\n",
	    DEVNAME(sc), bp, wu->swu_xs);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: b_bcount: %d b_resid: %d"
	    " b_flags: 0x%0x\n", DEVNAME(sc), ccb->ccb_buf.b_bcount,
	    ccb->ccb_buf.b_resid, ccb->ccb_buf.b_flags);

	s = splbio();

	if (ccb->ccb_buf.b_flags & B_ERROR) {
		printf("%s: i/o error on block %lld\n", DEVNAME(sc),
		    ccb->ccb_buf.b_blkno);
		wu->swu_ios_failed++;
		ccb->ccb_state = SR_CCB_FAILED;
		if (ccb->ccb_target != -1)
			sd->sd_set_chunk_state(sd, ccb->ccb_target,
			    BIOC_SDOFFLINE);
		else
			panic("%s: invalid target on wu: %p", DEVNAME(sc), wu);
	} else {
		ccb->ccb_state = SR_CCB_OK;
		wu->swu_ios_succeeded++;
	}
	wu->swu_ios_complete++;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: comp: %d count: %d\n",
	    DEVNAME(sc), wu->swu_ios_complete, wu->swu_io_count);

	if (wu->swu_ios_complete == wu->swu_io_count) {
		if (wu->swu_ios_failed == wu->swu_ios_complete)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->error = XS_NOERROR;

		pend = 0;
		TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link) {
			if (wu == wup) {
				TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
				pend = 1;

				if (wu->swu_collider) {
					wu->swu_collider->swu_state =
					    SR_WU_INPROGRESS;
					TAILQ_REMOVE(&sd->sd_wu_defq,
					    wu->swu_collider, swu_link);
					sr_raid_startwu(wu->swu_collider);
				}
				break;
			}
		}

		if (!pend)
			printf("%s: wu: %p not on pending queue\n",
			    DEVNAME(sc), wu);

		if ((xs->flags & SCSI_DATA_IN) && (xs->error == XS_NOERROR)) {
			crp = sr_crypto_getcryptop(wu, 0);
			if (crp == NULL) {
				/* out of resources; fail the read */
				xs->error = XS_DRIVER_STUFFUP;
				sr_crypto_finish_io(wu);
				goto done;
			}
			ccb->ccb_opaque = crp;
			crp->crp_callback = sr_crypto_read;
			crp->crp_opaque = wu;
			DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke "
			    "%p\n", DEVNAME(sc), crp);
			s2 = splvm();
			crypto_invoke(crp);
			splx(s2);
			goto done;
		}

		sr_crypto_finish_io(wu);
	}

done:
	splx(s);
}

void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline *sd = wu->swu_dis;
	struct scsi_xfer *xs = wu->swu_xs;
	struct sr_ccb *ccb;
#ifdef SR_DEBUG
	struct sr_softc *sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	xs->resid = 0;
	xs->flags |= ITSDONE;

	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_putcryptop(ccb->ccb_opaque);
	}

	/* do not change the order of these 2 functions */
	sr_wu_put(wu);
	sr_scsi_done(sd, xs);

	if (sd->sd_sync && sd->sd_wu_pending == 0)
		wakeup(sd);
}

int
sr_crypto_read(struct cryptop *crp)
{
	int s;
	struct sr_workunit *wu = crp->crp_opaque;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta.scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */