/*	$OpenBSD: softraid_crypto.c,v 1.109 2014/01/22 01:46:08 jsing Exp $	*/
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>

/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	struct sr_workunit	cr_wu;		/* Must be first. */
	struct uio		cr_uio;
	struct iovec		cr_iov;
	struct cryptop		*cr_crp;
	struct cryptodesc	*cr_descs;
	void			*cr_dmabuf;
};
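
/*
 * Note: the crypto paths below cast directly between struct sr_workunit *
 * and struct sr_crypto_wu * (see sr_crypto_prepare()), which is only safe
 * as long as cr_wu remains the first member of this structure.
 */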

struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
int		sr_crypto_create_keys(struct sr_discipline *);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
struct sr_chunk *sr_crypto_create_key_disk(struct sr_discipline *, dev_t);
struct sr_chunk *sr_crypto_read_key_disk(struct sr_discipline *, dev_t);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
int		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done(struct sr_workunit *);
int		sr_crypto_read(struct cryptop *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void		sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int	i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}

int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}
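
/*
 * Assembly must locate the mask key in one of three places: it may
 * already be cached in the kernel (data != NULL), it may be stored on a
 * key disk, or it must be derived from a passphrase handed in by
 * userland through the KDF interface.
 */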

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* Provide userland with the KDF hint. */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* We're done. */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* Get KDF with mask key from userland. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}
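
/*
 * sr_crypto_prepare() builds one crypto descriptor per 512-byte disk
 * block: each block is encrypted or decrypted independently, with its
 * block number as the explicit IV. For example, a 4096-byte write
 * starting at block 100 yields eight descriptors using IVs 100..107.
 */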

struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blk;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blk = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be fewer than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where a read/write spans
	 * different key blocks (e.g. the 0.5TB boundary). Currently this
	 * is already broken by the use of scr_key[0] below.
	 */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}

	return (crwu);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* Copy KDF hint to disk meta data. */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* Copy mask key to run-time meta data. */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}

int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}
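
/*
 * The check HMAC allows a mask key to be verified without storing any
 * plaintext key material: the MAC key is derived from the mask key and
 * the MAC is taken over the unencrypted disk keys, so an incorrect
 * passphrase shows up as a MAC mismatch instead of a silently
 * misdecrypted volume.
 */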

void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char		check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX	hmacctx;
	SHA1_CTX	shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char	check_digest[SHA1_DIGEST_LENGTH];
	int	rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* We don't need the mask key anymore. */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return (rv);
}
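
/*
 * Key hierarchy: the randomly generated disk keys (scr_key[]) encrypt
 * the actual volume data; they are stored in the on-disk metadata only
 * in masked form (AES-ECB-256 under the mask key), alongside a KDF hint
 * that lets userland rederive the mask key from a passphrase.
 */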

int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* Generate crypto keys. */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys. */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}

int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char		check_digest[SHA1_DIGEST_LENGTH];
	u_char		*c, *p = NULL;
	size_t		ksz;
	int		rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
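
/*
 * A key disk is a minimal single-chunk volume of its own (volume ID
 * SR_KEYDISK_VOLID, level SR_KEYDISK_LEVEL) whose only payload is an
 * optional metadata area of type SR_OPT_KEYDISK holding the mask key,
 * which allows the crypto volume to be assembled without a passphrase.
 */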

struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item	*omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return (key_disk);
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item	*omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto done;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			bcopy(&skm->skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			bcopy((u_int8_t *)omh + sizeof(struct sr_meta_opt_hdr),
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}

	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		if (omi->omi_som)
			free(omi->omi_som, M_DEVBUF);
		free(omi, M_DEVBUF);
	}

	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return (key_disk);
}
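
/*
 * Example for the session setup in sr_crypto_alloc_resources() below,
 * assuming SR_CRYPTO_KEY_BLKSHIFT is 30 (one key per 2^30 512-byte
 * blocks, i.e. 0.5TB, as the XXX comment in sr_crypto_prepare()
 * suggests): a 1TB volume has ssd_size = 2^31, giving num_keys = 2 and
 * sessions for scr_key[0] through scr_key[2].
 */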

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd, sizeof(struct sr_crypto_wu))) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		crwu->cr_descs = crwu->cr_crp->crp_desc;
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
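
/*
 * Note: sr_crypto_prepare() may advance crp_desc partway down the
 * preallocated descriptor chain for I/Os smaller than MAXPHYS, so the
 * teardown below restores the saved chain head (cr_descs) before
 * calling crypto_freereq(), ensuring the whole chain is released.
 */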

void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	u_int			i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		if (crwu->cr_dmabuf)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
	}

	sr_wu_free(sd);
	sr_ccb_free(sd);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}
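
/*
 * In the SR_IOCTL_CHANGE_PASSPHRASE path above, kdfinfo1 carries the
 * current mask key and kdfinfo2 the replacement:
 * sr_crypto_change_maskkey() verifies the former against the check HMAC
 * before re-masking the disk keys with the latter, so the disk keys
 * themselves never change.
 */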

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int	rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}

int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	daddr_t			blk;
	int			rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (sr_validate_io(wu, &blk, "sr_crypto_rw"))
		return (1);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_prepare(wu, 1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		rv = crypto_invoke(crwu->cr_crp);
		if (rv == 0)
			rv = crwu->cr_crp->crp_etype;
	} else
		rv = sr_crypto_dev_rw(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* Fail the I/O. */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_scsi_done(wu->swu_dis, wu->swu_xs);
		splx(s);
	}

	return (sr_crypto_dev_rw(wu, crwu));
}

int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blk;

	blk = wu->swu_blk_start;
	blk += sd->sd_meta->ssd_data_offset;

	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* Should never happen but handle more gracefully. */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_done(struct sr_workunit *wu)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	int			s;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		crwu = sr_crypto_prepare(wu, 0);
		crwu->cr_crp->crp_callback = sr_crypto_read;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_invoke %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		crypto_invoke(crwu->cr_crp);
		return;
	}

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);
}
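
/*
 * I/O flow summary: writes are encrypted first and then hit the disk
 * (sr_crypto_rw -> crypto_invoke -> sr_crypto_write ->
 * sr_crypto_dev_rw), while reads hit the disk first and are decrypted
 * on completion (sr_crypto_dev_rw -> sr_crypto_done -> crypto_invoke ->
 * sr_crypto_read).
 */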

int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int	i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */