/* $OpenBSD: softraid_crypto.c,v 1.80 2012/01/30 13:13:03 jsing Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>

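/*
 * Overview of the CRYPTO discipline, as implemented below: the volume's
 * disk keys (scr_key[]) encrypt sector data with AES-XTS, using one key
 * per 2^SR_CRYPTO_KEY_BLKSHIFT disk blocks and the block number as IV.
 * The disk keys themselves are stored in the volume metadata, masked
 * with AES-ECB under a mask key that is either derived from a
 * passphrase in userland (the KDF hint in the metadata says how) or
 * read from a key disk.  An HMAC-SHA1 over the plaintext disk keys,
 * keyed with SHA1(mask key), lets the unmasked keys be verified before
 * the volume is brought online.
 */
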
/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	struct cryptodesc		*cr_descs;
	struct sr_workunit		*cr_wu;
	void				*cr_dmabuf;
};


struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
void		sr_crypto_wu_put(struct sr_crypto_wu *);
int		sr_crypto_create_keys(struct sr_discipline *);
struct sr_chunk *sr_crypto_create_key_disk(struct sr_discipline *, dev_t);
struct sr_chunk *sr_crypto_read_key_disk(struct sr_discipline *, dev_t);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int);
int		sr_crypto_alloc_resources(struct sr_discipline *);
int		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
int		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_intr(struct buf *);
int		sr_crypto_read(struct cryptop *);
void		sr_crypto_finish_io(struct sr_workunit *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void		sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
}

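/*
 * Create a new CRYPTO volume: allocate the crypto optional metadata,
 * obtain the mask key material (from a KDF answer supplied by userland,
 * or by generating a new key disk) and generate the masked disk keys.
 */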
int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "CRYPTO requires exactly one chunk");
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	}

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}

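/*
 * Take a preallocated crypto work unit off the free list and map the
 * SCSI transfer onto it: one crypto descriptor per DEV_BSIZE block,
 * with the disk block number as the explicit IV and the crypto session
 * selected by the key zone the block falls in.  Writes are encrypted
 * out-of-place into the preallocated DMA buffer; reads are decrypted
 * in place in the xs data buffer.
 */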
struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr64_t		blk = 0;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of wus");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
		goto unwind;

	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;
			break;
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;
			break;
		default:
			goto unwind;
		}
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}
	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);

unwind:
	/* steal the descriptors back from the cryptop */
	crwu->cr_crp->crp_desc = NULL;

	return (NULL);
}

void
sr_crypto_wu_put(struct sr_crypto_wu *crwu)
{
	struct cryptop		*crp = crwu->cr_crp;
	struct sr_workunit	*wu = crwu->cr_wu;
	struct sr_discipline	*sd = wu->swu_dis;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	/* steal the descriptors back from the cryptop */
	crp->crp_desc = NULL;

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}

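/*
 * sr_crypto_encrypt()/sr_crypto_decrypt() implement the key masking
 * step only; they never touch sector data.  The disk keys are treated
 * as one contiguous buffer that is transformed block-by-block with
 * AES-256 in ECB mode under the mask key.
 */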
int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

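/*
 * Unmask the disk keys using the current mask key and verify the result
 * against the check HMAC stored in the metadata before declaring the
 * volume usable.  The mask key is erased on the way out, success or not.
 */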
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}

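/*
 * Re-key the volume for a passphrase change: unmask the disk keys with
 * the old mask key (kdfinfo1), verify them against the stored HMAC,
 * then re-mask them with the new mask key (kdfinfo2) and regenerate the
 * check HMAC.  The caller is responsible for writing the updated
 * metadata to disk.
 */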
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}

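/*
 * A key disk is a separate chunk whose optional metadata carries the
 * mask key, allowing a volume to auto-assemble without a passphrase.
 * Build one by writing a minimal single-chunk "KEYDISK" volume through
 * a throwaway discipline, with the freshly generated mask key stored in
 * a SR_OPT_KEYDISK optional metadata item.
 */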
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item	*omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

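/*
 * Load the metadata from a key disk, validate it and copy the mask key
 * into the discipline, accepting both the current SR_OPT_KEYDISK format
 * and the original format that kept the key in the crypto area.  On
 * success the device is intentionally left open, since the chunk stays
 * in use by the volume.
 */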
struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item	*omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto done;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			bcopy(&skm->skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			bcopy((u_int8_t *)omh + sizeof(struct sr_meta_opt_hdr),
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}

	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		if (omi->omi_som)
			free(omi->omi_som, M_DEVBUF);
		free(omi, M_DEVBUF);
	}

	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	struct sr_crypto_wu	*crwu;
	u_int			num_keys, i;

	if (!sd)
		return (EINVAL);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each wu allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we can't
	 * fail an allocation without failing the io (which can cause real
	 * problems).
	 */
	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
	for (i = 0; i < sd->sd_max_wu; i++) {
		crwu = malloc(sizeof(*crwu), M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CANFAIL);
		if (crwu == NULL)
			return (ENOMEM);
		/* put it on the list now so if we fail it'll be freed */
		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* steal the list of cryptodescs */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
		crwu->cr_crp->crp_desc = NULL;
	}

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

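/*
 * Tear down in roughly the reverse order of allocation, scrubbing the
 * key disk state and freeing the crypto sessions before the crypto work
 * units are returned.
 */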
int
sr_crypto_free_resources(struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_wu	*crwu;
	u_int			i;

	if (!sd)
		return (rv);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(struct sr_chunk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);

		if (crwu->cr_dmabuf != NULL)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			/* twiddle cryptoreq back */
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
		free(crwu, M_DEVBUF);
	}
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

	sr_wu_free(sd);
	sr_ccb_free(sd);

	rv = 0;
	return (rv);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int	rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}

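/*
 * I/O entry point.  Writes are encrypted first and hit the disk from
 * the crypto callback (sr_crypto_write -> sr_crypto_rw2); reads are
 * issued to the disk directly and decrypted from the disk interrupt
 * (sr_crypto_intr -> sr_crypto_read).
 */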
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	int			s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_wu_get(wu, 1);
		if (crwu == NULL)
			return (1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		s = splvm();
		if (crypto_invoke(crwu->cr_crp))
			rv = 1;
		else
			rv = crwu->cr_crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_rw2(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(wu);
		splx(s);
	}

	return (sr_crypto_rw2(wu, crwu));
}

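/*
 * Issue the actual disk I/O for a work unit: for writes the data comes
 * from the crypto work unit's DMA buffer (already ciphertext), for
 * reads it lands directly in the xs data buffer for in-place decryption
 * later.  CRYPTO volumes have a single chunk, so everything goes to
 * chunk 0 at the volume's data offset.
 */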
int
sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	int			s;
	daddr64_t		blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	blk += sd->sd_meta->ssd_data_offset;

	wu->swu_io_count = 1;

	ccb = sr_ccb_get(sd);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}

	ccb->ccb_buf.b_flags = B_CALL | B_PHYS;
	ccb->ccb_buf.b_iodone = sr_crypto_intr;
	ccb->ccb_buf.b_blkno = blk;
	ccb->ccb_buf.b_bcount = xs->datalen;
	ccb->ccb_buf.b_bufsize = xs->datalen;
	ccb->ccb_buf.b_resid = xs->datalen;

	if (xs->flags & SCSI_DATA_IN) {
		ccb->ccb_buf.b_flags |= B_READ;
		ccb->ccb_buf.b_data = xs->data;
	} else {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_flags |= B_WRITE;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}

	ccb->ccb_buf.b_error = 0;
	ccb->ccb_buf.b_proc = curproc;
	ccb->ccb_wu = wu;
	ccb->ccb_target = 0;
	ccb->ccb_buf.b_dev = sd->sd_vol.sv_chunks[0]->src_dev_mm;
	ccb->ccb_buf.b_vp = sd->sd_vol.sv_chunks[0]->src_vn;
	if ((ccb->ccb_buf.b_flags & B_READ) == 0)
		ccb->ccb_buf.b_vp->v_numoutput++;

	LIST_INIT(&ccb->ccb_buf.b_dep);

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_rw2", DEVNAME(sd->sd_sc));
	TAILQ_INSERT_TAIL(&wu->swu_ccb, ccb, ccb_link);

	DNPRINTF(SR_D_DIS, "%s: %s: sr_crypto_rw2: b_bcount: %d "
	    "b_blkno: %x b_flags 0x%0x b_data %p\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
	    ccb->ccb_buf.b_bcount, ccb->ccb_buf.b_blkno,
	    ccb->ccb_buf.b_flags, ccb->ccb_buf.b_data);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_intr(struct buf *bp)
{
	struct sr_ccb		*ccb = (struct sr_ccb *)bp;
	struct sr_workunit	*wu = ccb->ccb_wu, *wup;
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_crypto_wu	*crwu;
	int			s, s2, pend;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr bp: %x xs: %x\n",
	    DEVNAME(sc), bp, wu->swu_xs);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: b_bcount: %d b_resid: %d"
	    " b_flags: 0x%0x\n", DEVNAME(sc), ccb->ccb_buf.b_bcount,
	    ccb->ccb_buf.b_resid, ccb->ccb_buf.b_flags);

	s = splbio();

	if (ccb->ccb_buf.b_flags & B_ERROR) {
		printf("%s: i/o error on block %lld\n", DEVNAME(sc),
		    ccb->ccb_buf.b_blkno);
		wu->swu_ios_failed++;
		ccb->ccb_state = SR_CCB_FAILED;
		if (ccb->ccb_target != -1)
			sd->sd_set_chunk_state(sd, ccb->ccb_target,
			    BIOC_SDOFFLINE);
		else
			panic("%s: invalid target on wu: %p", DEVNAME(sc), wu);
	} else {
		ccb->ccb_state = SR_CCB_OK;
		wu->swu_ios_succeeded++;
	}
	wu->swu_ios_complete++;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: comp: %d count: %d\n",
	    DEVNAME(sc), wu->swu_ios_complete, wu->swu_io_count);

	if (wu->swu_ios_complete == wu->swu_io_count) {
		if (wu->swu_ios_failed == wu->swu_ios_complete)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->error = XS_NOERROR;

		pend = 0;
		TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link) {
			if (wu == wup) {
				TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
				pend = 1;

				if (wu->swu_collider) {
					wu->swu_collider->swu_state =
					    SR_WU_INPROGRESS;
					TAILQ_REMOVE(&sd->sd_wu_defq,
					    wu->swu_collider, swu_link);
					sr_raid_startwu(wu->swu_collider);
				}
				break;
			}
		}

		if (!pend)
			printf("%s: wu: %p not on pending queue\n",
			    DEVNAME(sc), wu);

		if ((xs->flags & SCSI_DATA_IN) && (xs->error == XS_NOERROR)) {
			/* only fails on implementation error */
			crwu = sr_crypto_wu_get(wu, 0);
			if (crwu == NULL)
				panic("sr_crypto_intr: no wu");
			crwu->cr_crp->crp_callback = sr_crypto_read;
			ccb->ccb_opaque = crwu;
			DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke "
			    "%p\n", DEVNAME(sc), crwu->cr_crp);
			s2 = splvm();
			crypto_invoke(crwu->cr_crp);
			splx(s2);
			goto done;
		}

		sr_crypto_finish_io(wu);
	}

done:
	splx(s);
}

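/*
 * Complete the SCSI transfer: return any crypto work units hung off the
 * ccbs to the free list and hand the xs back to the midlayer.  Must be
 * called at splbio.
 */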
void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	xs->resid = 0;

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_wu_put(ccb->ccb_opaque);
	}

	sr_scsi_done(sd, xs);

	if (sd->sd_sync && sd->sd_wu_pending == 0)
		wakeup(sd);
}

int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int	i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */