/* $OpenBSD: softraid_crypto.c,v 1.72 2011/09/18 13:11:08 jsing Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>

/*
 * The per-io data that we need to preallocate. We can't afford to allow io
 * to start failing when memory pressure kicks in.
 * We can store this in the WU because we assert that only one
 * ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	struct cryptodesc		*cr_descs;
	struct sr_workunit		*cr_wu;
	void				*cr_dmabuf;
};


struct sr_crypto_wu	*sr_crypto_wu_get(struct sr_workunit *, int);
void			sr_crypto_wu_put(struct sr_crypto_wu *);
int			sr_crypto_create_keys(struct sr_discipline *);
int			sr_crypto_get_kdf(struct bioc_createraid *,
			    struct sr_discipline *);
int			sr_crypto_decrypt(u_char *, u_char *, u_char *,
			    size_t, int);
int			sr_crypto_encrypt(u_char *, u_char *, u_char *,
			    size_t, int);
int			sr_crypto_decrypt_key(struct sr_discipline *);
int			sr_crypto_change_maskkey(struct sr_discipline *,
			    struct sr_crypto_kdfinfo *,
			    struct sr_crypto_kdfinfo *);
struct sr_chunk		*sr_crypto_create_key_disk(struct sr_discipline *,
			    dev_t);
struct sr_chunk		*sr_crypto_read_key_disk(struct sr_discipline *,
			    dev_t);
int			sr_crypto_create(struct sr_discipline *,
			    struct bioc_createraid *, int, int64_t);
int			sr_crypto_assemble(struct sr_discipline *,
			    struct bioc_createraid *, int);
int			sr_crypto_alloc_resources(struct sr_discipline *);
int			sr_crypto_free_resources(struct sr_discipline *);
int			sr_crypto_ioctl(struct sr_discipline *,
			    struct bioc_discipline *);
int			sr_crypto_meta_opt_handler(struct sr_discipline *,
			    struct sr_meta_opt *);
int			sr_crypto_write(struct cryptop *);
int			sr_crypto_rw(struct sr_workunit *);
int			sr_crypto_rw2(struct sr_workunit *,
			    struct sr_crypto_wu *);
void			sr_crypto_intr(struct buf *);
int			sr_crypto_read(struct cryptop *);
void			sr_crypto_finish_io(struct sr_workunit *);
void			sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
			    u_int8_t *, int, u_char *);
void			sr_crypto_hotplug(struct sr_discipline *,
			    struct disk *, int);

#ifdef SR_DEBUG0
void			sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int	i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline pointers. */
	sd->sd_create = sr_crypto_create;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_start_discipline = NULL;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_inquiry = sr_raid_inquiry;
	sd->sd_scsi_read_cap = sr_raid_read_cap;
	sd->sd_scsi_tur = sr_raid_tur;
	sd->sd_scsi_req_sense = sr_raid_request_sense;
	sd->sd_scsi_start_stop = sr_raid_start_stop;
	sd->sd_scsi_sync = sr_raid_sync;
	sd->sd_scsi_rw = sr_crypto_rw;
	/* XXX reuse raid 1 functions for now FIXME */
	sd->sd_set_chunk_state = sr_raid1_set_chunk_state;
	sd->sd_set_vol_state = sr_raid1_set_vol_state;
}
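/*
 * Create a new CRYPTO volume. A CRYPTO volume always consists of exactly
 * one chunk; the disk keys are generated here and masked with either a
 * passphrase-derived mask key or a mask key stored on a key disk.
 */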
int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			 rv = EINVAL;

	if (no_chunk != 1)
		goto done;

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_om.somi.som_type = SR_OPT_CRYPTO;
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = &omi->omi_om.somi.som_meta.smm_crypto;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;

	}

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}


struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			 flags, i, n;
	daddr64_t		 blk = 0;
	u_int			 keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of wus");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

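	/*
	 * Extract the logical block address from the CDB. READ/WRITE
	 * commands come in 6, 10 and 16 byte variants, which carry the
	 * LBA as a 3, 4 or 8 byte big-endian value respectively.
	 */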
	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of io.
	 * Since ios may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 * Otherwise crypto will get upset with us. So put n descs on the crp
	 * and keep the rest.
	 */
	crd = crwu->cr_descs;
	i = 0;
	while (++i < n) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crwu->cr_descs;
	crwu->cr_descs = crd->crd_next;
	crd->crd_next = NULL;

	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
		goto unwind;

	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;
			break;
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;
			break;
		default:
			goto unwind;
		}
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}
	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);
unwind:
	/* steal the descriptors back from the cryptop */
	crd = crwu->cr_crp->crp_desc;
	while (crd->crd_next != NULL)
		crd = crd->crd_next;

	/* join the lists back again */
	crd->crd_next = crwu->cr_descs;
	crwu->cr_descs = crwu->cr_crp->crp_desc;
	crwu->cr_crp->crp_desc = NULL;
	return (NULL);
}

void
sr_crypto_wu_put(struct sr_crypto_wu *crwu)
{
	struct cryptop		*crp = crwu->cr_crp;
	struct sr_workunit	*wu = crwu->cr_wu;
	struct sr_discipline	*sd = wu->swu_dis;
	struct cryptodesc	*crd;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	/* steal the descriptors back from the cryptop */
	crd = crp->crp_desc;
	KASSERT(crd);
	while (crd->crd_next != NULL)
		crd = crd->crd_next;

	/* join the lists back again */
	crd->crd_next = crwu->cr_descs;
	crwu->cr_descs = crp->crp_desc;
	crp->crp_desc = NULL;

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
}
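/*
 * Copy the KDF hint and/or mask key supplied by userland (via the
 * bioc_createraid opaque data) into the discipline metadata.
 */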
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			 rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}

int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx	ctx;
	int		i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}
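/*
 * Compute the check MAC used to verify that the disk keys were unmasked
 * correctly: k = SHA1(mask key), mac = HMAC-SHA1_k(plaintext disk keys).
 * The MAC is stored in the volume metadata and compared after every
 * unmasking.
 */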
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char		check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX	hmacctx;
	SHA1_CTX	shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char	check_digest[SHA1_DIGEST_LENGTH];
	int	rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
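/*
 * Change the mask key: verify the current mask key via the check HMAC,
 * then re-encrypt the plaintext disk keys under the new mask key and
 * recompute the HMAC.
 */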
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char	check_digest[SHA1_DIGEST_LENGTH];
	u_char	*c, *p = NULL;
	size_t	ksz;
	int	rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item	*omi = NULL;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		printf("%s: %s is already in use\n", DEVNAME(sc), devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		printf("%s: cannot open key disk %s\n", DEVNAME(sc), devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		printf("%s: %s partition not of type RAID (%d)\n",
		    DEVNAME(sc), devname,
		    label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

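	/*
	 * With a key disk the mask key does not need to be derived from
	 * a passphrase, so it is simply random data; possession of the
	 * key disk is what unlocks the volume.
	 */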
	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	sm->ssdi.ssd_opt_no = 1;
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_om.somi.som_type = SR_OPT_KEYDISK;
	bcopy(sd->mds.mdd_crypto.scr_maskkey,
	    omi->omi_om.somi.som_meta.smm_keydisk.skm_maskkey,
	    sizeof(omi->omi_om.somi.som_meta.smm_keydisk.skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		printf("%s: could not save metadata to %s\n",
		    DEVNAME(sc), devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt	*om;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		printf("%s: %s is already in use\n", DEVNAME(sc), devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		printf("%s: cannot open key disk %s\n", DEVNAME(sc), devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		printf("%s: %s partition not of type RAID (%d)\n",
		    DEVNAME(sc), devname,
		    label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		printf("%s: native bootprobe could not read native "
		    "metadata\n", DEVNAME(sc));
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		printf("%s: %s is not a key disk\n", DEVNAME(sc), devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

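	/*
	 * The optional metadata records follow the chunk metadata,
	 * directly after the volume header:
	 *
	 *	struct sr_metadata
	 *	struct sr_meta_chunk	* ssd_chunk_no
	 *	struct sr_meta_opt	* ssd_opt_no
	 */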
	/* Read mask key from optional metadata. */
	om = (struct sr_meta_opt *)((u_int8_t *)(sm + 1) +
	    sizeof(struct sr_meta_chunk) * sm->ssdi.ssd_chunk_no);
	for (c = 0; c < sm->ssdi.ssd_opt_no; c++) {
		if (om->somi.som_type == SR_OPT_KEYDISK) {
			bcopy(&om->somi.som_meta.smm_keydisk.skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
			break;
		} else if (om->somi.som_type == SR_OPT_CRYPTO) {
			bcopy(&om->somi.som_meta.smm_crypto,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
			break;
		}
		om++;
	}

	open = 0;

done:
	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	struct sr_crypto_wu	*crwu;
	u_int			num_keys, i;

	if (!sd)
		return (EINVAL);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd))
		return (ENOMEM);
	if (sr_ccb_alloc(sd))
		return (ENOMEM);
	if (sr_crypto_decrypt_key(sd))
		return (EPERM);
	/*
	 * For each wu allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we can't
	 * fail an allocation without failing the io (which can cause real
	 * problems).
	 */
	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
	for (i = 0; i < sd->sd_max_wu; i++) {
		crwu = malloc(sizeof(*crwu), M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CANFAIL);
		if (crwu == NULL)
			return (ENOMEM);
		/* put it on the list now so if we fail it'll be freed */
		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* steal the list of cryptodescs */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
		crwu->cr_crp->crp_desc = NULL;
	}

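	/*
	 * Set up one crypto session per disk key. cri_klen is in bits
	 * and covers both halves of the XTS key, so AES-XTS-128 takes
	 * a 256 bit key and AES-XTS-256 a 512 bit one.
	 */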
	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			    i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

int
sr_crypto_free_resources(struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_wu	*crwu;
	u_int			i;

	if (!sd)
		return (rv);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);

		if (crwu->cr_dmabuf != NULL)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		/* twiddle cryptoreq back */
		if (crwu->cr_crp) {
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
		free(crwu, M_DEVBUF);
	}
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

	sr_wu_free(sd);
	sr_ccb_free(sd);

	rv = 0;
	return (rv);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair	kdfpair;
	struct sr_crypto_kdfinfo	kdfinfo1, kdfinfo2;
	int				size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));
	return (rv);
}

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt *om)
{
	int	rv = EINVAL;

	if (om->somi.som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = &om->somi.som_meta.smm_crypto;
		rv = 0;
	}

	return (rv);
}
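/*
 * I/O path: a write is first encrypted into the work unit's preallocated
 * DMA buffer via the crypto framework and the disk I/O is issued from the
 * crypto callback (sr_crypto_rw -> sr_crypto_write -> sr_crypto_rw2).
 * A read completes the disk I/O first and is decrypted in place from the
 * biodone callback (sr_crypto_intr -> sr_crypto_read).
 */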
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	int			s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_wu_get(wu, 1);
		if (crwu == NULL)
			return (1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		s = splvm();
		if (crypto_invoke(crwu->cr_crp))
			rv = 1;
		else
			rv = crwu->cr_crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_rw2(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(wu);
		splx(s);
	}

	return (sr_crypto_rw2(wu, crwu));
}

int
sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	int			s;
	daddr64_t		blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	blk += sd->sd_meta->ssd_data_offset;

	wu->swu_io_count = 1;

	ccb = sr_ccb_get(sd);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}

	ccb->ccb_buf.b_flags = B_CALL | B_PHYS;
	ccb->ccb_buf.b_iodone = sr_crypto_intr;
	ccb->ccb_buf.b_blkno = blk;
	ccb->ccb_buf.b_bcount = xs->datalen;
	ccb->ccb_buf.b_bufsize = xs->datalen;
	ccb->ccb_buf.b_resid = xs->datalen;

	if (xs->flags & SCSI_DATA_IN) {
		ccb->ccb_buf.b_flags |= B_READ;
		ccb->ccb_buf.b_data = xs->data;
	} else {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_flags |= B_WRITE;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}

	ccb->ccb_buf.b_error = 0;
	ccb->ccb_buf.b_proc = curproc;
	ccb->ccb_wu = wu;
	ccb->ccb_target = 0;
	ccb->ccb_buf.b_dev = sd->sd_vol.sv_chunks[0]->src_dev_mm;
	ccb->ccb_buf.b_vp = sd->sd_vol.sv_chunks[0]->src_vn;
	if ((ccb->ccb_buf.b_flags & B_READ) == 0)
		ccb->ccb_buf.b_vp->v_numoutput++;

	LIST_INIT(&ccb->ccb_buf.b_dep);

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_rw2", DEVNAME(sd->sd_sc));
	TAILQ_INSERT_TAIL(&wu->swu_ccb, ccb, ccb_link);

	DNPRINTF(SR_D_DIS, "%s: %s: sr_crypto_rw2: b_bcount: %d "
	    "b_blkno: %x b_flags 0x%0x b_data %p\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
	    ccb->ccb_buf.b_bcount, ccb->ccb_buf.b_blkno,
	    ccb->ccb_buf.b_flags, ccb->ccb_buf.b_data);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_intr(struct buf *bp)
{
	struct sr_ccb		*ccb = (struct sr_ccb *)bp;
	struct sr_workunit	*wu = ccb->ccb_wu, *wup;
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_crypto_wu	*crwu;
	int			s, s2, pend;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr bp: %x xs: %x\n",
	    DEVNAME(sc), bp, wu->swu_xs);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: b_bcount: %d b_resid: %d"
	    " b_flags: 0x%0x\n", DEVNAME(sc), ccb->ccb_buf.b_bcount,
	    ccb->ccb_buf.b_resid, ccb->ccb_buf.b_flags);

	s = splbio();

	if (ccb->ccb_buf.b_flags & B_ERROR) {
		printf("%s: i/o error on block %lld\n", DEVNAME(sc),
		    ccb->ccb_buf.b_blkno);
		wu->swu_ios_failed++;
		ccb->ccb_state = SR_CCB_FAILED;
		if (ccb->ccb_target != -1)
			sd->sd_set_chunk_state(sd, ccb->ccb_target,
			    BIOC_SDOFFLINE);
		else
			panic("%s: invalid target on wu: %p", DEVNAME(sc), wu);
	} else {
		ccb->ccb_state = SR_CCB_OK;
		wu->swu_ios_succeeded++;
	}
	wu->swu_ios_complete++;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: comp: %d count: %d\n",
	    DEVNAME(sc), wu->swu_ios_complete, wu->swu_io_count);

	if (wu->swu_ios_complete == wu->swu_io_count) {
		if (wu->swu_ios_failed == wu->swu_ios_complete)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->error = XS_NOERROR;

		pend = 0;
		TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link) {
			if (wu == wup) {
				TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
				pend = 1;

				if (wu->swu_collider) {
					wu->swu_collider->swu_state =
					    SR_WU_INPROGRESS;
					TAILQ_REMOVE(&sd->sd_wu_defq,
					    wu->swu_collider, swu_link);
					sr_raid_startwu(wu->swu_collider);
				}
				break;
			}
		}

		if (!pend)
			printf("%s: wu: %p not on pending queue\n",
			    DEVNAME(sc), wu);

		if ((xs->flags & SCSI_DATA_IN) && (xs->error == XS_NOERROR)) {
			/* only fails on implementation error */
			crwu = sr_crypto_wu_get(wu, 0);
			if (crwu == NULL)
				panic("sr_crypto_intr: no wu");
			crwu->cr_crp->crp_callback = sr_crypto_read;
			ccb->ccb_opaque = crwu;
			DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke "
			    "%p\n", DEVNAME(sc), crwu->cr_crp);
			s2 = splvm();
			crypto_invoke(crwu->cr_crp);
			splx(s2);
			goto done;
		}

		sr_crypto_finish_io(wu);
	}

done:
	splx(s);
}

void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	xs->resid = 0;

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_wu_put(ccb->ccb_opaque);
	}

	sr_scsi_done(sd, xs);

	if (sd->sd_sync && sd->sd_wu_pending == 0)
		wakeup(sd);
}

int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int	i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */