/* $OpenBSD: softraid_crypto.c,v 1.143 2021/10/22 05:06:37 anton Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>

struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *,
		    struct sr_crypto *, int);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *,
		    struct sr_crypto *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto *, struct sr_crypto_kdfinfo *,
		    struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_meta_create(struct sr_discipline *,
		    struct sr_crypto *, struct bioc_createraid *);
int		sr_crypto_set_key(struct sr_discipline *, struct sr_crypto *,
		    struct bioc_createraid *, int, void *);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
void		sr_crypto_free_sessions(struct sr_discipline *,
		    struct sr_crypto *);
int		sr_crypto_alloc_resources_internal(struct sr_discipline *,
		    struct sr_crypto *);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources_internal(struct sr_discipline *,
		    struct sr_crypto *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl_internal(struct sr_discipline *,
		    struct sr_crypto *, struct bioc_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler_internal(struct sr_discipline *,
		    struct sr_crypto *, struct sr_meta_opt_hdr *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
void		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done_internal(struct sr_workunit *,
		    struct sr_crypto *);
void		sr_crypto_done(struct sr_workunit *);
void		sr_crypto_read(struct cryptop *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void		 sr_crypto_dumpkeys(struct sr_crypto *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}

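/*
 * Create a new CRYPTO volume: validate the chunk count, record the
 * coerced size and construct the crypto optional metadata.
 */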
int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	int rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		return (rv);
	}

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	rv = sr_crypto_meta_create(sd, &sd->mds.mdd_crypto, bc);
	if (rv)
		return (rv);

	sd->sd_max_ccb_per_wu = no_chunk;
	return (0);
}

int
sr_crypto_meta_create(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
    struct bioc_createraid *bc)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (sd->sd_meta->ssdi.ssd_size > SR_CRYPTO_MAXSIZE) {
		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
		    sd->sd_name, sd->sd_meta->ssdi.ssd_size,
		    SR_CRYPTO_MAXSIZE);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	mdd_crypto->scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	mdd_crypto->key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
			goto done;
		mdd_crypto->key_disk =
		    sr_crypto_create_key_disk(sd, mdd_crypto, bc->bc_key_disk);
		if (mdd_crypto->key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sr_crypto_create_keys(sd, mdd_crypto);

	rv = 0;
done:
	return (rv);
}

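/*
 * Obtain the mask key for an existing volume: either directly from
 * kernel-supplied data, from a key disk, or from a userland KDF
 * exchange via the opaque BIOC buffer.
 */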
int
sr_crypto_set_key(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
    struct bioc_createraid *bc, int no_chunk, void *data)
{
	int	rv = EINVAL;

	mdd_crypto->key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (mdd_crypto->scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		memcpy(mdd_crypto->scr_maskkey, data,
		    sizeof(mdd_crypto->scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		mdd_crypto->key_disk =
		    sr_crypto_read_key_disk(sd, mdd_crypto, bc->bc_key_disk);
		if (mdd_crypto->key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(mdd_crypto->scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(mdd_crypto->scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
			goto done;
	} else
		goto done;

	rv = 0;
done:
	return (rv);
}

int
sr_crypto_assemble(struct sr_discipline *sd,
    struct bioc_createraid *bc, int no_chunk, void *data)
{
	int rv;

	rv = sr_crypto_set_key(sd, &sd->mds.mdd_crypto, bc, no_chunk, data);
	if (rv)
		return (rv);

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;
	return (0);
}

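/*
 * Prepare a work unit for encryption or decryption: set up the uio and
 * one crypto descriptor per 512-byte block, using an IV derived from
 * the block number and the session covering the starting block.
 */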
struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, struct sr_crypto *mdd_crypto,
    int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blkno;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, encrypt);

	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blkno = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descriptors for up to MAXPHYS of
	 * I/O. Since this transfer may be smaller, trim the descriptor
	 * count to match the size of this request.
	 */
	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
	crwu->cr_crp->crp_ndesc = n;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * different key blocks (e.g. the 0.5TB boundary). Currently this
	 * is already broken by the use of scr_key[0] below.
	 */
	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = mdd_crypto->scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_flags = CRYPTO_F_IOV;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
		crd = &crwu->cr_crp->crp_desc[i];
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = mdd_crypto->scr_alg;
		crd->crd_klen = mdd_crypto->scr_klen;
		crd->crd_key = mdd_crypto->scr_key[0];
		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
	}

	return (crwu);
}

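/*
 * Copy KDF information in from userland: the KDF hint is stored in the
 * on-disk metadata and the mask key in the run-time metadata.
 */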
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(mdd_crypto->scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		memcpy(mdd_crypto->scr_meta->scm_kdfhint,
		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(mdd_crypto->scr_maskkey) < sizeof(kdfinfo->maskkey))
			goto out;
		memcpy(mdd_crypto->scr_maskkey, &kdfinfo->maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

	return (rv);
}

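/*
 * Encrypt/decrypt the disk keys with the mask key, one block at a time,
 * using the selected masking algorithm (currently AES-256 in ECB mode).
 */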
int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

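/*
 * Compute the key check code: HMAC-SHA1 over the plaintext disk keys,
 * keyed with the SHA1 of the mask key.
 */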
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

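/*
 * Unmask the disk keys using the mask key and verify the result against
 * the stored HMAC-SHA1 check code. The mask key is zeroed afterwards.
 */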
int
sr_crypto_decrypt_key(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (mdd_crypto->scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)mdd_crypto->scr_meta->scm_key,
	    (u_char *)mdd_crypto->scr_key,
	    mdd_crypto->scr_maskkey, sizeof(mdd_crypto->scr_key),
	    mdd_crypto->scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(mdd_crypto);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(mdd_crypto->scr_maskkey,
	    sizeof(mdd_crypto->scr_maskkey), (u_int8_t *)mdd_crypto->scr_key,
	    sizeof(mdd_crypto->scr_key), check_digest);
	if (memcmp(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(mdd_crypto->scr_key,
		    sizeof(mdd_crypto->scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&mdd_crypto->scr_maskkey,
	    sizeof(mdd_crypto->scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

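/*
 * Generate fresh disk keys, mask them with the mask key and store the
 * encrypted keys plus their check code in the crypto metadata.
 */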
int
sr_crypto_create_keys(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(mdd_crypto->scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	mdd_crypto->scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key));

	/* Mask the disk keys. */
	mdd_crypto->scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)mdd_crypto->scr_key,
	    (u_char *)mdd_crypto->scr_meta->scm_key,
	    mdd_crypto->scr_maskkey, sizeof(mdd_crypto->scr_key),
	    mdd_crypto->scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	mdd_crypto->scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(mdd_crypto->scr_maskkey,
	    sizeof(mdd_crypto->scr_maskkey),
	    (u_int8_t *)mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key),
	    mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(mdd_crypto);
#endif

	mdd_crypto->scr_meta->scm_flags = SR_CRYPTOF_KEY | SR_CRYPTOF_KDFHINT;

	return (0);
}

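/*
 * Re-key the volume metadata: verify the old mask key, re-encrypt the
 * disk keys with the new one and update the KDF hint and check code.
 */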
int
sr_crypto_change_maskkey(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (mdd_crypto->scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)mdd_crypto->scr_meta->scm_key;
	ksz = sizeof(mdd_crypto->scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    mdd_crypto->scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(mdd_crypto);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Copy new KDF hint to metadata, if supplied. */
	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
		if (kdfinfo2->genkdf.len >
		    sizeof(mdd_crypto->scr_meta->scm_kdfhint))
			goto out;
		explicit_bzero(mdd_crypto->scr_meta->scm_kdfhint,
		    sizeof(mdd_crypto->scr_meta->scm_kdfhint));
		memcpy(mdd_crypto->scr_meta->scm_kdfhint,
		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
	}

	/* Mask the disk keys. */
	c = (u_char *)mdd_crypto->scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    mdd_crypto->scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	mdd_crypto->scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)mdd_crypto->scr_key,
	    sizeof(mdd_crypto->scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	memcpy(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac, check_digest,
	    sizeof(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF, ksz);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}

struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(mdd_crypto->scr_maskkey,
	    sizeof(mdd_crypto->scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	memcpy(&skm->skm_maskkey, mdd_crypto->scr_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
	key_disk = NULL;

done:
	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
		    sizeof(struct sr_chunk *));
	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
    dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item *omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Open the key disk and load its keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			memcpy(mdd_crypto->scr_maskkey, &skm->skm_maskkey,
			    sizeof(mdd_crypto->scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			memcpy(mdd_crypto->scr_maskkey,
			    omh + sizeof(struct sr_meta_opt_hdr),
			    sizeof(mdd_crypto->scr_maskkey));
		}
	}

	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		free(omi->omi_som, M_DEVBUF, 0);
		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	}

	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

void
sr_crypto_free_sessions(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
{
	u_int			i;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		if (mdd_crypto->scr_sid[i] != (u_int64_t)-1) {
			crypto_freesession(mdd_crypto->scr_sid[i]);
			mdd_crypto->scr_sid[i] = (u_int64_t)-1;
		}
	}
}

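/*
 * Allocate per-volume resources: work units, CCBs, DMA buffers and
 * crypto requests, then decrypt the disk keys and open one crypto
 * session per key block range.
 */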
int
sr_crypto_alloc_resources_internal(struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	mdd_crypto->scr_alg = CRYPTO_AES_XTS;
	switch (mdd_crypto->scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		mdd_crypto->scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		mdd_crypto->scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		mdd_crypto->scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd, mdd_crypto)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = mdd_crypto->scr_alg;
	cri.cri_klen = mdd_crypto->scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
	if (num_keys > SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i < num_keys; i++) {
		cri.cri_key = mdd_crypto->scr_key[i];
		if (crypto_newsession(&mdd_crypto->scr_sid[i],
		    &cri, 0) != 0) {
			sr_crypto_free_sessions(sd, mdd_crypto);
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	return sr_crypto_alloc_resources_internal(sd, &sd->mds.mdd_crypto);
}

void
sr_crypto_free_resources_internal(struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (mdd_crypto->key_disk != NULL) {
		explicit_bzero(mdd_crypto->key_disk,
		    sizeof(*mdd_crypto->key_disk));
		free(mdd_crypto->key_disk, M_DEVBUF,
		    sizeof(*mdd_crypto->key_disk));
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	sr_crypto_free_sessions(sd, mdd_crypto);

	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		if (crwu->cr_dmabuf)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp)
			crypto_freereq(crwu->cr_crp);
	}

	sr_wu_free(sd);
	sr_ccb_free(sd);
}

void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
	sr_crypto_free_resources_internal(sd, mdd_crypto);
}

int
sr_crypto_ioctl_internal(struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(mdd_crypto->scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(mdd_crypto->scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, mdd_crypto, &kdfinfo1,
		    &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
	return sr_crypto_ioctl_internal(sd, mdd_crypto, bd);
}

int
sr_crypto_meta_opt_handler_internal(struct sr_discipline *sd,
    struct sr_crypto *mdd_crypto, struct sr_meta_opt_hdr *om)
{
	int rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		mdd_crypto->scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
	return sr_crypto_meta_opt_handler_internal(sd, mdd_crypto, om);
}

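/*
 * Start I/O on a work unit. Writes are encrypted first and issued from
 * the crypto callback; reads go straight to the underlying chunk and
 * are decrypted on completion.
 */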
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	struct sr_crypto	*mdd_crypto;
	daddr_t			blkno;
	int			rv;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
		return (1);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		mdd_crypto = &wu->swu_dis->mds.mdd_crypto;
		crwu = sr_crypto_prepare(wu, mdd_crypto, 1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		crypto_dispatch(crwu->cr_crp);
		rv = crwu->cr_crp->crp_etype;
	} else
		rv = sr_crypto_dev_rw(wu, NULL);

	return (rv);
}

void
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_scsi_done(wu->swu_dis, wu->swu_xs);
		splx(s);
	}

	sr_crypto_dev_rw(wu, crwu);
}

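/*
 * Queue the device I/O for a work unit; for writes the ccb uses the
 * encrypted buffer from the crypto request.
 */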
int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blkno;

	blkno = wu->swu_blk_start;

	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

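/*
 * I/O completion: for successful reads, dispatch a decryption request;
 * otherwise complete the SCSI transfer. Rebuild I/O (RAID 1C) is left
 * untouched.
 */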
void
sr_crypto_done_internal(struct sr_workunit *wu, struct sr_crypto *mdd_crypto)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	int			s;

	if (ISSET(wu->swu_flags, SR_WUF_REBUILD)) /* RAID 1C */
		return;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		crwu = sr_crypto_prepare(wu, mdd_crypto, 0);
		crwu->cr_crp->crp_callback = sr_crypto_read;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		crypto_dispatch(crwu->cr_crp);
		return;
	}

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);
}

void
sr_crypto_done(struct sr_workunit *wu)
{
	struct sr_crypto *mdd_crypto = &wu->swu_dis->mds.mdd_crypto;
	sr_crypto_done_internal(wu, mdd_crypto);
}

void
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_crypto *mdd_crypto)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x", mdd_crypto->scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x", mdd_crypto->scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */