xref: /openbsd-src/sys/dev/softraid_crypto.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /* $OpenBSD: softraid_crypto.c,v 1.131 2016/09/08 17:39:08 jsing Exp $ */
2 /*
3  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6  * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/kernel.h>
31 #include <sys/disk.h>
32 #include <sys/rwlock.h>
33 #include <sys/queue.h>
34 #include <sys/fcntl.h>
35 #include <sys/disklabel.h>
36 #include <sys/vnode.h>
37 #include <sys/mount.h>
38 #include <sys/sensors.h>
39 #include <sys/stat.h>
40 #include <sys/conf.h>
41 #include <sys/uio.h>
42 #include <sys/dkio.h>
43 
44 #include <crypto/cryptodev.h>
45 #include <crypto/rijndael.h>
46 #include <crypto/md5.h>
47 #include <crypto/sha1.h>
48 #include <crypto/sha2.h>
49 #include <crypto/hmac.h>
50 
51 #include <scsi/scsi_all.h>
52 #include <scsi/scsiconf.h>
53 #include <scsi/scsi_disk.h>
54 
55 #include <dev/softraidvar.h>
56 
/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	struct sr_workunit		 cr_wu;		/* Must be first. */
	struct uio			 cr_uio;	/* uio handed to the crypto layer. */
	struct iovec			 cr_iov;	/* single iovec backing cr_uio. */
	struct cryptop			*cr_crp;	/* preallocated crypto request. */
	struct cryptodesc		*cr_descs;	/* head of the full desc chain
							   (enough for MAXPHYS of I/O). */
	void				*cr_dmabuf;	/* bounce buffer used for writes. */
};
70 
71 
/* CRYPTO discipline internal function prototypes. */
struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
int		sr_crypto_create_keys(struct sr_discipline *);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
int		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done(struct sr_workunit *);
int		sr_crypto_read(struct cryptop *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		   u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void		 sr_crypto_dumpkeys(struct sr_discipline *);
#endif
103 
104 /* Discipline initialisation. */
105 void
106 sr_crypto_discipline_init(struct sr_discipline *sd)
107 {
108 	int i;
109 
110 	/* Fill out discipline members. */
111 	sd->sd_type = SR_MD_CRYPTO;
112 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
113 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
114 	sd->sd_max_wu = SR_CRYPTO_NOWU;
115 
116 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
117 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
118 
119 	/* Setup discipline specific function pointers. */
120 	sd->sd_alloc_resources = sr_crypto_alloc_resources;
121 	sd->sd_assemble = sr_crypto_assemble;
122 	sd->sd_create = sr_crypto_create;
123 	sd->sd_free_resources = sr_crypto_free_resources;
124 	sd->sd_ioctl_handler = sr_crypto_ioctl;
125 	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
126 	sd->sd_scsi_rw = sr_crypto_rw;
127 	sd->sd_scsi_done = sr_crypto_done;
128 }
129 
130 int
131 sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
132     int no_chunk, int64_t coerced_size)
133 {
134 	struct sr_meta_opt_item	*omi;
135 	int			rv = EINVAL;
136 
137 	if (no_chunk != 1) {
138 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
139 		    sd->sd_name);
140 		goto done;
141         }
142 
143 	/* Create crypto optional metadata. */
144 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
145 	    M_WAITOK | M_ZERO);
146 	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
147 	    M_WAITOK | M_ZERO);
148 	omi->omi_som->som_type = SR_OPT_CRYPTO;
149 	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
150 	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
151 	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
152 	sd->sd_meta->ssdi.ssd_opt_no++;
153 
154 	sd->mds.mdd_crypto.key_disk = NULL;
155 
156 	if (bc->bc_key_disk != NODEV) {
157 
158 		/* Create a key disk. */
159 		if (sr_crypto_get_kdf(bc, sd))
160 			goto done;
161 		sd->mds.mdd_crypto.key_disk =
162 		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
163 		if (sd->mds.mdd_crypto.key_disk == NULL)
164 			goto done;
165 		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
166 
167 	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
168 
169 		/* No hint available yet. */
170 		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
171 		rv = EAGAIN;
172 		goto done;
173 
174 	} else if (sr_crypto_get_kdf(bc, sd))
175 		goto done;
176 
177 	/* Passphrase volumes cannot be automatically assembled. */
178 	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
179 		goto done;
180 
181 	sd->sd_meta->ssdi.ssd_size = coerced_size;
182 
183 	sr_crypto_create_keys(sd);
184 
185 	sd->sd_max_ccb_per_wu = no_chunk;
186 
187 	rv = 0;
188 done:
189 	return (rv);
190 }
191 
/*
 * Assemble an existing CRYPTO volume. The mask key is obtained, in
 * order of preference, from: the kernel itself (data != NULL), a key
 * disk, or userland KDF info (BIOC_SOIN). With BIOC_SOOUT the on-disk
 * KDF hint is instead copied out to userland and EAGAIN is returned so
 * the caller can retry once it has derived the mask key.
 *
 * Returns 0 on success, EAGAIN after handing out the KDF hint, and
 * EINVAL on any other failure.
 */
int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		/* Refuse to copy out more than the stored hint. */
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}
244 
/*
 * Prepare the preallocated crypto request embedded in the work unit for
 * an encrypt (write) or decrypt (read) operation covering the xfer's
 * data. For writes the data is first copied into the DMA-safe bounce
 * buffer so the crypto layer never touches the caller's buffer.
 * Returns the work unit cast to its sr_crypto_wu wrapper.
 */
struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blkno;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		/* Write: operate on a private copy in the bounce buffer. */
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blkno = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;		/* number of 512-byte sectors */

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * across a different key blocks (e.g. 0.5TB boundary). Currently
	 * this is already broken by the use of scr_key[0] below.
	 */
	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	/* One descriptor per sector; the block number serves as the IV. */
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blkno++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
	}

	return (crwu);
}
314 
/*
 * Copy KDF information from userland into the discipline: the KDF hint
 * is stored in the on-disk metadata and the mask key in the run-time
 * metadata. The opaque buffer must be exactly a struct
 * sr_crypto_kdfinfo. The kernel copy is wiped before returning.
 * Returns 0 on success, EINVAL on failure.
 */
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	/* The structure's self-declared length must match as well. */
	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	/* Wipe the key material from the temporary kernel buffer. */
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

	return (rv);
}
361 
362 int
363 sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
364 {
365 	rijndael_ctx		ctx;
366 	int			i, rv = 1;
367 
368 	switch (alg) {
369 	case SR_CRYPTOM_AES_ECB_256:
370 		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
371 			goto out;
372 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
373 			rijndael_encrypt(&ctx, &p[i], &c[i]);
374 		rv = 0;
375 		break;
376 	default:
377 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
378 		    "softraid", alg);
379 		rv = -1;
380 		goto out;
381 	}
382 
383 out:
384 	explicit_bzero(&ctx, sizeof(ctx));
385 	return (rv);
386 }
387 
388 int
389 sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
390 {
391 	rijndael_ctx		ctx;
392 	int			i, rv = 1;
393 
394 	switch (alg) {
395 	case SR_CRYPTOM_AES_ECB_256:
396 		if (rijndael_set_key(&ctx, key, 256) != 0)
397 			goto out;
398 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
399 			rijndael_decrypt(&ctx, &c[i], &p[i]);
400 		rv = 0;
401 		break;
402 	default:
403 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
404 		    "softraid", alg);
405 		rv = -1;
406 		goto out;
407 	}
408 
409 out:
410 	explicit_bzero(&ctx, sizeof(ctx));
411 	return (rv);
412 }
413 
/*
 * Compute the key decryption check code: check_digest =
 * HMAC-SHA1(k, key) with k = SHA1(maskkey). This lets a later
 * unmasking verify it used the correct mask key without storing the
 * plaintext keys. All intermediate state is wiped before returning.
 */
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	/* Wipe the derived key and hash contexts. */
	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}
440 
/*
 * Unmask the disk keys: decrypt scm_key from the on-disk metadata into
 * the run-time scr_key array using the mask key, then verify the result
 * against the stored HMAC-SHA1 check code. The mask key is wiped
 * regardless of outcome; on verification failure the decrypted keys are
 * wiped as well. Returns 0 on success, 1 on failure.
 */
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	/* Only the HMAC-SHA1 check code is supported. */
	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		/* Wrong mask key: do not leave bogus key material around. */
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}
485 
/*
 * Generate fresh random disk keys, encrypt ("mask") them into the
 * on-disk metadata with the mask key and record an HMAC-SHA1 check
 * code so a later unmasking can be verified. The plaintext keys are
 * wiped before returning. Returns 0 on success, 1 on failure.
 */
int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	/* The mask key must fit within the AES key size limit. */
	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
531 
/*
 * Re-mask the disk keys under a new mask key (e.g. passphrase change).
 * The keys are unmasked with kdfinfo1's mask key, verified against the
 * stored HMAC-SHA1 check code, then masked again with kdfinfo2's mask
 * key; the new KDF hint (if supplied) and the new check code are
 * written to the metadata. Both supplied mask keys and all plaintext
 * key material are wiped before returning.
 *
 * Returns 0 on success, EPERM for a wrong key/passphrase and 1 on any
 * other failure.
 */
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
  struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	/* Decrypt the masked keys from the metadata into a scratch buffer. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Verify the old mask key actually unmasked the keys. */
	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Copy new KDF hint to metadata, if supplied. */
	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
		if (kdfinfo2->genkdf.len >
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
			goto out;
		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	/* Wipe the scratch copy of the plaintext keys. */
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF, ksz);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
611 
/*
 * Create a key disk on the given device: generate a fresh mask key,
 * store it in a keydisk optional metadata area and write a complete
 * softraid metadata set (via a throwaway "fake" discipline) to the
 * device. The device must carry a RAID partition and not already be
 * in use.
 *
 * Returns the chunk describing the key disk (with its vnode held open)
 * or NULL on failure.
 */
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk    *km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	/* Temporary discipline used only to drive sr_meta_save(). */
	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
	key_disk = NULL;

done:
	/* The fake discipline and temporary metadata are always released. */
	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
		    sizeof(struct sr_chunk *));
	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}
773 
774 struct sr_chunk *
775 sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
776 {
777 	struct sr_softc		*sc = sd->sd_sc;
778 	struct sr_metadata	*sm = NULL;
779 	struct sr_meta_opt_item *omi, *omi_next;
780 	struct sr_meta_opt_hdr	*omh;
781 	struct sr_meta_keydisk	*skm;
782 	struct sr_meta_opt_head som;
783 	struct sr_chunk		*key_disk = NULL;
784 	struct disklabel	label;
785 	struct vnode		*vn = NULL;
786 	char			devname[32];
787 	int			c, part, open = 0;
788 
789 	/*
790 	 * Load a key disk and load keying material into memory.
791 	 */
792 
793 	SLIST_INIT(&som);
794 
795 	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
796 
797 	/* Make sure chunk is not already in use. */
798 	c = sr_chunk_in_use(sc, dev);
799 	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
800 		sr_error(sc, "%s is already in use", devname);
801 		goto done;
802 	}
803 
804 	/* Open device. */
805 	if (bdevvp(dev, &vn)) {
806 		sr_error(sc, "cannot open key disk %s", devname);
807 		goto done;
808 	}
809 	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
810 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
811 		    "open %s\n", DEVNAME(sc), devname);
812 		vput(vn);
813 		goto done;
814 	}
815 	open = 1; /* close dev on error */
816 
817 	/* Get partition details. */
818 	part = DISKPART(dev);
819 	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
820 	    NOCRED, curproc)) {
821 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
822 		    "failed\n", DEVNAME(sc));
823 		goto done;
824 	}
825 	if (label.d_partitions[part].p_fstype != FS_RAID) {
826 		sr_error(sc, "%s partition not of type RAID (%d)",
827 		    devname, label.d_partitions[part].p_fstype);
828 		goto done;
829 	}
830 
831 	/*
832 	 * Read and validate key disk metadata.
833 	 */
834 	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
835 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
836 		sr_error(sc, "native bootprobe could not read native metadata");
837 		goto done;
838 	}
839 
840 	if (sr_meta_validate(sd, dev, sm, NULL)) {
841 		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
842 		    DEVNAME(sc));
843 		goto done;
844 	}
845 
846 	/* Make sure this is a key disk. */
847 	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
848 		sr_error(sc, "%s is not a key disk", devname);
849 		goto done;
850 	}
851 
852 	/* Construct key disk chunk. */
853 	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
854 	key_disk->src_dev_mm = dev;
855 	key_disk->src_vn = vn;
856 	key_disk->src_size = 0;
857 
858 	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
859 	    sizeof(key_disk->src_meta));
860 
861 	/* Read mask key from optional metadata. */
862 	sr_meta_opt_load(sc, sm, &som);
863 	SLIST_FOREACH(omi, &som, omi_link) {
864 		omh = omi->omi_som;
865 		if (omh->som_type == SR_OPT_KEYDISK) {
866 			skm = (struct sr_meta_keydisk *)omh;
867 			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
868 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
869 		} else if (omh->som_type == SR_OPT_CRYPTO) {
870 			/* Original keydisk format with key in crypto area. */
871 			memcpy(sd->mds.mdd_crypto.scr_maskkey,
872 			    omh + sizeof(struct sr_meta_opt_hdr),
873 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
874 		}
875 	}
876 
877 	open = 0;
878 
879 done:
880 	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
881 		omi_next = SLIST_NEXT(omi, omi_link);
882 		free(omi->omi_som, M_DEVBUF, 0);
883 		free(omi, M_DEVBUF, 0);
884 	}
885 
886 	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
887 
888 	if (vn && open) {
889 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
890 		vput(vn);
891 	}
892 
893 	return key_disk;
894 }
895 
/*
 * Allocate the discipline's run-time resources: work units, CCBs,
 * per-WU crypto requests and bounce buffers, and one crypto session
 * per 2^SR_CRYPTO_KEY_BLKSHIFT block range. Also unmasks the disk
 * keys. Returns 0 on success or an errno on failure.
 */
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	/* Map the metadata algorithm to a crypto framework key length. */
	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	/* Mark all session slots unused ((u_int64_t)-1 is the sentinel). */
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd, sizeof(struct sr_crypto_wu))) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* Remember the full desc chain so it can be restored later. */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			/* Unwind: free every session created so far. */
			for (i = 0;
			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			     i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
979 
/*
 * Release all resources held by a CRYPTO discipline: the in-memory key
 * disk metadata, crypto sessions, per-work-unit DMA buffers and crypto
 * requests, and finally the generic work units and CCBs.
 */
void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	u_int			i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	/*
	 * Scrub the key disk metadata before freeing it so key material
	 * does not linger in freed kernel memory.
	 */
	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	/* Tear down crypto sessions; (u_int64_t)-1 marks an unused slot. */
	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	/* Free the DMA buffer and crypto request preallocated per WU. */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		if (crwu->cr_dmabuf)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			/* Restore the saved descriptor list before freeing. */
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
	}

	sr_wu_free(sd);
	sr_ccb_free(sd);
}
1017 
/*
 * Handle CRYPTO discipline specific BIOC ioctls: export the KDF hint to
 * userland and change the encryption passphrase.  Returns 0 on success,
 * non-zero on failure.  All exit paths (including success) fall through
 * to the "bad" label, which wipes potential key material off the stack.
 */
int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		/*
		 * NOTE(review): the size checks only reject oversized
		 * buffers, yet each copyin() below transfers the full
		 * structure size regardless of the size the caller
		 * declared - confirm userland always passes structures
		 * of exactly these sizes.
		 */
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	/* Wipe any key material from the stack on every exit path. */
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}
1081 
1082 int
1083 sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1084 {
1085 	int rv = EINVAL;
1086 
1087 	if (om->som_type == SR_OPT_CRYPTO) {
1088 		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
1089 		rv = 0;
1090 	}
1091 
1092 	return (rv);
1093 }
1094 
/*
 * Start I/O on a work unit.  Writes are encrypted first and the device
 * I/O is issued from the crypto completion callback (sr_crypto_write);
 * reads go straight to the device and are decrypted on completion (see
 * sr_crypto_done).  Returns 0 on success, non-zero on failure.
 */
int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	daddr_t			blkno;
	int			rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
		return (1);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		/* Encrypt the payload; the callback issues the device I/O. */
		crwu = sr_crypto_prepare(wu, 1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		rv = crypto_dispatch(crwu->cr_crp);
		/*
		 * NOTE(review): crp_etype is read after crypto_dispatch();
		 * this assumes the request is still valid at this point
		 * (e.g. synchronous completion) - confirm against the
		 * crypto framework's dispatch semantics.
		 */
		if (rv == 0)
			rv = crwu->cr_crp->crp_etype;
	} else
		rv = sr_crypto_dev_rw(wu, NULL);

	return (rv);
}
1119 
1120 int
1121 sr_crypto_write(struct cryptop *crp)
1122 {
1123 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1124 	struct sr_workunit	*wu = &crwu->cr_wu;
1125 	int			s;
1126 
1127 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
1128 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1129 
1130 	if (crp->crp_etype) {
1131 		/* fail io */
1132 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1133 		s = splbio();
1134 		sr_scsi_done(wu->swu_dis, wu->swu_xs);
1135 		splx(s);
1136 	}
1137 
1138 	return (sr_crypto_dev_rw(wu, crwu));
1139 }
1140 
/*
 * Issue the device I/O for a work unit.  For writes, crwu carries the
 * encrypted copy of the data and the CCB is re-pointed at that buffer;
 * for reads crwu is NULL and the data lands directly in the xfer's own
 * buffer (decryption happens later in sr_crypto_done).  Returns 0 on
 * success, 1 on failure.
 */
int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blkno;

	blkno = wu->swu_blk_start;

	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		/*
		 * Write path: the crypto request's buffer is a struct uio
		 * wrapping the encrypted data; make the buf write that
		 * instead of the plaintext in xs->data.
		 */
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}
1175 
1176 void
1177 sr_crypto_done(struct sr_workunit *wu)
1178 {
1179 	struct scsi_xfer	*xs = wu->swu_xs;
1180 	struct sr_crypto_wu	*crwu;
1181 	int			s;
1182 
1183 	/* If this was a successful read, initiate decryption of the data. */
1184 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
1185 		crwu = sr_crypto_prepare(wu, 0);
1186 		crwu->cr_crp->crp_callback = sr_crypto_read;
1187 		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
1188 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
1189 		crypto_dispatch(crwu->cr_crp);
1190 		return;
1191 	}
1192 
1193 	s = splbio();
1194 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1195 	splx(s);
1196 }
1197 
1198 int
1199 sr_crypto_read(struct cryptop *crp)
1200 {
1201 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1202 	struct sr_workunit	*wu = &crwu->cr_wu;
1203 	int			s;
1204 
1205 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
1206 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1207 
1208 	if (crp->crp_etype)
1209 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1210 
1211 	s = splbio();
1212 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1213 	splx(s);
1214 
1215 	return (0);
1216 }
1217 
/*
 * Disk hotplug notification hook for the CRYPTO discipline; currently
 * only logs the event in debug builds.
 */
void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}
1224 
#ifdef SR_DEBUG0
/*
 * Debug helper: print, in hex, both the key array stored in the crypto
 * metadata (scm_key) and the runtime key array (scr_key).  Guarded by
 * SR_DEBUG0 since it exposes key material on the console.
 */
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */
1251