xref: /openbsd-src/sys/dev/softraid_crypto.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
1 /* $OpenBSD: softraid_crypto.c,v 1.133 2017/02/07 17:25:46 patrick Exp $ */
2 /*
3  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6  * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/kernel.h>
31 #include <sys/disk.h>
32 #include <sys/rwlock.h>
33 #include <sys/queue.h>
34 #include <sys/fcntl.h>
35 #include <sys/disklabel.h>
36 #include <sys/vnode.h>
37 #include <sys/mount.h>
38 #include <sys/sensors.h>
39 #include <sys/stat.h>
40 #include <sys/conf.h>
41 #include <sys/uio.h>
42 #include <sys/dkio.h>
43 
44 #include <crypto/cryptodev.h>
45 #include <crypto/rijndael.h>
46 #include <crypto/md5.h>
47 #include <crypto/sha1.h>
48 #include <crypto/sha2.h>
49 #include <crypto/hmac.h>
50 
51 #include <scsi/scsi_all.h>
52 #include <scsi/scsiconf.h>
53 #include <scsi/scsi_disk.h>
54 
55 #include <dev/softraidvar.h>
56 
57 /*
58  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
59  * to start failing when memory pressure kicks in. We can store this in the WU
60  * because we assert that only one ccb per WU will ever be active.
61  */
struct sr_crypto_wu {
	struct sr_workunit		 cr_wu;		/* Must be first. */
	struct uio			 cr_uio;	/* Describes the data transfer to the crypto layer. */
	struct iovec			 cr_iov;	/* Single iovec backing cr_uio. */
	struct cryptop	 		*cr_crp;	/* Preallocated crypto request (one desc per sector). */
	void				*cr_dmabuf;	/* MAXPHYS bounce buffer; used as encrypt source on writes. */
};
69 
70 
71 struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
72 int		sr_crypto_create_keys(struct sr_discipline *);
73 int		sr_crypto_get_kdf(struct bioc_createraid *,
74 		    struct sr_discipline *);
75 int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
76 int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
77 int		sr_crypto_decrypt_key(struct sr_discipline *);
78 int		sr_crypto_change_maskkey(struct sr_discipline *,
79 		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
80 int		sr_crypto_create(struct sr_discipline *,
81 		    struct bioc_createraid *, int, int64_t);
82 int		sr_crypto_assemble(struct sr_discipline *,
83 		    struct bioc_createraid *, int, void *);
84 int		sr_crypto_alloc_resources(struct sr_discipline *);
85 void		sr_crypto_free_resources(struct sr_discipline *);
86 int		sr_crypto_ioctl(struct sr_discipline *,
87 		    struct bioc_discipline *);
88 int		sr_crypto_meta_opt_handler(struct sr_discipline *,
89 		    struct sr_meta_opt_hdr *);
90 void		sr_crypto_write(struct cryptop *);
91 int		sr_crypto_rw(struct sr_workunit *);
92 int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
93 void		sr_crypto_done(struct sr_workunit *);
94 void		sr_crypto_read(struct cryptop *);
95 void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
96 		   u_int8_t *, int, u_char *);
97 void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
98 
99 #ifdef SR_DEBUG0
100 void		 sr_crypto_dumpkeys(struct sr_discipline *);
101 #endif
102 
103 /* Discipline initialisation. */
104 void
105 sr_crypto_discipline_init(struct sr_discipline *sd)
106 {
107 	int i;
108 
109 	/* Fill out discipline members. */
110 	sd->sd_type = SR_MD_CRYPTO;
111 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
112 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
113 	sd->sd_max_wu = SR_CRYPTO_NOWU;
114 
115 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
116 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
117 
118 	/* Setup discipline specific function pointers. */
119 	sd->sd_alloc_resources = sr_crypto_alloc_resources;
120 	sd->sd_assemble = sr_crypto_assemble;
121 	sd->sd_create = sr_crypto_create;
122 	sd->sd_free_resources = sr_crypto_free_resources;
123 	sd->sd_ioctl_handler = sr_crypto_ioctl;
124 	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
125 	sd->sd_scsi_rw = sr_crypto_rw;
126 	sd->sd_scsi_done = sr_crypto_done;
127 }
128 
129 int
130 sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
131     int no_chunk, int64_t coerced_size)
132 {
133 	struct sr_meta_opt_item	*omi;
134 	int			rv = EINVAL;
135 
136 	if (no_chunk != 1) {
137 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
138 		    sd->sd_name);
139 		goto done;
140         }
141 
142 	/* Create crypto optional metadata. */
143 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
144 	    M_WAITOK | M_ZERO);
145 	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
146 	    M_WAITOK | M_ZERO);
147 	omi->omi_som->som_type = SR_OPT_CRYPTO;
148 	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
149 	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
150 	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
151 	sd->sd_meta->ssdi.ssd_opt_no++;
152 
153 	sd->mds.mdd_crypto.key_disk = NULL;
154 
155 	if (bc->bc_key_disk != NODEV) {
156 
157 		/* Create a key disk. */
158 		if (sr_crypto_get_kdf(bc, sd))
159 			goto done;
160 		sd->mds.mdd_crypto.key_disk =
161 		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
162 		if (sd->mds.mdd_crypto.key_disk == NULL)
163 			goto done;
164 		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
165 
166 	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
167 
168 		/* No hint available yet. */
169 		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
170 		rv = EAGAIN;
171 		goto done;
172 
173 	} else if (sr_crypto_get_kdf(bc, sd))
174 		goto done;
175 
176 	/* Passphrase volumes cannot be automatically assembled. */
177 	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
178 		goto done;
179 
180 	sd->sd_meta->ssdi.ssd_size = coerced_size;
181 
182 	sr_crypto_create_keys(sd);
183 
184 	sd->sd_max_ccb_per_wu = no_chunk;
185 
186 	rv = 0;
187 done:
188 	return (rv);
189 }
190 
/*
 * Assemble an existing crypto volume.  The mask key comes from one of
 * three sources, in priority order: directly from the kernel (data),
 * from a key disk (bc_key_disk), or from userland via a KDF exchange
 * over bc_opaque.  Returns 0 on success, EINVAL on failure and EAGAIN
 * after the KDF hint has been copied out for userland to process.
 */
int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		/* The hint must fit in the userland-supplied buffer. */
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done - userland must return with the mask key */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}
243 
/*
 * Prepare the crypto request embedded in the work unit for the transfer
 * described by the attached scsi_xfer.  For writes the data is first
 * copied into the preallocated DMA buffer so the caller's buffer is not
 * encrypted in place; reads decrypt directly into the caller's buffer.
 * One crypto descriptor is built per DEV_BSIZE sector, each using the
 * sector's block number as IV.  'encrypt' selects the direction
 * (non-zero = encrypt).
 */
struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blkno;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		/* Write: operate on a private copy of the data. */
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blkno = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the amount
	 * of crypto desc structures to be just long enough for our needs.
	 */
	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
	crwu->cr_crp->crp_ndesc = n;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * across a different key blocks (e.g. 0.5TB boundary). Currently
	 * this is already broken by the use of scr_key[0] below.
	 */
	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
		crd = &crwu->cr_crp->crp_desc[i];
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		/* IV is the sector's block number, host byte order. */
		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
	}

	return (crwu);
}
309 
/*
 * Copy KDF information supplied by userland (bc->bc_opaque) into the
 * discipline: the generic KDF hint goes to the on-disk metadata and the
 * mask key to the run-time metadata.  The temporary kernel copy of the
 * userland buffer is scrubbed before being freed.  Returns 0 on
 * success, EINVAL otherwise.
 */
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	/* Userland must hand in a buffer of exactly the expected size. */
	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	/* Embedded length must agree with the buffer size. */
	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	/* Scrub key material before freeing. */
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

	return (rv);
}
356 
357 int
358 sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
359 {
360 	rijndael_ctx		ctx;
361 	int			i, rv = 1;
362 
363 	switch (alg) {
364 	case SR_CRYPTOM_AES_ECB_256:
365 		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
366 			goto out;
367 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
368 			rijndael_encrypt(&ctx, &p[i], &c[i]);
369 		rv = 0;
370 		break;
371 	default:
372 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
373 		    "softraid", alg);
374 		rv = -1;
375 		goto out;
376 	}
377 
378 out:
379 	explicit_bzero(&ctx, sizeof(ctx));
380 	return (rv);
381 }
382 
383 int
384 sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
385 {
386 	rijndael_ctx		ctx;
387 	int			i, rv = 1;
388 
389 	switch (alg) {
390 	case SR_CRYPTOM_AES_ECB_256:
391 		if (rijndael_set_key(&ctx, key, 256) != 0)
392 			goto out;
393 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
394 			rijndael_decrypt(&ctx, &c[i], &p[i]);
395 		rv = 0;
396 		break;
397 	default:
398 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
399 		    "softraid", alg);
400 		rv = -1;
401 		goto out;
402 	}
403 
404 out:
405 	explicit_bzero(&ctx, sizeof(ctx));
406 	return (rv);
407 }
408 
409 void
410 sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
411     u_int8_t *key, int key_size, u_char *check_digest)
412 {
413 	u_char			check_key[SHA1_DIGEST_LENGTH];
414 	HMAC_SHA1_CTX		hmacctx;
415 	SHA1_CTX		shactx;
416 
417 	bzero(check_key, sizeof(check_key));
418 	bzero(&hmacctx, sizeof(hmacctx));
419 	bzero(&shactx, sizeof(shactx));
420 
421 	/* k = SHA1(mask_key) */
422 	SHA1Init(&shactx);
423 	SHA1Update(&shactx, maskkey, maskkey_size);
424 	SHA1Final(check_key, &shactx);
425 
426 	/* mac = HMAC_SHA1_k(unencrypted key) */
427 	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
428 	HMAC_SHA1_Update(&hmacctx, key, key_size);
429 	HMAC_SHA1_Final(check_digest, &hmacctx);
430 
431 	explicit_bzero(check_key, sizeof(check_key));
432 	explicit_bzero(&hmacctx, sizeof(hmacctx));
433 	explicit_bzero(&shactx, sizeof(shactx));
434 }
435 
/*
 * Unmask the disk keys: decrypt scm_key (on-disk, masked) into scr_key
 * (run-time) using the mask key, then verify the result against the
 * stored HMAC-SHA1 check code.  The mask key is erased in all cases
 * before returning.  Returns 0 on success, 1 on failure.
 */
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		/* Wrong mask key - do not leave garbage keys around. */
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}
480 
/*
 * Generate fresh random disk keys, store them masked (AES-ECB-256 under
 * the mask key) in the on-disk metadata together with an HMAC-SHA1
 * check code, then erase the plaintext keys.  Returns 0 on success,
 * 1 if the mask key is too large for the masking cipher.
 */
int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	/* The masking cipher must be able to use the whole mask key. */
	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
526 
/*
 * Re-mask the disk keys under a new mask key: verify the old mask key
 * (kdfinfo1) against the stored HMAC, then re-encrypt the disk keys and
 * regenerate the check code with the new mask key (kdfinfo2), updating
 * the KDF hint if one is supplied.  Both mask keys are scrubbed before
 * returning.  Returns 0 on success, EPERM on a bad old key, 1 otherwise.
 * The caller is responsible for saving the updated metadata to disk.
 */
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
  struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	/* Decrypt the masked on-disk keys into a temporary buffer. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Verify the old mask key before committing to anything. */
	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Copy new KDF hint to metadata, if supplied. */
	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
		if (kdfinfo2->genkdf.len >
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
			goto out;
		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/*
	 * Prepare key decryption check code.
	 * NOTE(review): the MAC is computed over the run-time scr_key,
	 * not over p - this assumes scr_key holds the same plaintext
	 * keys that were just decrypted into p; verify against callers.
	 */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	/* Scrub and release the plaintext key copy. */
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF, ksz);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
606 
/*
 * Initialise 'dev' as a key disk: generate a fresh mask key, build the
 * chunk and volume metadata for a key-disk volume and write it out with
 * the mask key stored in the optional metadata area.  Returns the new
 * key disk chunk (with its vnode left open) or NULL on failure.
 */
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk    *km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	/* NOTE(review): bound taken from scm_devname; assumes src_devname
	 * is at least as large - confirm against struct sr_chunk. */
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));

	/* Checksum covers only the invariant part of the chunk metadata. */
	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	/* Temporary discipline used only to drive sr_meta_save(). */
	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
	key_disk = NULL;

done:
	/* The temporary discipline and metadata are always released. */
	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
		    sizeof(struct sr_chunk *));
	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}
768 
769 struct sr_chunk *
770 sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
771 {
772 	struct sr_softc		*sc = sd->sd_sc;
773 	struct sr_metadata	*sm = NULL;
774 	struct sr_meta_opt_item *omi, *omi_next;
775 	struct sr_meta_opt_hdr	*omh;
776 	struct sr_meta_keydisk	*skm;
777 	struct sr_meta_opt_head som;
778 	struct sr_chunk		*key_disk = NULL;
779 	struct disklabel	label;
780 	struct vnode		*vn = NULL;
781 	char			devname[32];
782 	int			c, part, open = 0;
783 
784 	/*
785 	 * Load a key disk and load keying material into memory.
786 	 */
787 
788 	SLIST_INIT(&som);
789 
790 	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
791 
792 	/* Make sure chunk is not already in use. */
793 	c = sr_chunk_in_use(sc, dev);
794 	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
795 		sr_error(sc, "%s is already in use", devname);
796 		goto done;
797 	}
798 
799 	/* Open device. */
800 	if (bdevvp(dev, &vn)) {
801 		sr_error(sc, "cannot open key disk %s", devname);
802 		goto done;
803 	}
804 	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
805 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
806 		    "open %s\n", DEVNAME(sc), devname);
807 		vput(vn);
808 		goto done;
809 	}
810 	open = 1; /* close dev on error */
811 
812 	/* Get partition details. */
813 	part = DISKPART(dev);
814 	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
815 	    NOCRED, curproc)) {
816 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
817 		    "failed\n", DEVNAME(sc));
818 		goto done;
819 	}
820 	if (label.d_partitions[part].p_fstype != FS_RAID) {
821 		sr_error(sc, "%s partition not of type RAID (%d)",
822 		    devname, label.d_partitions[part].p_fstype);
823 		goto done;
824 	}
825 
826 	/*
827 	 * Read and validate key disk metadata.
828 	 */
829 	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
830 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
831 		sr_error(sc, "native bootprobe could not read native metadata");
832 		goto done;
833 	}
834 
835 	if (sr_meta_validate(sd, dev, sm, NULL)) {
836 		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
837 		    DEVNAME(sc));
838 		goto done;
839 	}
840 
841 	/* Make sure this is a key disk. */
842 	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
843 		sr_error(sc, "%s is not a key disk", devname);
844 		goto done;
845 	}
846 
847 	/* Construct key disk chunk. */
848 	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
849 	key_disk->src_dev_mm = dev;
850 	key_disk->src_vn = vn;
851 	key_disk->src_size = 0;
852 
853 	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
854 	    sizeof(key_disk->src_meta));
855 
856 	/* Read mask key from optional metadata. */
857 	sr_meta_opt_load(sc, sm, &som);
858 	SLIST_FOREACH(omi, &som, omi_link) {
859 		omh = omi->omi_som;
860 		if (omh->som_type == SR_OPT_KEYDISK) {
861 			skm = (struct sr_meta_keydisk *)omh;
862 			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
863 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
864 		} else if (omh->som_type == SR_OPT_CRYPTO) {
865 			/* Original keydisk format with key in crypto area. */
866 			memcpy(sd->mds.mdd_crypto.scr_maskkey,
867 			    omh + sizeof(struct sr_meta_opt_hdr),
868 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
869 		}
870 	}
871 
872 	open = 0;
873 
874 done:
875 	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
876 		omi_next = SLIST_NEXT(omi, omi_link);
877 		free(omi->omi_som, M_DEVBUF, 0);
878 		free(omi, M_DEVBUF, 0);
879 	}
880 
881 	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
882 
883 	if (vn && open) {
884 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
885 		vput(vn);
886 	}
887 
888 	return key_disk;
889 }
890 
/*
 * Allocate all run-time resources for the crypto discipline: work
 * units, CCBs, per-WU DMA buffers and crypto requests, and one crypto
 * session per 2^SR_CRYPTO_KEY_BLKSHIFT-block key range.  Also unmasks
 * the disk keys.  Returns 0 on success or an errno on failure; partial
 * allocations are expected to be cleaned up by sr_crypto_free_resources().
 */
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	/* XTS uses two AES keys, hence klen is twice the AES key size. */
	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	/* Mark every session slot unused before any can fail. */
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd, sizeof(struct sr_crypto_wu))) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		/* Enough descriptors for MAXPHYS worth of sectors. */
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	/* Note: inclusive bound - num_keys + 1 sessions are created. */
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			/*
			 * Tear down the sessions created so far; i is
			 * reused to scan from slot 0 up to the first
			 * still-unused ((u_int64_t)-1) slot.
			 */
			for (i = 0;
			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			     i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
973 
974 void
975 sr_crypto_free_resources(struct sr_discipline *sd)
976 {
977 	struct sr_workunit	*wu;
978 	struct sr_crypto_wu	*crwu;
979 	u_int			i;
980 
981 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
982 	    DEVNAME(sd->sd_sc));
983 
984 	if (sd->mds.mdd_crypto.key_disk != NULL) {
985 		explicit_bzero(sd->mds.mdd_crypto.key_disk,
986 		    sizeof(*sd->mds.mdd_crypto.key_disk));
987 		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
988 		    sizeof(*sd->mds.mdd_crypto.key_disk));
989 	}
990 
991 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
992 
993 	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
994 		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
995 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
996 	}
997 
998 	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
999 		crwu = (struct sr_crypto_wu *)wu;
1000 		if (crwu->cr_dmabuf)
1001 			dma_free(crwu->cr_dmabuf, MAXPHYS);
1002 		if (crwu->cr_crp)
1003 			crypto_freereq(crwu->cr_crp);
1004 	}
1005 
1006 	sr_wu_free(sd);
1007 	sr_ccb_free(sd);
1008 }
1009 
/*
 * Handle CRYPTO-discipline-specific BIO ioctls.
 *
 * SR_IOCTL_GET_KDFHINT copies the stored KDF hint out to userland;
 * SR_IOCTL_CHANGE_PASSPHRASE copies in two KDF info structures and
 * re-masks the key material via sr_crypto_change_maskkey().
 *
 * Returns 0 on success, non-zero on failure (rv stays 1 for unknown
 * commands or any validation/copy failure, aside from sr_meta_save()'s
 * return on the passphrase-change path).
 */
int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		/*
		 * NOTE(review): copyin reads a full sizeof(kdfpair) bytes
		 * even when bd_size is smaller than that — confirm callers
		 * always supply a full-sized buffer.
		 */
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

	/*
	 * All paths — success and failure alike — fall through to here so
	 * the copied-in key material is always scrubbed from the stack.
	 */
bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}
1073 
1074 int
1075 sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1076 {
1077 	int rv = EINVAL;
1078 
1079 	if (om->som_type == SR_OPT_CRYPTO) {
1080 		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
1081 		rv = 0;
1082 	}
1083 
1084 	return (rv);
1085 }
1086 
1087 int
1088 sr_crypto_rw(struct sr_workunit *wu)
1089 {
1090 	struct sr_crypto_wu	*crwu;
1091 	daddr_t			blkno;
1092 	int			rv = 0;
1093 
1094 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
1095 	    DEVNAME(wu->swu_dis->sd_sc), wu);
1096 
1097 	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
1098 		return (1);
1099 
1100 	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
1101 		crwu = sr_crypto_prepare(wu, 1);
1102 		crwu->cr_crp->crp_callback = sr_crypto_write;
1103 		rv = crypto_dispatch(crwu->cr_crp);
1104 		if (rv == 0)
1105 			rv = crwu->cr_crp->crp_etype;
1106 	} else
1107 		rv = sr_crypto_dev_rw(wu, NULL);
1108 
1109 	return (rv);
1110 }
1111 
1112 void
1113 sr_crypto_write(struct cryptop *crp)
1114 {
1115 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1116 	struct sr_workunit	*wu = &crwu->cr_wu;
1117 	int			s;
1118 
1119 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
1120 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1121 
1122 	if (crp->crp_etype) {
1123 		/* fail io */
1124 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1125 		s = splbio();
1126 		sr_scsi_done(wu->swu_dis, wu->swu_xs);
1127 		splx(s);
1128 	}
1129 
1130 	sr_crypto_dev_rw(wu, crwu);
1131 }
1132 
/*
 * Issue the device-level I/O for a work unit.
 *
 * crwu is non-NULL on the write path (the ccb is pointed at the
 * encrypted buffer held in crwu's crypto request) and NULL on the read
 * path (sr_crypto_rw() passes NULL; the raw data lands in xs->data and
 * is decrypted on completion).
 *
 * Returns 0 once the ccb has been queued, 1 if no ccb was available.
 */
int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blkno;

	blkno = wu->swu_blk_start;

	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		/*
		 * Write: redirect the ccb's data pointer at the encrypted
		 * buffer (crp_buf is a uio; its iovec holds the ciphertext).
		 * crwu must be non-NULL here — only reads pass crwu == NULL.
		 */
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}
1167 
1168 void
1169 sr_crypto_done(struct sr_workunit *wu)
1170 {
1171 	struct scsi_xfer	*xs = wu->swu_xs;
1172 	struct sr_crypto_wu	*crwu;
1173 	int			s;
1174 
1175 	/* If this was a successful read, initiate decryption of the data. */
1176 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
1177 		crwu = sr_crypto_prepare(wu, 0);
1178 		crwu->cr_crp->crp_callback = sr_crypto_read;
1179 		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
1180 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
1181 		crypto_dispatch(crwu->cr_crp);
1182 		return;
1183 	}
1184 
1185 	s = splbio();
1186 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1187 	splx(s);
1188 }
1189 
1190 void
1191 sr_crypto_read(struct cryptop *crp)
1192 {
1193 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1194 	struct sr_workunit	*wu = &crwu->cr_wu;
1195 	int			s;
1196 
1197 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
1198 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1199 
1200 	if (crp->crp_etype)
1201 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1202 
1203 	s = splbio();
1204 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1205 	splx(s);
1206 }
1207 
/*
 * Disk hotplug callback, registered via sr_hotplug_register() in the
 * discipline setup path.  Currently only logs the event when SR_DEBUG
 * output is enabled.
 */
void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}
1214 
#ifdef SR_DEBUG0
/*
 * Debug helper: hex-dump the metadata copies of the keys (scm_key) and
 * the runtime data keys (scr_key).
 */
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			key, byte;

	printf("sr_crypto_dumpkeys:\n");
	for (key = 0; key < SR_CRYPTO_MAXKEYS; key++) {
		printf("\tscm_key[%d]: 0x", key);
		for (byte = 0; byte < SR_CRYPTO_KEYBYTES; byte++)
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[key][byte]);
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (key = 0; key < SR_CRYPTO_MAXKEYS; key++) {
		printf("\tscr_key[%d]: 0x", key);
		for (byte = 0; byte < SR_CRYPTO_KEYBYTES; byte++)
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[key][byte]);
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */
1241