xref: /openbsd-src/sys/dev/softraid_crypto.c (revision e5157e49389faebcb42b7237d55fbf096d9c2523)
1 /* $OpenBSD: softraid_crypto.c,v 1.114 2014/10/30 17:26:23 tedu Exp $ */
2 /*
3  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6  * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/buf.h>
26 #include <sys/device.h>
27 #include <sys/ioctl.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/kernel.h>
31 #include <sys/disk.h>
32 #include <sys/rwlock.h>
33 #include <sys/queue.h>
34 #include <sys/fcntl.h>
35 #include <sys/disklabel.h>
36 #include <sys/vnode.h>
37 #include <sys/mount.h>
38 #include <sys/sensors.h>
39 #include <sys/stat.h>
40 #include <sys/conf.h>
41 #include <sys/uio.h>
42 #include <sys/dkio.h>
43 
44 #include <crypto/cryptodev.h>
45 #include <crypto/cryptosoft.h>
46 #include <crypto/rijndael.h>
47 #include <crypto/md5.h>
48 #include <crypto/sha1.h>
49 #include <crypto/sha2.h>
50 #include <crypto/hmac.h>
51 
52 #include <scsi/scsi_all.h>
53 #include <scsi/scsiconf.h>
54 #include <scsi/scsi_disk.h>
55 
56 #include <dev/softraidvar.h>
57 #include <dev/rndvar.h>
58 
59 /*
60  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
61  * to start failing when memory pressure kicks in. We can store this in the WU
62  * because we assert that only one ccb per WU will ever be active.
63  */
struct sr_crypto_wu {
	struct sr_workunit		 cr_wu;		/* Must be first. */
	struct uio			 cr_uio;	/* Single-iovec uio handed to the crypto layer. */
	struct iovec			 cr_iov;	/* Backing iovec for cr_uio. */
	struct cryptop	 		*cr_crp;	/* Preallocated crypto request. */
	struct cryptodesc		*cr_descs;	/* Head of the full (MAXPHYS-sized) desc chain. */
	void				*cr_dmabuf;	/* MAXPHYS DMA-able bounce buffer for writes. */
};
72 
73 
74 struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
75 int		sr_crypto_create_keys(struct sr_discipline *);
76 int		sr_crypto_get_kdf(struct bioc_createraid *,
77 		    struct sr_discipline *);
78 int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
79 int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
80 int		sr_crypto_decrypt_key(struct sr_discipline *);
81 int		sr_crypto_change_maskkey(struct sr_discipline *,
82 		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
83 int		sr_crypto_create(struct sr_discipline *,
84 		    struct bioc_createraid *, int, int64_t);
85 int		sr_crypto_assemble(struct sr_discipline *,
86 		    struct bioc_createraid *, int, void *);
87 int		sr_crypto_alloc_resources(struct sr_discipline *);
88 void		sr_crypto_free_resources(struct sr_discipline *);
89 int		sr_crypto_ioctl(struct sr_discipline *,
90 		    struct bioc_discipline *);
91 int		sr_crypto_meta_opt_handler(struct sr_discipline *,
92 		    struct sr_meta_opt_hdr *);
93 int		sr_crypto_write(struct cryptop *);
94 int		sr_crypto_rw(struct sr_workunit *);
95 int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
96 void		sr_crypto_done(struct sr_workunit *);
97 int		sr_crypto_read(struct cryptop *);
98 void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
99 		   u_int8_t *, int, u_char *);
100 void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
101 
102 #ifdef SR_DEBUG0
103 void		 sr_crypto_dumpkeys(struct sr_discipline *);
104 #endif
105 
106 /* Discipline initialisation. */
107 void
108 sr_crypto_discipline_init(struct sr_discipline *sd)
109 {
110 	int i;
111 
112 	/* Fill out discipline members. */
113 	sd->sd_type = SR_MD_CRYPTO;
114 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
115 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
116 	sd->sd_max_wu = SR_CRYPTO_NOWU;
117 
118 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
119 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
120 
121 	/* Setup discipline specific function pointers. */
122 	sd->sd_alloc_resources = sr_crypto_alloc_resources;
123 	sd->sd_assemble = sr_crypto_assemble;
124 	sd->sd_create = sr_crypto_create;
125 	sd->sd_free_resources = sr_crypto_free_resources;
126 	sd->sd_ioctl_handler = sr_crypto_ioctl;
127 	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
128 	sd->sd_scsi_rw = sr_crypto_rw;
129 	sd->sd_scsi_done = sr_crypto_done;
130 }
131 
132 int
133 sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
134     int no_chunk, int64_t coerced_size)
135 {
136 	struct sr_meta_opt_item	*omi;
137 	int			rv = EINVAL;
138 
139 	if (no_chunk != 1) {
140 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
141 		    sd->sd_name);
142 		goto done;
143         }
144 
145 	/* Create crypto optional metadata. */
146 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
147 	    M_WAITOK | M_ZERO);
148 	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
149 	    M_WAITOK | M_ZERO);
150 	omi->omi_som->som_type = SR_OPT_CRYPTO;
151 	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
152 	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
153 	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
154 	sd->sd_meta->ssdi.ssd_opt_no++;
155 
156 	sd->mds.mdd_crypto.key_disk = NULL;
157 
158 	if (bc->bc_key_disk != NODEV) {
159 
160 		/* Create a key disk. */
161 		if (sr_crypto_get_kdf(bc, sd))
162 			goto done;
163 		sd->mds.mdd_crypto.key_disk =
164 		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
165 		if (sd->mds.mdd_crypto.key_disk == NULL)
166 			goto done;
167 		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
168 
169 	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
170 
171 		/* No hint available yet. */
172 		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
173 		rv = EAGAIN;
174 		goto done;
175 
176 	} else if (sr_crypto_get_kdf(bc, sd))
177 		goto done;
178 
179 	/* Passphrase volumes cannot be automatically assembled. */
180 	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
181 		goto done;
182 
183 	sd->sd_meta->ssdi.ssd_size = coerced_size;
184 
185 	sr_crypto_create_keys(sd);
186 
187 	sd->sd_max_ccb_per_wu = no_chunk;
188 
189 	rv = 0;
190 done:
191 	return (rv);
192 }
193 
/*
 * Assemble an existing CRYPTO volume: obtain the mask key either from
 * the kernel (data), from a key disk, or from userland KDF info, and
 * stash it in the run-time metadata.  Returns 0 on success, EINVAL on
 * failure and EAGAIN after the KDF hint has been handed to userland
 * for a passphrase prompt.
 */
int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		/* Refuse requests larger than the stored hint. */
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}
246 
/*
 * Prepare a crypto work unit for encryption (data out) or decryption
 * (data in): set up the uio/iovec, trim the preallocated crypto
 * descriptor chain to the I/O size and fill in one descriptor per
 * DEV_BSIZE sector, using the block number as the explicit IV.
 */
struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blk;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	/* The crypto WU is embedded at the start of the work unit. */
	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		/* Encrypt into the DMA bounce buffer, not in place. */
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blk = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;		/* I/O size in sectors. */

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * across a different key blocks (e.g. 0.5TB boundary). Currently
	 * this is already broken by the use of scr_key[0] below.
	 */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	/* One descriptor per sector; the sector number is the IV. */
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}

	return (crwu);
}
315 
/*
 * Copy KDF information (hint and/or mask key) from userland into the
 * discipline metadata.  The caller supplied buffer must be exactly
 * sizeof(struct sr_crypto_kdfinfo).  Returns 0 on success, EINVAL
 * otherwise; the kernel copy of the KDF info is always wiped.
 */
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	/* Embedded length must agree with what userland passed in. */
	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	/* Destroy the key material before releasing the buffer. */
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

	return (rv);
}
363 
364 int
365 sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
366 {
367 	rijndael_ctx		ctx;
368 	int			i, rv = 1;
369 
370 	switch (alg) {
371 	case SR_CRYPTOM_AES_ECB_256:
372 		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
373 			goto out;
374 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
375 			rijndael_encrypt(&ctx, &p[i], &c[i]);
376 		rv = 0;
377 		break;
378 	default:
379 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
380 		    "softraid", alg);
381 		rv = -1;
382 		goto out;
383 	}
384 
385 out:
386 	explicit_bzero(&ctx, sizeof(ctx));
387 	return (rv);
388 }
389 
390 int
391 sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
392 {
393 	rijndael_ctx		ctx;
394 	int			i, rv = 1;
395 
396 	switch (alg) {
397 	case SR_CRYPTOM_AES_ECB_256:
398 		if (rijndael_set_key(&ctx, key, 256) != 0)
399 			goto out;
400 		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
401 			rijndael_decrypt(&ctx, &c[i], &p[i]);
402 		rv = 0;
403 		break;
404 	default:
405 		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
406 		    "softraid", alg);
407 		rv = -1;
408 		goto out;
409 	}
410 
411 out:
412 	explicit_bzero(&ctx, sizeof(ctx));
413 	return (rv);
414 }
415 
/*
 * Compute the key decryption check code:
 *	check_digest = HMAC-SHA1_k(key), with k = SHA1(maskkey).
 * check_digest must provide SHA1_DIGEST_LENGTH bytes.  All
 * intermediate key material is wiped before returning.
 */
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	/* Wipe the derived check key and hash state. */
	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}
442 
/*
 * Unmask the disk keys: decrypt scm_key from the on-disk metadata
 * into scr_key using the mask key, then verify the result against
 * the stored HMAC-SHA1 check code.  The mask key is destroyed on
 * exit regardless of outcome.  Returns 0 on success, 1 on failure.
 */
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		/* Wrong mask key - do not leave garbage keys around. */
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}
487 
/*
 * Generate fresh random disk keys, mask them with the mask key
 * (AES-256-ECB) into the on-disk metadata and store an HMAC-SHA1
 * check code so a later unmask can be verified.  The plaintext keys
 * are wiped before returning.  Returns 0 on success, 1 on failure.
 */
int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	/* The mask key must be usable as an AES key. */
	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}
533 
/*
 * Re-encrypt the disk keys under a new mask key: verify kdfinfo1's
 * mask key against the stored HMAC-SHA1 check code, then mask the
 * keys with kdfinfo2's mask key and update the check code.  Both
 * supplied mask keys are wiped before returning.  Returns 0 on
 * success, EPERM for a bad key/passphrase and 1 on other failures.
 */
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
  struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	/* Unmask the current disk keys into a temporary buffer. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Verify the old mask key before committing to anything. */
	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	/*
	 * NOTE(review): this encrypts straight into the metadata copy;
	 * a failure here leaves scm_key partially updated - confirm
	 * that is acceptable.
	 */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	/*
	 * NOTE(review): the new check code is computed over scr_key,
	 * not over the freshly unmasked copy in p - confirm scr_key is
	 * guaranteed to hold the plaintext keys at this point.
	 */
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF, ksz);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}
602 
/*
 * Create a key disk on device dev: generate a fresh mask key, build
 * chunk and volume metadata describing the key disk and write it,
 * together with the mask key in optional metadata, to the device.
 * Returns a chunk describing the key disk, or NULL on failure.
 */
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk    *km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)\n",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	/* Transient discipline used only to write the metadata. */
	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
	key_disk = NULL;

done:
	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
		    sizeof(struct sr_chunk *));
	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
	/*
	 * NOTE(review): open is never cleared on the success path, so
	 * the vnode is closed here even though key_disk->src_vn keeps
	 * a pointer to it - confirm the chunk is reopened before use
	 * (cf. sr_crypto_read_key_disk, which sets open = 0).
	 */
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}
771 
772 struct sr_chunk *
773 sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
774 {
775 	struct sr_softc		*sc = sd->sd_sc;
776 	struct sr_metadata	*sm = NULL;
777 	struct sr_meta_opt_item *omi, *omi_next;
778 	struct sr_meta_opt_hdr	*omh;
779 	struct sr_meta_keydisk	*skm;
780 	struct sr_meta_opt_head som;
781 	struct sr_chunk		*key_disk = NULL;
782 	struct disklabel	label;
783 	struct vnode		*vn = NULL;
784 	char			devname[32];
785 	int			c, part, open = 0;
786 
787 	/*
788 	 * Load a key disk and load keying material into memory.
789 	 */
790 
791 	SLIST_INIT(&som);
792 
793 	sr_meta_getdevname(sc, dev, devname, sizeof(devname));
794 
795 	/* Make sure chunk is not already in use. */
796 	c = sr_chunk_in_use(sc, dev);
797 	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
798 		sr_error(sc, "%s is already in use", devname);
799 		goto done;
800 	}
801 
802 	/* Open device. */
803 	if (bdevvp(dev, &vn)) {
804 		sr_error(sc, "cannot open key disk %s", devname);
805 		goto done;
806 	}
807 	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
808 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
809 		    "open %s\n", DEVNAME(sc), devname);
810 		vput(vn);
811 		goto done;
812 	}
813 	open = 1; /* close dev on error */
814 
815 	/* Get partition details. */
816 	part = DISKPART(dev);
817 	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
818 	    NOCRED, curproc)) {
819 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
820 		    "failed\n", DEVNAME(sc));
821 		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
822 		vput(vn);
823 		goto done;
824 	}
825 	if (label.d_secsize != DEV_BSIZE) {
826 		sr_error(sc, "%s has unsupported sector size (%d)",
827 		    devname, label.d_secsize);
828 		goto done;
829 	}
830 	if (label.d_partitions[part].p_fstype != FS_RAID) {
831 		sr_error(sc, "%s partition not of type RAID (%d)\n",
832 		    devname, label.d_partitions[part].p_fstype);
833 		goto done;
834 	}
835 
836 	/*
837 	 * Read and validate key disk metadata.
838 	 */
839 	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
840 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
841 		sr_error(sc, "native bootprobe could not read native metadata");
842 		goto done;
843 	}
844 
845 	if (sr_meta_validate(sd, dev, sm, NULL)) {
846 		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
847 		    DEVNAME(sc));
848 		goto done;
849 	}
850 
851 	/* Make sure this is a key disk. */
852 	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
853 		sr_error(sc, "%s is not a key disk", devname);
854 		goto done;
855 	}
856 
857 	/* Construct key disk chunk. */
858 	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
859 	key_disk->src_dev_mm = dev;
860 	key_disk->src_vn = vn;
861 	key_disk->src_size = 0;
862 
863 	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
864 	    sizeof(key_disk->src_meta));
865 
866 	/* Read mask key from optional metadata. */
867 	sr_meta_opt_load(sc, sm, &som);
868 	SLIST_FOREACH(omi, &som, omi_link) {
869 		omh = omi->omi_som;
870 		if (omh->som_type == SR_OPT_KEYDISK) {
871 			skm = (struct sr_meta_keydisk *)omh;
872 			bcopy(&skm->skm_maskkey,
873 			    sd->mds.mdd_crypto.scr_maskkey,
874 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
875 		} else if (omh->som_type == SR_OPT_CRYPTO) {
876 			/* Original keydisk format with key in crypto area. */
877 			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
878 			    sd->mds.mdd_crypto.scr_maskkey,
879 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
880 		}
881 	}
882 
883 	open = 0;
884 
885 done:
886 	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
887 		omi_next = SLIST_NEXT(omi, omi_link);
888 		if (omi->omi_som)
889 			free(omi->omi_som, M_DEVBUF, 0);
890 		free(omi, M_DEVBUF, 0);
891 	}
892 
893 	free(sm, M_DEVBUF, SR_META_SIZE * 512);
894 
895 	if (vn && open) {
896 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
897 		vput(vn);
898 	}
899 
900 	return key_disk;
901 }
902 
/*
 * Allocate the run-time resources for a CRYPTO volume: work units,
 * CCBs, per-WU DMA buffers and crypto requests, and one crypto
 * session per 2^SR_CRYPTO_KEY_BLKSHIFT blocks.  Also unmasks the
 * disk keys.  Returns 0 on success or an errno on failure.
 */
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	/* Map the metadata algorithm to a crypto framework key length. */
	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	/* Mark every session slot free before any can fail. */
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd, sizeof(struct sr_crypto_wu))) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* Remember the full chain; sr_crypto_prepare trims it. */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			/*
			 * Tear down the sessions created so far; the
			 * failed slot is still -1, terminating the scan.
			 */
			for (i = 0;
			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			     i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}
986 
987 void
988 sr_crypto_free_resources(struct sr_discipline *sd)
989 {
990 	struct sr_workunit	*wu;
991 	struct sr_crypto_wu	*crwu;
992 	u_int			i;
993 
994 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
995 	    DEVNAME(sd->sd_sc));
996 
997 	if (sd->mds.mdd_crypto.key_disk != NULL) {
998 		explicit_bzero(sd->mds.mdd_crypto.key_disk,
999 		    sizeof(*sd->mds.mdd_crypto.key_disk));
1000 		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
1001 		    sizeof(*sd->mds.mdd_crypto.key_disk));
1002 	}
1003 
1004 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
1005 
1006 	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
1007 		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
1008 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
1009 	}
1010 
1011 	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
1012 		crwu = (struct sr_crypto_wu *)wu;
1013 		if (crwu->cr_dmabuf)
1014 			dma_free(crwu->cr_dmabuf, MAXPHYS);
1015 		if (crwu->cr_crp) {
1016 			crwu->cr_crp->crp_desc = crwu->cr_descs;
1017 			crypto_freereq(crwu->cr_crp);
1018 		}
1019 	}
1020 
1021 	sr_wu_free(sd);
1022 	sr_ccb_free(sd);
1023 }
1024 
int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		/*
		 * Pull the kdfpair descriptor in from userland, then the
		 * two kdfinfo structures it points at (old and new key
		 * material).  Each copyin reads the full in-kernel struct
		 * size even though only kdfsize*\/bd_size bytes were
		 * validated against it -- assumes userland always supplies
		 * full-sized structures; TODO confirm against callers.
		 */
		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

	/*
	 * Deliberate fall-through from the success paths: always scrub
	 * the on-stack copies of key material before returning, whether
	 * we got here via "goto bad" or by completing normally.
	 */
bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}
1088 
1089 int
1090 sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1091 {
1092 	int rv = EINVAL;
1093 
1094 	if (om->som_type == SR_OPT_CRYPTO) {
1095 		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
1096 		rv = 0;
1097 	}
1098 
1099 	return (rv);
1100 }
1101 
1102 int
1103 sr_crypto_rw(struct sr_workunit *wu)
1104 {
1105 	struct sr_crypto_wu	*crwu;
1106 	daddr_t			blk;
1107 	int			rv = 0;
1108 
1109 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
1110 	    DEVNAME(wu->swu_dis->sd_sc), wu);
1111 
1112 	if (sr_validate_io(wu, &blk, "sr_crypto_rw"))
1113 		return (1);
1114 
1115 	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
1116 		crwu = sr_crypto_prepare(wu, 1);
1117 		crwu->cr_crp->crp_callback = sr_crypto_write;
1118 		rv = crypto_invoke(crwu->cr_crp);
1119 		if (rv == 0)
1120 			rv = crwu->cr_crp->crp_etype;
1121 	} else
1122 		rv = sr_crypto_dev_rw(wu, NULL);
1123 
1124 	return (rv);
1125 }
1126 
1127 int
1128 sr_crypto_write(struct cryptop *crp)
1129 {
1130 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1131 	struct sr_workunit	*wu = &crwu->cr_wu;
1132 	int			s;
1133 
1134 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
1135 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1136 
1137 	if (crp->crp_etype) {
1138 		/* fail io */
1139 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1140 		s = splbio();
1141 		sr_scsi_done(wu->swu_dis, wu->swu_xs);
1142 		splx(s);
1143 	}
1144 
1145 	return (sr_crypto_dev_rw(wu, crwu));
1146 }
1147 
/*
 * Queue the device I/O for a work unit.  For writes, crwu carries the
 * encrypted data; for reads crwu is NULL and the data is read into the
 * xfer buffer directly (decryption happens later in sr_crypto_done()).
 */
int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blk;

	/* Translate the logical block to its on-disk location. */
	blk = wu->swu_blk_start;
	blk += sd->sd_meta->ssd_data_offset;

	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		/*
		 * Write path: point the CCB at the encrypted buffer held
		 * in the crypto request, not the caller's plaintext.
		 * crwu is non-NULL here -- only reads pass crwu == NULL.
		 */
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}
1183 
1184 void
1185 sr_crypto_done(struct sr_workunit *wu)
1186 {
1187 	struct scsi_xfer	*xs = wu->swu_xs;
1188 	struct sr_crypto_wu	*crwu;
1189 	int			s;
1190 
1191 	/* If this was a successful read, initiate decryption of the data. */
1192 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
1193 		crwu = sr_crypto_prepare(wu, 0);
1194 		crwu->cr_crp->crp_callback = sr_crypto_read;
1195 		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_invoke %p\n",
1196 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
1197 		crypto_invoke(crwu->cr_crp);
1198 		return;
1199 	}
1200 
1201 	s = splbio();
1202 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1203 	splx(s);
1204 }
1205 
1206 int
1207 sr_crypto_read(struct cryptop *crp)
1208 {
1209 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
1210 	struct sr_workunit	*wu = &crwu->cr_wu;
1211 	int			s;
1212 
1213 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
1214 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1215 
1216 	if (crp->crp_etype)
1217 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
1218 
1219 	s = splbio();
1220 	sr_scsi_done(wu->swu_dis, wu->swu_xs);
1221 	splx(s);
1222 
1223 	return (0);
1224 }
1225 
/*
 * Hotplug notification for the crypto discipline.  Currently a stub
 * that only logs the event under SR_D_MISC debugging.
 */
void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}
1232 
#ifdef SR_DEBUG0
/*
 * Debug-only helper: dump both the on-metadata (masked) keys and the
 * decrypted runtime keys in hex.  Compiled out unless SR_DEBUG0 is
 * defined -- never enable in production, this prints key material.
 */
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */
1259