xref: /openbsd-src/sys/dev/softraid_crypto.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /* $OpenBSD: softraid_crypto.c,v 1.32 2008/11/25 23:05:17 marco Exp $ */
2 /*
3  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5  * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/proc.h>
28 #include <sys/malloc.h>
29 #include <sys/pool.h>
30 #include <sys/kernel.h>
31 #include <sys/disk.h>
32 #include <sys/rwlock.h>
33 #include <sys/queue.h>
34 #include <sys/fcntl.h>
35 #include <sys/disklabel.h>
36 #include <sys/mount.h>
37 #include <sys/sensors.h>
38 #include <sys/stat.h>
39 #include <sys/conf.h>
40 #include <sys/uio.h>
41 
42 #include <crypto/cryptodev.h>
43 #include <crypto/cryptosoft.h>
44 #include <crypto/rijndael.h>
45 #include <crypto/md5.h>
46 #include <crypto/sha1.h>
47 #include <crypto/sha2.h>
48 #include <crypto/hmac.h>
49 
50 #include <scsi/scsi_all.h>
51 #include <scsi/scsiconf.h>
52 #include <scsi/scsi_disk.h>
53 
54 #include <dev/softraidvar.h>
55 #include <dev/rndvar.h>
56 
/* Per-I/O crypto request construction/teardown. */
struct cryptop	*sr_crypto_getcryptop(struct sr_workunit *, int);
int		 sr_crypto_create_keys(struct sr_discipline *);
void		*sr_crypto_putcryptop(struct cryptop *);
/* Key material handling (KDF hint from userland, masked disk keys). */
int		 sr_crypto_get_kdf(struct bioc_createraid *,
		     struct sr_discipline *);
int		 sr_crypto_decrypt_key(struct sr_discipline *);
/* Discipline lifecycle. */
int		 sr_crypto_alloc_resources(struct sr_discipline *);
int		 sr_crypto_free_resources(struct sr_discipline *);
/* I/O path: entry point, crypto callbacks, disk completion. */
int		 sr_crypto_write(struct cryptop *);
int		 sr_crypto_rw(struct sr_workunit *);
int		 sr_crypto_rw2(struct sr_workunit *, struct cryptop *);
void		 sr_crypto_intr(struct buf *);
int		 sr_crypto_read(struct cryptop *);
void		 sr_crypto_finish_io(struct sr_workunit *);
void		 sr_crypto_calculate_check_hmac_sha1(struct sr_discipline *,
		    u_char[SHA1_DIGEST_LENGTH]);

#ifdef SR_DEBUG0
void		 sr_crypto_dumpkeys(struct sr_discipline *);
#endif
77 
78 struct cryptop *
79 sr_crypto_getcryptop(struct sr_workunit *wu, int encrypt)
80 {
81 	struct scsi_xfer	*xs = wu->swu_xs;
82 	struct sr_discipline	*sd = wu->swu_dis;
83 	struct cryptop		*crp = NULL;
84 	struct cryptodesc	*crd;
85 	struct uio		*uio = NULL;
86 	int			flags, i, n, s;
87 	daddr64_t		blk = 0;
88 	u_int			keyndx;
89 
90 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_getcryptop wu: %p encrypt: %d\n",
91 	    DEVNAME(sd->sd_sc), wu, encrypt);
92 
93 	s = splbio();
94 	uio = pool_get(&sd->mds.mdd_crypto.sr_uiopl, PR_ZERO);
95 	if (uio == NULL)
96 		goto unwind;
97 	uio->uio_iov = pool_get(&sd->mds.mdd_crypto.sr_iovpl, 0);
98 	if (uio->uio_iov == NULL)
99 		goto unwind;
100 	splx(s);
101 
102 	uio->uio_iovcnt = 1;
103 	uio->uio_iov->iov_len = xs->datalen;
104 	if (xs->flags & SCSI_DATA_OUT) {
105 		uio->uio_iov->iov_base = malloc(xs->datalen, M_DEVBUF,
106 		    M_NOWAIT);
107 		bcopy(xs->data, uio->uio_iov->iov_base, xs->datalen);
108 	} else
109 		uio->uio_iov->iov_base = xs->data;
110 
111 	if (xs->cmdlen == 10)
112 		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
113 	else if (xs->cmdlen == 16)
114 		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
115 	else if (xs->cmdlen == 6)
116 		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
117 
118 	n = xs->datalen >> DEV_BSHIFT;
119 	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
120 	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
121 
122 	crp = crypto_getreq(n);
123 	if (crp == NULL)
124 		goto unwind;
125 
126 	/* Select crypto session based on block number */
127 	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
128 	if (keyndx > SR_CRYPTO_MAXKEYS)
129 		goto unwind;
130 	crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
131 	if (crp->crp_sid == (u_int64_t)-1)
132 		goto unwind;
133 
134 	crp->crp_ilen = xs->datalen;
135 	crp->crp_alloctype = M_DEVBUF;
136 	crp->crp_buf = uio;
137 	for (i = 0, crd = crp->crp_desc; crd; i++, blk++, crd = crd->crd_next) {
138 		crd->crd_skip = i << DEV_BSHIFT;
139 		crd->crd_len = DEV_BSIZE;
140 		crd->crd_inject = 0;
141 		crd->crd_flags = flags;
142 		crd->crd_alg = CRYPTO_AES_XTS;
143 
144 		switch (sd->mds.mdd_crypto.scr_meta.scm_alg) {
145 		case SR_CRYPTOA_AES_XTS_128:
146 			crd->crd_klen = 256;
147 			break;
148 		case SR_CRYPTOA_AES_XTS_256:
149 			crd->crd_klen = 512;
150 			break;
151 		default:
152 			goto unwind;
153 		}
154 		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
155 		bcopy(&blk, crd->crd_iv, sizeof(blk));
156 	}
157 
158 	return (crp);
159 unwind:
160 	if (crp)
161 		crypto_freereq(crp);
162 	if (wu->swu_xs->flags & SCSI_DATA_OUT)
163 		free(uio->uio_iov->iov_base, M_DEVBUF);
164 
165 	s = splbio();
166 	if (uio && uio->uio_iov)
167 		pool_put(&sd->mds.mdd_crypto.sr_iovpl, uio->uio_iov);
168 	if (uio)
169 		pool_put(&sd->mds.mdd_crypto.sr_uiopl, uio);
170 	splx(s);
171 
172 	return (NULL);
173 }
174 
175 void *
176 sr_crypto_putcryptop(struct cryptop *crp)
177 {
178 	struct uio		*uio = crp->crp_buf;
179 	struct sr_workunit	*wu = crp->crp_opaque;
180 	struct sr_discipline	*sd = wu->swu_dis;
181 	int			s;
182 
183 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_putcryptop crp: %p\n",
184 	    DEVNAME(wu->swu_dis->sd_sc), crp);
185 
186 	if (wu->swu_xs->flags & SCSI_DATA_OUT)
187 		free(uio->uio_iov->iov_base, M_DEVBUF);
188 	s = splbio();
189 	pool_put(&sd->mds.mdd_crypto.sr_iovpl, uio->uio_iov);
190 	pool_put(&sd->mds.mdd_crypto.sr_uiopl, uio);
191 	splx(s);
192 	crypto_freereq(crp);
193 
194 	return (wu);
195 }
196 
/*
 * sr_crypto_get_kdf: copy KDF material supplied by userland (bioctl)
 * into the discipline.
 *
 * bc->bc_opaque carries a struct sr_crypto_kdfinfo.  When flagged, the
 * KDF hint is stored into the on-disk metadata (so the key can be
 * re-derived at assemble time) and the mask key into the run-time
 * metadata only.
 *
 * Returns 0 on success, EINVAL on any validation or copyin failure.
 */
int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	struct sr_crypto_kdfinfo	*kdfinfo;
	int				 rv = EINVAL;

	/* userland must actually hand something in, and enough of it */
	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size < sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	/* self-declared length must match what userland claimed to pass */
	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta.scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta.scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	/* kdfinfo holds key material; wipe it before freeing */
	bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}
244 
245 void
246 sr_crypto_calculate_check_hmac_sha1(struct sr_discipline *sd,
247     u_char check_digest[SHA1_DIGEST_LENGTH])
248 {
249 	u_char		check_key[SHA1_DIGEST_LENGTH];
250 	HMAC_SHA1_CTX	hmacctx;
251 	SHA1_CTX	shactx;
252 
253 	bzero(check_key, sizeof(check_key));
254 	bzero(&hmacctx, sizeof(hmacctx));
255 	bzero(&shactx, sizeof(shactx));
256 
257 	/* k = SHA1(mask_key) */
258 	SHA1Init(&shactx);
259 	SHA1Update(&shactx, sd->mds.mdd_crypto.scr_maskkey,
260 	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
261 	SHA1Final(check_key, &shactx);
262 
263 	/* sch_mac = HMAC_SHA1_k(unencrypted scm_key) */
264 	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
265 	HMAC_SHA1_Update(&hmacctx, (u_int8_t *)sd->mds.mdd_crypto.scr_key,
266 	    sizeof(sd->mds.mdd_crypto.scr_key));
267 	HMAC_SHA1_Final(check_digest, &hmacctx);
268 
269 	bzero(check_key, sizeof(check_key));
270 	bzero(&hmacctx, sizeof(hmacctx));
271 	bzero(&shactx, sizeof(shactx));
272 }
273 
/*
 * sr_crypto_decrypt_key: unmask the disk keys.
 *
 * Decrypts scm_key (the masked keys from on-disk metadata) into
 * scr_key using the run-time mask key, then verifies the result
 * against the stored HMAC-SHA1 check code.  The mask key is wiped on
 * exit in all cases, success or failure.
 *
 * Returns 0 on success, 1 on failure (unknown check/mask algorithm or
 * HMAC mismatch, i.e. the mask key / passphrase was wrong).
 */
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	rijndael_ctx	 ctx;
	u_char		*p, *c;
	size_t		 ksz;
	int		 i, rv = 1;
	u_char		check_digest[SHA1_DIGEST_LENGTH];

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta.scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	/* c: masked (ciphertext) keys on disk, p: plaintext destination */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta.scm_key;
	p = (u_char *)sd->mds.mdd_crypto.scr_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);

	switch (sd->mds.mdd_crypto.scr_meta.scm_mask_alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, sd->mds.mdd_crypto.scr_maskkey,
		    256) != 0)
			goto out;
		/* ECB: decrypt each 16-byte block independently */
		for (i = 0; i < ksz; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsuppored scm_mask_alg %u\n",
		    DEVNAME(sd->sd_sc),
		    sd->mds.mdd_crypto.scr_meta.scm_mask_alg);
		goto out;
	}
#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly */
	sr_crypto_calculate_check_hmac_sha1(sd, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta.chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		/* wrong mask key: wipe the bogus plaintext keys */
		bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		bzero(check_digest, sizeof(check_digest));
		goto out;
	}
	bzero(check_digest, sizeof(check_digest));

	rv = 0; /* Success */
 out:
	/* we don't need the mask key anymore */
	bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	bzero(&ctx, sizeof(ctx));
	return rv;
}
329 
330 int
331 sr_crypto_create_keys(struct sr_discipline *sd)
332 {
333 	rijndael_ctx	 ctx;
334 	u_char		*p, *c;
335 	size_t		 ksz;
336 	int		 i;
337 
338 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
339 	    DEVNAME(sd->sd_sc));
340 
341 	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
342 		return (1);
343 
344 	/* XXX allow user to specify */
345 	sd->mds.mdd_crypto.scr_meta.scm_alg = SR_CRYPTOA_AES_XTS_256;
346 
347 	/* generate crypto keys */
348 	arc4random_buf(sd->mds.mdd_crypto.scr_key,
349 	    sizeof(sd->mds.mdd_crypto.scr_key));
350 
351 	/* Mask the disk keys */
352 	sd->mds.mdd_crypto.scr_meta.scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
353 	if (rijndael_set_key_enc_only(&ctx, sd->mds.mdd_crypto.scr_maskkey,
354 	    256) != 0) {
355 		bzero(sd->mds.mdd_crypto.scr_key,
356 		    sizeof(sd->mds.mdd_crypto.scr_key));
357 		bzero(&ctx, sizeof(ctx));
358 		return (1);
359 	}
360 	p = (u_char *)sd->mds.mdd_crypto.scr_key;
361 	c = (u_char *)sd->mds.mdd_crypto.scr_meta.scm_key;
362 	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
363 	for (i = 0; i < ksz; i += RIJNDAEL128_BLOCK_LEN)
364 		rijndael_encrypt(&ctx, &p[i], &c[i]);
365 	bzero(&ctx, sizeof(ctx));
366 
367 	/* Prepare key decryption check code */
368 	sd->mds.mdd_crypto.scr_meta.scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
369 	sr_crypto_calculate_check_hmac_sha1(sd,
370 	    sd->mds.mdd_crypto.scr_meta.chk_hmac_sha1.sch_mac);
371 
372 	/* Erase the plaintext disk keys */
373 	bzero(sd->mds.mdd_crypto.scr_key, sizeof(sd->mds.mdd_crypto.scr_key));
374 
375 
376 #ifdef SR_DEBUG0
377 	sr_crypto_dumpkeys(sd);
378 #endif
379 
380 	sd->mds.mdd_crypto.scr_meta.scm_flags = SR_CRYPTOF_KEY |
381 	    SR_CRYPTOF_KDFHINT;
382 
383 	return (0);
384 }
385 
/*
 * sr_crypto_alloc_resources: per-volume resource setup.
 *
 * Initializes the uio/iovec pools used to build crypto requests,
 * marks every crypto session slot unused, allocates work units and
 * ccbs, unmasks the disk keys and opens one crypto session per range
 * of 2^SR_CRYPTO_KEY_BLKSHIFT disk blocks.
 *
 * Returns 0 on success or an errno-style value on failure.
 */
int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	u_int num_keys, i;

	if (!sd)
		return (EINVAL);

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	pool_init(&sd->mds.mdd_crypto.sr_uiopl, sizeof(struct uio), 0, 0, 0,
	    "sr_uiopl", NULL);
	pool_init(&sd->mds.mdd_crypto.sr_iovpl, sizeof(struct iovec), 0, 0, 0,
	    "sr_iovpl", NULL);

	/* (u_int64_t)-1 marks a session slot as unused */
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd))
		return (ENOMEM);
	if (sr_ccb_alloc(sd))
		return (ENOMEM);
	/* EPERM: keys did not unmask/verify (e.g. wrong passphrase) */
	if (sr_crypto_decrypt_key(sd))
		return (EPERM);

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta.scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	/* <= so the final, possibly partial, key range gets a session too */
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			/* unwind the sessions created so far (i is reused) */
			for (i = 0;
			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			     i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	return (0);
}
447 
448 int
449 sr_crypto_free_resources(struct sr_discipline *sd)
450 {
451 	int		rv = EINVAL;
452 	u_int		i;
453 
454 	if (!sd)
455 		return (rv);
456 
457 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
458 	    DEVNAME(sd->sd_sc));
459 
460 	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
461 		crypto_freesession(
462 		    sd->mds.mdd_crypto.scr_sid[i]);
463 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
464 	}
465 
466 	sr_wu_free(sd);
467 	sr_ccb_free(sd);
468 
469 	pool_destroy(&sd->mds.mdd_crypto.sr_uiopl);
470 	pool_destroy(&sd->mds.mdd_crypto.sr_iovpl);
471 
472 	rv = 0;
473 	return (rv);
474 }
475 
476 int
477 sr_crypto_rw(struct sr_workunit *wu)
478 {
479 	struct cryptop		*crp;
480 	int			 s, rv = 0;
481 
482 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
483 	    DEVNAME(wu->swu_dis->sd_sc), wu);
484 
485 	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
486 		crp = sr_crypto_getcryptop(wu, 1);
487 		crp->crp_callback = sr_crypto_write;
488 		crp->crp_opaque = wu;
489 		s = splvm();
490 		if (crypto_invoke(crp))
491 			rv = 1;
492 		else
493 			rv = crp->crp_etype;
494 		splx(s);
495 	} else
496 		rv = sr_crypto_rw2(wu, NULL);
497 
498 	return (rv);
499 }
500 
501 int
502 sr_crypto_write(struct cryptop *crp)
503 {
504 	int		 	 s;
505 	struct sr_workunit	*wu = crp->crp_opaque;
506 
507 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
508 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
509 
510 	if (crp->crp_etype) {
511 		/* fail io */
512 		((struct sr_workunit *)(crp->crp_opaque))->swu_xs->error =
513 		    XS_DRIVER_STUFFUP;
514 		s = splbio();
515 		sr_crypto_finish_io(crp->crp_opaque);
516 		splx(s);
517 	}
518 
519 	return (sr_crypto_rw2(wu, crp));
520 }
521 
/*
 * sr_crypto_rw2: issue the actual disk I/O for a work unit.
 *
 * Builds a single ccb against chunk 0 and queues it, unless it
 * collides with an in-flight work unit.  For writes, crp carries the
 * encrypted bounce buffer that goes to disk; for reads, the data lands
 * directly in the xs buffer and is decrypted in sr_crypto_intr().
 *
 * Returns 0 when the I/O was started or queued, 1 on error.
 */
int
sr_crypto_rw2(struct sr_workunit *wu, struct cryptop *crp)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	int			 s;
	daddr64_t		 blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	/* skip the softraid metadata at the start of the chunk */
	blk += SR_META_SIZE + SR_META_OFFSET;

	wu->swu_io_count = 1;

	ccb = sr_ccb_get(sd);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}

	/* B_CALL: sr_crypto_intr() runs when the buf completes */
	ccb->ccb_buf.b_flags = B_CALL;
	ccb->ccb_buf.b_iodone = sr_crypto_intr;
	ccb->ccb_buf.b_blkno = blk;
	ccb->ccb_buf.b_bcount = xs->datalen;
	ccb->ccb_buf.b_bufsize = xs->datalen;
	ccb->ccb_buf.b_resid = xs->datalen;

	if (xs->flags & SCSI_DATA_IN) {
		ccb->ccb_buf.b_flags |= B_READ;
		ccb->ccb_buf.b_data = xs->data;
	} else {
		/* write: send the encrypted copy; keep crp for teardown */
		uio = crp->crp_buf;
		ccb->ccb_buf.b_flags |= B_WRITE;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crp;
	}

	ccb->ccb_buf.b_error = 0;
	ccb->ccb_buf.b_proc = curproc;
	ccb->ccb_wu = wu;
	ccb->ccb_target = 0;
	ccb->ccb_buf.b_dev = sd->sd_vol.sv_chunks[0]->src_dev_mm;
	ccb->ccb_buf.b_vp = NULL;

	LIST_INIT(&ccb->ccb_buf.b_dep);

	TAILQ_INSERT_TAIL(&wu->swu_ccb, ccb, ccb_link);

        DNPRINTF(SR_D_DIS, "%s: %s: sr_crypto_rw2: b_bcount: %d "
            "b_blkno: %x b_flags 0x%0x b_data %p\n",
            DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
            ccb->ccb_buf.b_bcount, ccb->ccb_buf.b_blkno,
            ccb->ccb_buf.b_flags, ccb->ccb_buf.b_data);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crp)
		crp->crp_etype = EINVAL;
	return (1);
}
597 
/*
 * sr_crypto_intr: biodone callback for the underlying disk buf.
 *
 * Accounts the completed ccb against its work unit.  Once all I/Os of
 * the work unit have completed, the wu is removed from the pending
 * queue (starting any deferred collider), and either the read data is
 * handed to the crypto framework for decryption (completion continues
 * in sr_crypto_read()) or the work unit is finished directly.
 */
void
sr_crypto_intr(struct buf *bp)
{
	struct sr_ccb		*ccb = (struct sr_ccb *)bp;
	struct sr_workunit	*wu = ccb->ccb_wu, *wup;
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_softc		*sc = sd->sd_sc;
	struct cryptop		*crp;
	int			 s, s2, pend;

        DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr bp: %x xs: %x\n",
            DEVNAME(sc), bp, wu->swu_xs);

        DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: b_bcount: %d b_resid: %d"
            " b_flags: 0x%0x\n", DEVNAME(sc), ccb->ccb_buf.b_bcount,
            ccb->ccb_buf.b_resid, ccb->ccb_buf.b_flags);

        s = splbio();

	if (ccb->ccb_buf.b_flags & B_ERROR) {
		printf("%s: i/o error on block %lld\n", DEVNAME(sc),
		    ccb->ccb_buf.b_blkno);
		wu->swu_ios_failed++;
		ccb->ccb_state = SR_CCB_FAILED;
		/* take the failing chunk offline; -1 means a bogus ccb */
		if (ccb->ccb_target != -1)
			sd->sd_set_chunk_state(sd, ccb->ccb_target,
			    BIOC_SDOFFLINE);
		else
			panic("%s: invalid target on wu: %p", DEVNAME(sc), wu);
	} else {
		ccb->ccb_state = SR_CCB_OK;
		wu->swu_ios_succeeded++;
	}
	wu->swu_ios_complete++;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: comp: %d count: %d\n",
	    DEVNAME(sc), wu->swu_ios_complete, wu->swu_io_count);

	if (wu->swu_ios_complete == wu->swu_io_count) {
		/* the wu only fails if every component I/O failed */
		if (wu->swu_ios_failed == wu->swu_ios_complete)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->error = XS_NOERROR;

		/* remove the wu from the pending queue */
		pend = 0;
		TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link) {
			if (wu == wup) {
				TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
				pend = 1;

				/* a deferred colliding wu may now run */
				if (wu->swu_collider) {
					wu->swu_collider->swu_state =
					    SR_WU_INPROGRESS;
					TAILQ_REMOVE(&sd->sd_wu_defq,
					    wu->swu_collider, swu_link);
					sr_raid_startwu(wu->swu_collider);
				}
				break;
			}
		}

		if (!pend)
			printf("%s: wu: %p not on pending queue\n",
			    DEVNAME(sc), wu);

		if ((xs->flags & SCSI_DATA_IN) && (xs->error == XS_NOERROR)) {
			/* successful read: decrypt in place via crypto fw */
			/*
			 * NOTE(review): sr_crypto_getcryptop() can return
			 * NULL, which would be dereferenced just below —
			 * verify and handle.
			 */
			crp = sr_crypto_getcryptop(wu, 0);
			ccb->ccb_opaque = crp;
			crp->crp_callback = sr_crypto_read;
			crp->crp_opaque = wu;
			DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke "
			    "%p\n", DEVNAME(sc), crp);
			s2 = splvm();
			crypto_invoke(crp);
			splx(s2);
			goto done;
		}

		sr_crypto_finish_io(wu);
	}

done:
	splx(s);
}
683 
684 void
685 sr_crypto_finish_io(struct sr_workunit *wu)
686 {
687 	struct sr_discipline	*sd = wu->swu_dis;
688 	struct scsi_xfer	*xs = wu->swu_xs;
689 	struct sr_ccb		*ccb;
690 #ifdef SR_DEBUG
691 	struct sr_softc		*sc = sd->sd_sc;
692 #endif /* SR_DEBUG */
693 
694 	splassert(IPL_BIO);
695 
696 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
697 	    DEVNAME(sc), wu, xs);
698 
699 	xs->resid = 0;
700 	xs->flags |= ITSDONE;
701 
702 	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
703 		if (ccb->ccb_opaque == NULL)
704 			continue;
705 		sr_crypto_putcryptop(ccb->ccb_opaque);
706 	}
707 
708 	/* do not change the order of these 2 functions */
709 	sr_wu_put(wu);
710 	sr_scsi_done(sd, xs);
711 
712 	if (sd->sd_sync && sd->sd_wu_pending == 0)
713 		wakeup(sd);
714 }
715 
716 int
717 sr_crypto_read(struct cryptop *crp)
718 {
719 	int			 s;
720 	struct sr_workunit	*wu = crp->crp_opaque;
721 
722 	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
723 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
724 
725 	if (crp->crp_etype)
726 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
727 
728 	s = splbio();
729 	sr_crypto_finish_io(wu);
730 	splx(s);
731 
732 	return (0);
733 }
734 
735 #ifdef SR_DEBUG0
736 void
737 sr_crypto_dumpkeys(struct sr_discipline *sd)
738 {
739 	int	i, j;
740 
741 	printf("sr_crypto_dumpkeys:\n");
742 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
743 		printf("\tscm_key[%d]: 0x", i);
744 		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
745 			printf("%02x",
746 			    sd->mds.mdd_crypto.scr_meta.scm_key[i][j]);
747 		}
748 		printf("\n");
749 	}
750 	printf("sr_crypto_dumpkeys: runtime data keys:\n");
751 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
752 		printf("\tscr_key[%d]: 0x", i);
753 		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
754 			printf("%02x",
755 			    sd->mds.mdd_crypto.scr_key[i][j]);
756 		}
757 		printf("\n");
758 	}
759 }
#endif	/* SR_DEBUG0 */
761