xref: /dflybsd-src/sys/dev/disk/dm/crypt/dm_target_crypt.c (revision 872a09d51adf63b4bdae6adb1d96a53f76e161e2)
1 /*
2  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Alex Hornung <ahornung@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * This file implements initial version of device-mapper crypt target.
37  */
38 #include <sys/endian.h>
39 
40 #include <sys/bio.h>
41 #include <sys/globaldata.h>
42 #include <sys/kerneldump.h>
43 #include <sys/malloc.h>
44 #include <sys/mpipe.h>
45 #include <sys/md5.h>
46 #include <sys/mutex2.h>
47 #include <crypto/sha1.h>
48 #include <crypto/sha2/sha2.h>
49 #include <opencrypto/cryptodev.h>
50 #include <opencrypto/rmd160.h>
51 #include <machine/cpufunc.h>
52 #include <cpu/atomic.h>
53 
54 #include <sys/ktr.h>
55 #include <sys/spinlock2.h>
56 
57 #include <dev/disk/dm/dm.h>
58 MALLOC_DEFINE(M_DMCRYPT, "dm_crypt", "Device Mapper Target Crypt");
59 
60 KTR_INFO_MASTER(dmcrypt);
61 
62 #if !defined(KTR_DMCRYPT)
63 #define KTR_DMCRYPT	KTR_ALL
64 #endif
65 
66 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_dispatch, 0,
67     "crypto_dispatch(%p)", struct cryptop *crp);
68 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypt_strategy, 0,
69     "crypt_strategy(b_cmd = %d, bp = %p)", int cmd, struct buf *bp);
70 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_start, 1,
71     "crypto_write_start(crp = %p, bp = %p, sector = %d/%d)",
72     struct cryptop *crp, struct buf *bp, int i, int sectors);
73 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_write_done, 1,
74     "crypto_cb_write_done(crp = %p, bp = %p, n = %d)",
75     struct cryptop *crp, struct buf *bp, int n);
76 KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_write_done, 1,
77     "bio_write_done(bp = %p)", struct buf *bp);
78 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_retry, 1,
79     "crypto_write_retry(crp = %p)", struct buf *bp);
80 KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_read_done, 2,
81     "bio_read_done(bp = %p)", struct buf *bp);
82 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_read_start, 2,
83     "crypto_read_start(crp = %p, bp = %p, sector = %d/%d)",
84     struct cryptop *crp, struct buf *bp, int i, int sectors);
85 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_read_done, 2,
86     "crypto_cb_read_done(crp = %p, bp = %p, n = %d)",
87     struct cryptop *crp, struct buf *bp, int n);
88 
struct target_crypt_config;

/* Deferred-dispatch hook type; matches dmtc_crypto_dispatch(). */
typedef void dispatch_t(void *);
/*
 * IV generation: fill an IV buffer for the given sector, then
 * dispatch the opaque cryptop (all implementations end by calling
 * dmtc_crypto_dispatch(opaque), possibly asynchronously).
 */
typedef void ivgen_t(struct target_crypt_config *, u_int8_t *, size_t, off_t,
    void *);

/* Optional per-generator constructor/destructor (may be NULL). */
typedef int ivgen_ctor_t(struct target_crypt_config *, char *, void **);
typedef int ivgen_dtor_t(struct target_crypt_config *, void *);

/*
 * One entry per supported IV generation scheme; see the ivgens[]
 * table below.
 */
struct iv_generator {
	const char	*name;		/* iv_mode string, e.g. "essiv" */
	ivgen_ctor_t	*ctor;		/* optional setup (NULL if unused) */
	ivgen_dtor_t	*dtor;		/* optional teardown (NULL if unused) */
	ivgen_t		*gen_iv;	/* fills IV and dispatches the crp */
};
104 
/*
 * Preallocated per-request context for ESSIV IV generation, kept on a
 * singly-linked freelist hanging off essiv_ivgen_priv.
 */
struct essiv_ivgen_data {
	struct essiv_ivgen_data *next;	/* freelist linkage */
	void		*ivpriv;	/* owning essiv_ivgen_priv */
	void		*opaque;	/* caller's cryptop, dispatched after IV encryption */
	struct cryptop	crp;		/* crypto op used to encrypt the IV */
	struct cryptodesc crd;		/* descriptor for crp */
};
112 
/*
 * Private state for the ESSIV generator: a crypto session keyed with a
 * hash of the volume key, plus a spinlock-protected freelist of
 * preallocated essiv_ivgen_data elements.
 */
struct essiv_ivgen_priv {
	struct cryptoini	crypto_session;
	struct spinlock		ivdata_spin;	/* protects ivdata_base */
	struct essiv_ivgen_data	*ivdata_base;	/* freelist head */
	u_int64_t		crypto_sid;	/* session for IV encryption */
	size_t			keyhash_len;	/* bytes valid in crypto_keyhash */
	u_int8_t		crypto_keyhash[SHA512_DIGEST_LENGTH];
};
121 
/*
 * Per-target configuration, created by dm_target_crypt_init() and
 * stored as the dm table entry's target_config.
 */
typedef struct target_crypt_config {
	size_t	params_len;		/* length of the param/status string */
	dm_pdev_t *pdev;		/* underlying physical device */
	char	*status_str;		/* table line returned by _table() */
	int	crypto_alg;		/* CRYPTO_* algorithm id */
	int	crypto_klen;		/* key length in bits */
	u_int8_t	crypto_key[512>>3];	/* raw key, up to 512 bits */

	u_int64_t	crypto_sid;	/* opencrypto data session id */
	u_int64_t	block_offset;	/* start offset on pdev, in sectors */
	int64_t		iv_offset;	/* IV offset relative to block_offset */
	SHA512_CTX	essivsha512_ctx; /* only used by disabled geli_ivgen */

	struct cryptoini	crypto_session;

	struct iv_generator	*ivgen;	/* selected entry in ivgens[] */
	void	*ivgen_priv;		/* ivgen ctor state (essiv only) */

	struct malloc_pipe	read_mpipe;	/* preallocated read buffers */
	struct malloc_pipe	write_mpipe;	/* preallocated write buffers */
} dm_target_crypt_config_t;
143 
/*
 * Per-I/O state, carried in bio_caller_info2.ptr across the crypto
 * callbacks; laid out at the start of an mpipe buffer.
 */
struct dmtc_helper {
	dm_target_crypt_config_t *priv;	/* owning target config */
	caddr_t	free_addr;	/* mpipe buffer to free on completion */
	caddr_t	orig_buf;	/* NULL on the read path; write path not fully visible here */
	caddr_t data_buf;	/* buffer the crypto ops operate on */
};
150 
/*
 * Context for encrypting a kernel crash-dump chunk (see
 * dmtc_crypto_dump_start(); implementation not in this chunk).
 * Statically sized for up to 128 sectors / 64KB per call.
 */
struct dmtc_dump_helper {
	dm_target_crypt_config_t *priv;
	void *data;		/* source data to encrypt */
	size_t length;
	off_t offset;

	int sectors;		/* number of DEV_BSIZE sectors */
	int *ident;

	struct cryptodesc crd[128];
	struct cryptop crp[128];
	u_char space[65536];	/* presumably the ciphertext destination -- confirm in dump code */
};
164 
/*
 * Sizes of the preallocated mpipe crypto buffers: the dmtc_helper
 * header plus one crp+crd pair per 512-byte sector of a MAXPHYS-sized
 * I/O.  The write buffer additionally reserves MAXPHYS bytes
 * (presumably a bounce copy of the data; the write path is not fully
 * visible in this chunk), while reads decrypt in place.
 */
#define DMTC_BUF_SIZE_WRITE \
    (MAXPHYS + sizeof(struct dmtc_helper) + \
     MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc)))
#define DMTC_BUF_SIZE_READ \
    (sizeof(struct dmtc_helper) + \
     MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc)))
171 
/* Forward declarations: read/write/dump crypto pipeline stages. */
static void dmtc_crypto_dispatch(void *arg);
static void dmtc_crypto_dump_start(dm_target_crypt_config_t *priv,
				struct dmtc_dump_helper *dump_helper);
static void dmtc_crypto_read_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_crypto_write_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_bio_read_done(struct bio *bio);
static void dmtc_bio_write_done(struct bio *bio);
static int dmtc_crypto_cb_dump_done(struct cryptop *crp);
static int dmtc_crypto_cb_read_done(struct cryptop *crp);
static int dmtc_crypto_cb_write_done(struct cryptop *crp);

static ivgen_ctor_t	essiv_ivgen_ctor;
static ivgen_dtor_t	essiv_ivgen_dtor;
static ivgen_t		essiv_ivgen;
static ivgen_t		plain_ivgen;
static ivgen_t		plain64_ivgen;

/*
 * Supported IV generation modes, matched by name against the iv_mode
 * part of the target params in dm_target_crypt_init().
 */
static struct iv_generator ivgens[] = {
	{ .name = "essiv", .ctor = essiv_ivgen_ctor, .dtor = essiv_ivgen_dtor,
	    .gen_iv = essiv_ivgen },
	{ .name = "plain", .ctor = NULL, .dtor = NULL, .gen_iv = plain_ivgen },
	{ .name = "plain64", .ctor = NULL, .dtor = NULL, .gen_iv = plain64_ivgen },
	{ NULL, NULL, NULL, NULL }	/* sentinel */
};
198 
199 /*
200  * Number of crypto buffers.  All crypto buffers will be preallocated
201  * in order to avoid kmalloc() deadlocks in critical low-memory paging
202  * paths.
203  */
204 static __inline int
205 dmtc_get_nmax(void)
206 {
207 	int nmax;
208 
209 	nmax = (physmem * 2 / 1000 * PAGE_SIZE) /
210 	       (DMTC_BUF_SIZE_WRITE + DMTC_BUF_SIZE_READ) + 1;
211 
212 	if (nmax < 2)
213 		nmax = 2;
214 	if (nmax > 8 + ncpus * 2)
215 		nmax = 8 + ncpus * 2;
216 
217 	return nmax;
218 }
219 
220 /*
221  * Initialize the crypto buffer mpipe.  Preallocate all crypto buffers
222  * to avoid making any kmalloc()s in the critical path.
223  */
224 static void
225 dmtc_init_mpipe(struct target_crypt_config *priv)
226 {
227 	int nmax;
228 
229 	nmax = dmtc_get_nmax();
230 
231 	kprintf("dm_target_crypt: Setting %d mpipe buffers\n", nmax);
232 
233 	mpipe_init(&priv->write_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_WRITE,
234 		   nmax, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
235 	mpipe_init(&priv->read_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_READ,
236 		   nmax, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
237 }
238 
/* Tear down the preallocated buffer pipes (inverse of dmtc_init_mpipe). */
static void
dmtc_destroy_mpipe(struct target_crypt_config *priv)
{
	mpipe_done(&priv->write_mpipe);
	mpipe_done(&priv->read_mpipe);
}
245 
/*
 * Overwrite private information (key material etc.) in buf to avoid
 * leaking it: scrub with an 0xFF pattern first, then zero.
 */
static void
dmtc_crypto_clear(void *buf, size_t len)
{
	memset(buf, 0xFF, len);
	memset(buf, 0x00, len);
}
255 
/*
 * ESSIV IV Generator Routines
 */

/*
 * essiv constructor: hash the volume key with the digest named by
 * iv_hash, open a second crypto session (same cipher algorithm as the
 * data session, keyed with that hash) which is used to encrypt the
 * per-sector IVs, and preallocate enough ivdata elements to cover all
 * in-flight I/O so the IV path never allocates memory.
 *
 * Returns 0 on success, EINVAL for a missing/unknown hash name,
 * ENOTSUP if the crypto session cannot be created.
 */
static int
essiv_ivgen_ctor(struct target_crypt_config *priv, char *iv_hash, void **p_ivpriv)
{
	struct essiv_ivgen_priv *ivpriv;
	u_int8_t crypto_keyhash[SHA512_DIGEST_LENGTH];
	unsigned int klen, hashlen;
	int error;
	int nmax;

	klen = (priv->crypto_klen >> 3);	/* key length in bytes */

	if (iv_hash == NULL)
		return EINVAL;

	/* Select the digest by name and hash the volume key with it */
	if (!strcmp(iv_hash, "sha1")) {
		SHA1_CTX ctx;

		hashlen = SHA1_RESULTLEN;
		SHA1Init(&ctx);
		SHA1Update(&ctx, priv->crypto_key, klen);
		SHA1Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha256")) {
		SHA256_CTX ctx;

		hashlen = SHA256_DIGEST_LENGTH;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, priv->crypto_key, klen);
		SHA256_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha384")) {
		SHA384_CTX ctx;

		hashlen = SHA384_DIGEST_LENGTH;
		SHA384_Init(&ctx);
		SHA384_Update(&ctx, priv->crypto_key, klen);
		SHA384_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha512")) {
		SHA512_CTX ctx;

		hashlen = SHA512_DIGEST_LENGTH;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, priv->crypto_key, klen);
		SHA512_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "md5")) {
		MD5_CTX ctx;

		hashlen = MD5_DIGEST_LENGTH;
		MD5Init(&ctx);
		MD5Update(&ctx, priv->crypto_key, klen);
		MD5Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "rmd160") ||
		   !strcmp(iv_hash, "ripemd160")) {
		RMD160_CTX ctx;

		hashlen = 160/8;
		RMD160Init(&ctx);
		RMD160Update(&ctx, priv->crypto_key, klen);
		RMD160Final(crypto_keyhash, &ctx);
	} else {
		return EINVAL;
	}

	/* Convert hashlen to bits */
	hashlen <<= 3;

	ivpriv = kmalloc(sizeof(struct essiv_ivgen_priv), M_DMCRYPT,
			 M_WAITOK | M_ZERO);
	memcpy(ivpriv->crypto_keyhash, crypto_keyhash, sizeof(crypto_keyhash));
	ivpriv->keyhash_len = sizeof(crypto_keyhash);
	/* Scrub the on-stack copy of the key hash */
	dmtc_crypto_clear(crypto_keyhash, sizeof(crypto_keyhash));

	/* IV-encryption session: same cipher, keyed with the key hash */
	ivpriv->crypto_session.cri_alg = priv->crypto_alg;
	ivpriv->crypto_session.cri_key = (u_int8_t *)ivpriv->crypto_keyhash;
	ivpriv->crypto_session.cri_klen = hashlen;
	ivpriv->crypto_session.cri_mlen = 0;
	ivpriv->crypto_session.cri_next = NULL;

	/*
	 * XXX: in principle we also need to check if the block size of the
	 *	cipher is a valid iv size for the block cipher.
	 */

	error = crypto_newsession(&ivpriv->crypto_sid,
				  &ivpriv->crypto_session,
				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
	if (error) {
		kprintf("dm_target_crypt: Error during crypto_newsession "
			"for essiv_ivgen, error = %d\n",
			error);
		dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
		kfree(ivpriv, M_DMCRYPT);
		return ENOTSUP;
	}

	/*
	 * mpipe for 512-byte ivgen elements, make sure there are enough
	 * to cover all in-flight read and write buffers.
	 */
	nmax = dmtc_get_nmax() * (int)(MAXPHYS / DEV_BSIZE) * 2;

	spin_init(&ivpriv->ivdata_spin, "ivdata");

	/* Preallocate the freelist of per-request ivdata elements */
	while (nmax) {
		struct essiv_ivgen_data *ivdata;

		ivdata = kmalloc(sizeof(*ivdata), M_DMCRYPT, M_WAITOK|M_ZERO);
		ivdata->next = ivpriv->ivdata_base;
		ivpriv->ivdata_base = ivdata;
		--nmax;
	}
	*p_ivpriv = ivpriv;

	return 0;
}
372 
/*
 * essiv destructor: tear down the IV-encryption session, release the
 * preallocated ivdata freelist, scrub the key hash and free the
 * private state.  Always returns 0.
 */
static int
essiv_ivgen_dtor(struct target_crypt_config *priv, void *arg)
{
	struct essiv_ivgen_priv *ivpriv;
	struct essiv_ivgen_data *ivdata;

	ivpriv = (struct essiv_ivgen_priv *)arg;
	KKASSERT(ivpriv != NULL);

	crypto_freesession(ivpriv->crypto_sid);

	/* Drain the freelist */
	while ((ivdata = ivpriv->ivdata_base) != NULL) {
		ivpriv->ivdata_base = ivdata->next;
		kfree(ivdata, M_DMCRYPT);
	}
	spin_uninit(&ivpriv->ivdata_spin);

	/* Scrub key-derived material before freeing */
	dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
	kfree(ivpriv, M_DMCRYPT);

	return 0;
}
395 
/*
 * Callback fired when the IV encryption submitted by essiv_ivgen()
 * completes.  Returns the preallocated ivdata element to the freelist
 * and dispatches the caller's original crypto request.
 */
static int
essiv_ivgen_done(struct cryptop *crp)
{
	struct essiv_ivgen_priv *ivpriv;
	struct essiv_ivgen_data *ivdata;
	void *opaque;


	/* Driver asked for a retry */
	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	if (crp->crp_etype != 0) {
		kprintf("dm_target_crypt: essiv_ivgen_done, "
			"crp->crp_etype = %d\n", crp->crp_etype);
	}

	/*
	 * crp is embedded in the essiv_ivgen_data element we handed out
	 * in essiv_ivgen(); crp_opaque points back at that element.
	 */
	ivdata = (void *)crp->crp_opaque;

	ivpriv = ivdata->ivpriv;
	opaque = ivdata->opaque;

	/* Return the element to the freelist */
	spin_lock(&ivpriv->ivdata_spin);
	ivdata->next = ivpriv->ivdata_base;
	ivpriv->ivdata_base = ivdata;
	spin_unlock(&ivpriv->ivdata_spin);

	/* The IV is ready; now fire off the actual data crypto op */
	dmtc_crypto_dispatch(opaque);

	return 0;
}
431 
432 static void
433 essiv_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
434 	    size_t iv_len, off_t sector, void *opaque)
435 {
436 	struct essiv_ivgen_priv *ivpriv;
437 	struct essiv_ivgen_data *ivdata;
438 	struct cryptodesc *crd;
439 	struct cryptop *crp;
440 	int error;
441 
442 	ivpriv = priv->ivgen_priv;
443 	KKASSERT(ivpriv != NULL);
444 
445 	/*
446 	 * We preallocated all necessary ivdata's, so pull one off and use
447 	 * it.
448 	 */
449 	spin_lock(&ivpriv->ivdata_spin);
450 	ivdata = ivpriv->ivdata_base;
451 	ivpriv->ivdata_base = ivdata->next;
452 	spin_unlock(&ivpriv->ivdata_spin);
453 
454 	KKASSERT(ivdata != NULL);
455 
456 	ivdata->ivpriv = ivpriv;
457 	ivdata->opaque = opaque;
458 	crp = &ivdata->crp;
459 	crd = &ivdata->crd;
460 
461 	bzero(iv, iv_len);
462 	bzero(crd, sizeof(struct cryptodesc));
463 	bzero(crp, sizeof(struct cryptop));
464 	*((off_t *)iv) = htole64(sector + priv->iv_offset);
465 	crp->crp_buf = (caddr_t)iv;
466 
467 	crp->crp_sid = ivpriv->crypto_sid;
468 	crp->crp_ilen = crp->crp_olen = iv_len;
469 
470 	crp->crp_opaque =  (caddr_t)ivdata;
471 
472 	crp->crp_callback = essiv_ivgen_done;
473 
474 	crp->crp_desc = crd;
475 	crp->crp_etype = 0;
476 	crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL | CRYPTO_F_BATCH;
477 
478 	crd->crd_alg = priv->crypto_alg;
479 #if 0
480 	crd->crd_key = (caddr_t)priv->crypto_keyhash;
481 	crd->crd_klen = priv->crypto_klen;
482 #endif
483 
484 	bzero(crd->crd_iv, sizeof(crd->crd_iv));
485 
486 	crd->crd_skip = 0;
487 	crd->crd_len = iv_len;
488 	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
489 	crd->crd_flags |= CRD_F_ENCRYPT;
490 	crd->crd_next = NULL;
491 
492 	error = crypto_dispatch(crp);
493 	if (error)
494 		kprintf("dm_target_crypt: essiv_ivgen, error = %d\n", error);
495 }
496 
497 
498 static void
499 plain_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
500 	    size_t iv_len, off_t sector, void *opaque)
501 {
502 	bzero(iv, iv_len);
503 	*((uint32_t *)iv) = htole32((uint32_t)(sector + priv->iv_offset));
504 	dmtc_crypto_dispatch(opaque);
505 }
506 
507 static void
508 plain64_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
509     size_t iv_len, off_t sector, void *opaque)
510 {
511 	bzero(iv, iv_len);
512 	*((uint64_t *)iv) = htole64((uint64_t)(sector + priv->iv_offset));
513 	dmtc_crypto_dispatch(opaque);
514 }
515 
516 #if 0
517 static void
518 geli_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
519 	   size_t iv_len, off_t sector, void *opaque)
520 {
521 
522 	SHA512_CTX	ctx512;
523 	u_int8_t	md[SHA512_DIGEST_LENGTH]; /* Max. Digest Size */
524 
525 	memcpy(&ctx512, &priv->essivsha512_ctx, sizeof(SHA512_CTX));
526 	SHA512_Update(&ctx512, (u_int8_t*)&sector, sizeof(off_t));
527 	SHA512_Final(md, &ctx512);
528 
529 	memcpy(iv, md, iv_len);
530 	dmtc_crypto_dispatch(opaque);
531 }
532 #endif
533 
/*
 * Helper for dm_target_crypt_init() below: convert an ASCII hex key
 * string into raw key bytes.  (The init function is called from
 * dm_table_load_ioctl; cryptsetup actually passes us this:
 * aes-cbc-essiv:sha256 7997f8af... 0 /dev/ad0s0a 8)
 */
/* Is c an ASCII hex digit?  (open-coded to avoid a ctype dependency) */
static __inline int
hex_is_digit(char c)
{
	return ((c >= '0' && c <= '9') ||
		(c >= 'a' && c <= 'f') ||
		(c >= 'A' && c <= 'F'));
}

/*
 * Convert an ASCII hex string into key_len raw key bytes.
 *
 * Returns 0 on success, or EINVAL if the string is shorter than
 * 2*key_len characters or contains a non-hex character.  (The old
 * code silently converted garbage to 0-bytes and could walk past the
 * NUL terminator on short/odd-length input.)
 */
static int
hex2key(char *hex, size_t key_len, u_int8_t *key)
{
	char hex_buf[3];
	size_t key_idx;

	hex_buf[2] = 0;
	for (key_idx = 0; key_idx < key_len; ++key_idx) {
		hex_buf[0] = hex[0];
		if (hex_buf[0] == 0)
			return EINVAL;		/* string too short */
		hex_buf[1] = hex[1];
		if (hex_buf[1] == 0)
			return EINVAL;		/* odd number of digits */
		if (!hex_is_digit(hex_buf[0]) || !hex_is_digit(hex_buf[1]))
			return EINVAL;		/* non-hex character */
		key[key_idx] = (u_int8_t)strtoul(hex_buf, NULL, 16);
		hex += 2;
	}
	/* Don't leave key fragments on the stack */
	hex_buf[0] = 0;
	hex_buf[1] = 0;

	return 0;
}
556 
557 static int
558 dm_target_crypt_init(dm_table_entry_t *table_en, int argc, char **argv)
559 {
560 	dm_target_crypt_config_t *priv;
561 	size_t len;
562 	char *crypto_alg, *crypto_mode, *iv_mode, *iv_opt, *key, *dev;
563 	char *status_str;
564 	int i, klen, error;
565 	uint64_t iv_offset, block_offset;
566 
567 	if (argc != 5) {
568 		kprintf("dm_target_crypt: not enough arguments, "
569 			"need exactly 5\n");
570 		return EINVAL;
571 	}
572 
573 	len = 0;
574 	for (i = 0; i < argc; i++) {
575 		len += strlen(argv[i]);
576 		len++;
577 	}
578 	/* len is strlen() of input string +1 */
579 	status_str = kmalloc(len, M_DMCRYPT, M_WAITOK);
580 
581 	crypto_alg = strsep(&argv[0], "-");
582 	crypto_mode = strsep(&argv[0], "-");
583 	iv_opt = strsep(&argv[0], "-");
584 	iv_mode = strsep(&iv_opt, ":");
585 	key = argv[1];
586 	iv_offset = strtouq(argv[2], NULL, 0);
587 	dev = argv[3];
588 	block_offset = strtouq(argv[4], NULL, 0);
589 	/* bits / 8 = bytes, 1 byte = 2 hexa chars, so << 2 */
590 	klen = strlen(key) << 2;
591 
592 #if 0
593 	kprintf("dm_target_crypt - new: dev=%s, crypto_alg=%s, crypto_mode=%s, "
594 		"iv_mode=%s, iv_opt=%s, key=%s, iv_offset=%ju, "
595 		"block_offset=%ju\n",
596 		dev, crypto_alg, crypto_mode, iv_mode, iv_opt, key, iv_offset,
597 		block_offset);
598 #endif
599 
600 	priv = kmalloc(sizeof(dm_target_crypt_config_t), M_DMCRYPT, M_WAITOK);
601 
602 	/* Insert dmp to global pdev list */
603 	if ((priv->pdev = dm_pdev_insert(dev)) == NULL) {
604 		kprintf("dm_target_crypt: dm_pdev_insert failed\n");
605 		kfree(status_str, M_DMCRYPT);
606 		return ENOENT;
607 	}
608 
609 	/*
610 	 * This code checks for valid combinations of algorithm and mode.
611 	 * Currently supported options are:
612 	 *
613 	 * *-cbc
614 	 * aes-xts
615 	 * twofish-xts
616 	 * serpent-xts
617 	 */
618 	if ((strcmp(crypto_mode, "cbc") != 0) &&
619 	    !((strcmp(crypto_mode, "xts") == 0) &&
620 	    ((strcmp(crypto_alg, "aes") == 0) ||
621 	    (strcmp(crypto_alg, "twofish") == 0) ||
622 	    (strcmp(crypto_alg, "serpent") == 0))))
623 	{
624 		kprintf("dm_target_crypt: only support 'cbc' chaining mode,"
625 		    " aes-xts, twofish-xts and serpent-xts, "
626 		    "invalid mode '%s-%s'\n",
627 		    crypto_alg, crypto_mode);
628 		goto notsup;
629 	}
630 
631 	if (!strcmp(crypto_alg, "aes")) {
632 		if (!strcmp(crypto_mode, "xts")) {
633 			priv->crypto_alg = CRYPTO_AES_XTS;
634 			if (klen != 256 && klen != 512)
635 				goto notsup;
636 		} else if (!strcmp(crypto_mode, "cbc")) {
637 			priv->crypto_alg = CRYPTO_AES_CBC;
638 			if (klen != 128 && klen != 192 && klen != 256)
639 				goto notsup;
640 		} else {
641 			goto notsup;
642 		}
643 		priv->crypto_klen = klen;
644 	} else if (!strcmp(crypto_alg, "twofish")) {
645 		if (!strcmp(crypto_mode, "xts")) {
646 			priv->crypto_alg = CRYPTO_TWOFISH_XTS;
647 			if (klen != 256 && klen != 512)
648 				goto notsup;
649 		} else if (!strcmp(crypto_mode, "cbc")) {
650 			priv->crypto_alg = CRYPTO_TWOFISH_CBC;
651 			if (klen != 128 && klen != 192 && klen != 256)
652 				goto notsup;
653 		} else {
654 			goto notsup;
655 		}
656 		priv->crypto_klen = klen;
657 	} else if (!strcmp(crypto_alg, "serpent")) {
658 		if (!strcmp(crypto_mode, "xts")) {
659 			priv->crypto_alg = CRYPTO_SERPENT_XTS;
660 			if (klen != 256 && klen != 512)
661 				goto notsup;
662 		} else if (!strcmp(crypto_mode, "cbc")) {
663 			priv->crypto_alg = CRYPTO_SERPENT_CBC;
664 			if (klen != 128 && klen != 192 && klen != 256)
665 				goto notsup;
666 		} else {
667 			goto notsup;
668 		}
669 		priv->crypto_klen = klen;
670 	} else if (!strcmp(crypto_alg, "blowfish")) {
671 		priv->crypto_alg = CRYPTO_BLF_CBC;
672 		if (klen < 128 || klen > 448 || (klen % 8) != 0)
673 			goto notsup;
674 		priv->crypto_klen = klen;
675 	} else if (!strcmp(crypto_alg, "3des") ||
676 		   !strncmp(crypto_alg, "des3", 4)) {
677 		priv->crypto_alg = CRYPTO_3DES_CBC;
678 		if (klen != 168)
679 			goto notsup;
680 		priv->crypto_klen = 168;
681 	} else if (!strcmp(crypto_alg, "camellia")) {
682 		priv->crypto_alg = CRYPTO_CAMELLIA_CBC;
683 		if (klen != 128 && klen != 192 && klen != 256)
684 			goto notsup;
685 		priv->crypto_klen = klen;
686 	} else if (!strcmp(crypto_alg, "skipjack")) {
687 		priv->crypto_alg = CRYPTO_SKIPJACK_CBC;
688 		if (klen != 80)
689 			goto notsup;
690 		priv->crypto_klen = 80;
691 	} else if (!strcmp(crypto_alg, "cast5")) {
692 		priv->crypto_alg = CRYPTO_CAST_CBC;
693 		if (klen != 128)
694 			goto notsup;
695 		priv->crypto_klen = 128;
696 	} else if (!strcmp(crypto_alg, "null")) {
697 		priv->crypto_alg = CRYPTO_NULL_CBC;
698 		if (klen != 128)
699 			goto notsup;
700 		priv->crypto_klen = 128;
701 	} else {
702 		kprintf("dm_target_crypt: Unsupported crypto algorithm: %s\n",
703 			crypto_alg);
704 		goto notsup;
705 	}
706 
707 	/* Save length of param string */
708 	priv->params_len = len;
709 	priv->block_offset = block_offset;
710 	priv->iv_offset = iv_offset - block_offset;
711 
712 	dm_table_add_deps(table_en, priv->pdev);
713 
714 	dm_table_init_target(table_en, priv);
715 
716 	error = hex2key(key, priv->crypto_klen >> 3,
717 			(u_int8_t *)priv->crypto_key);
718 
719 	if (error) {
720 		kprintf("dm_target_crypt: hex2key failed, "
721 			"invalid key format\n");
722 		goto notsup;
723 	}
724 
725 	/* Handle cmd */
726 	for(i = 0; ivgens[i].name != NULL; i++) {
727 		if (!strcmp(iv_mode, ivgens[i].name))
728 			break;
729 	}
730 
731 	if (ivgens[i].name == NULL) {
732 		kprintf("dm_target_crypt: iv_mode='%s' unsupported\n",
733 			iv_mode);
734 		goto notsup;
735 	}
736 
737 	/* Call our ivgen constructor */
738 	if (ivgens[i].ctor != NULL) {
739 		error = ivgens[i].ctor(priv, iv_opt,
740 		    &priv->ivgen_priv);
741 		if (error) {
742 			kprintf("dm_target_crypt: ctor for '%s' failed\n",
743 			    ivgens[i].name);
744 			goto notsup;
745 		}
746 	}
747 
748 	priv->ivgen = &ivgens[i];
749 
750 	priv->crypto_session.cri_alg = priv->crypto_alg;
751 	priv->crypto_session.cri_key = (u_int8_t *)priv->crypto_key;
752 	priv->crypto_session.cri_klen = priv->crypto_klen;
753 	priv->crypto_session.cri_mlen = 0;
754 	priv->crypto_session.cri_next = NULL;
755 
756 	error = crypto_newsession(&priv->crypto_sid,
757 				  &priv->crypto_session,
758 				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
759 	if (error) {
760 		kprintf("dm_target_crypt: Error during crypto_newsession, "
761 			"error = %d\n",
762 			error);
763 		goto notsup;
764 	}
765 
766 	memset(key, '0', strlen(key));
767 	if (iv_opt) {
768 		ksprintf(status_str, "%s-%s-%s:%s %s %ju %s %ju",
769 		    crypto_alg, crypto_mode, iv_mode, iv_opt,
770 		    key, iv_offset, dev, block_offset);
771 	} else {
772 		ksprintf(status_str, "%s-%s-%s %s %ju %s %ju",
773 		    crypto_alg, crypto_mode, iv_mode,
774 		    key, iv_offset, dev, block_offset);
775 	}
776 	priv->status_str = status_str;
777 
778 	/* Initialize mpipes */
779 	dmtc_init_mpipe(priv);
780 
781 	return 0;
782 
783 notsup:
784 	kprintf("dm_target_crypt: ENOTSUP\n");
785 	kfree(status_str, M_DMCRYPT);
786 	return ENOTSUP;
787 }
788 
789 /* Table routine called to get params string. */
790 static char *
791 dm_target_crypt_table(void *target_config)
792 {
793 	dm_target_crypt_config_t *priv;
794 	char *params;
795 
796 	priv = target_config;
797 
798 	params = dm_alloc_string(DM_MAX_PARAMS_SIZE);
799 
800 	ksnprintf(params, DM_MAX_PARAMS_SIZE, "%s",
801 	    priv->status_str);
802 
803 	return params;
804 }
805 
/*
 * Destroy a crypt target: drop the pdev reference, scrub and free all
 * key material, tear down the crypto sessions and the buffer mpipes.
 * Always returns 0.
 */
static int
dm_target_crypt_destroy(dm_table_entry_t *table_en)
{
	dm_target_crypt_config_t *priv;

	/*
	 * Disconnect the crypt config before unbusying the target.
	 */
	priv = table_en->target_config;
	if (priv == NULL)
		return 0;
	dm_pdev_decr(priv->pdev);

	/*
	 * Clean up the crypt config
	 *
	 * Overwrite the private information before freeing memory to
	 * avoid leaking it.
	 *
	 * NOTE(review): the data crypto session is only freed when
	 * status_str is set; both are set together late in a successful
	 * init, but the coupling is non-obvious.
	 */
	if (priv->status_str) {
		dmtc_crypto_clear(priv->status_str, strlen(priv->status_str));
		kfree(priv->status_str, M_DMCRYPT);
		crypto_freesession(priv->crypto_sid);
	}

	/* essiv has a dtor; plain/plain64 have none */
	if ((priv->ivgen) && (priv->ivgen->dtor != NULL)) {
		priv->ivgen->dtor(priv, priv->ivgen_priv);
	}

	/* Destroy mpipes */
	dmtc_destroy_mpipe(priv);

	/* Scrub the embedded key material before freeing */
	dmtc_crypto_clear(priv, sizeof(dm_target_crypt_config_t));
	kfree(priv, M_DMCRYPT);

	return 0;
}
843 
844 /************************************************************************
845  *			STRATEGY SUPPORT FUNCTIONS			*
846  ************************************************************************
847  *
848  * READ PATH:	doio -> bio_read_done -> crypto_work -> crypto_cb_read_done
849  * WRITE PATH:	crypto_work -> crypto_cb_write_done -> doio -> bio_write_done
850  */
851 
852 /*
853  * Wrapper around crypto_dispatch() to match dispatch_t type
854  */
855 static void
856 dmtc_crypto_dispatch(void *arg)
857 {
858 	struct cryptop *crp;
859 
860 	crp = (struct cryptop *)arg;
861 	KKASSERT(crp != NULL);
862 	KTR_LOG(dmcrypt_crypto_dispatch, crp);
863 	crypto_dispatch(crp);
864 }
865 
/*
 * Start IO operation, called from dmstrategy routine.
 *
 * Reads go to the backing device first and are decrypted in the bio
 * completion callback; writes are encrypted first and issued from the
 * crypto completion callback.  Other commands pass straight through.
 * Always returns 0 (errors are reported through the buf).
 */
static int
dm_target_crypt_strategy(dm_table_entry_t *table_en, struct buf *bp)
{
	struct bio *bio;

	dm_target_crypt_config_t *priv;
	priv = table_en->target_config;

	/* Get rid of stuff we can't really handle */
	if ((bp->b_cmd == BUF_CMD_READ) || (bp->b_cmd == BUF_CMD_WRITE)) {
		/* Only whole, non-empty sector multiples can be crypted */
		if (((bp->b_bcount % DEV_BSIZE) != 0) || (bp->b_bcount == 0)) {
			kprintf("dm_target_crypt_strategy: can't really "
				"handle bp->b_bcount = %d\n",
				bp->b_bcount);
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR | B_INVAL;
			biodone(&bp->b_bio1);
			return 0;
		}
	}

	KTR_LOG(dmcrypt_crypt_strategy, bp->b_cmd, bp);

	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		/* Read ciphertext, translated by block_offset; decrypt
		 * in dmtc_bio_read_done -> dmtc_crypto_read_start. */
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		bio->bio_done = dmtc_bio_read_done;
		vn_strategy(priv->pdev->pdev_vnode, bio);
		break;
	case BUF_CMD_WRITE:
		/* Encrypt first; the crypto callback issues the write */
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		dmtc_crypto_write_start(priv, bio);
		break;
	default:
		/* Pass through untranslated (no offset adjustment) */
		vn_strategy(priv->pdev->pdev_vnode, &bp->b_bio1);
		break;
	}
	return 0;
}
914 
915 /*
916  * STRATEGY READ PATH PART 1/3 (after read BIO completes)
917  */
918 static void
919 dmtc_bio_read_done(struct bio *bio)
920 {
921 	struct bio *obio;
922 
923 	dm_target_crypt_config_t *priv;
924 
925 	KTR_LOG(dmcrypt_bio_read_done, bio->bio_buf);
926 
927 	/*
928 	 * If a read error occurs we shortcut the operation, otherwise
929 	 * go on to stage 2.
930 	 */
931 	if (bio->bio_buf->b_flags & B_ERROR) {
932 		obio = pop_bio(bio);
933 		biodone(obio);
934 	} else {
935 		priv = bio->bio_caller_info1.ptr;
936 		dmtc_crypto_read_start(priv, bio);
937 	}
938 }
939 
940 /*
941  * STRATEGY READ PATH PART 2/3
942  */
943 static void
944 dmtc_crypto_read_retry(void *arg1, void *arg2)
945 {
946 	dm_target_crypt_config_t *priv = arg1;
947 	struct bio *bio = arg2;
948 
949 	dmtc_crypto_read_start(priv, bio);
950 }
951 
/*
 * Decrypt a completed read in place: allocate an mpipe buffer for the
 * helper + crp/crd descriptors, then submit one decryption op per
 * 512-byte sector.  dmtc_crypto_cb_read_done() counts the ops down
 * and completes the original bio.
 */
static void
dmtc_crypto_read_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Note: b_resid no good after read I/O, it will be 0, use
	 *	 b_bcount.
	 */
	bytes = bio->bio_buf->b_bcount;
	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For reads with bogus page we can't decrypt in place as stuff
	 * can get ripped out from under us.
	 *
	 * XXX actually it looks like we can, and in any case the initial
	 * read already completed and threw crypted data into the buffer
	 * cache buffer.  Disable for now.
	 */
	/*
	 * If no buffer is available, the mpipe calls
	 * dmtc_crypto_read_retry() when one frees up; just bail here.
	 */
	space = mpipe_alloc_callback(&priv->read_mpipe,
				     dmtc_crypto_read_retry, priv, bio);
	if (space == NULL)
		return;

	/* Helper header lives at the front of the mpipe buffer */
	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	dmtc->orig_buf = NULL;
	dmtc->data_buf = bio->bio_buf->b_data;	/* decrypt in place */
	dmtc->priv = priv;
	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	/* Completion countdown read by dmtc_crypto_cb_read_done() */
	bio->bio_caller_info3.value = sectors;
	cpu_sfence();
#if 0
	kprintf("Read, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		/* One crp/crd pair per sector, carved out of 'space' */
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof (*crd);

		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_read_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags &= ~CRD_F_ENCRYPT;	/* this is a decrypt */

		KTR_LOG(dmcrypt_crypto_read_start, crp, bio->bio_buf, i,
		    sectors);

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1050 
/*
 * STRATEGY READ PATH PART 3/3
 *
 * Crypto completion callback; runs once per decrypted sector.  The
 * last one (countdown in bio_caller_info3.value reaches 1) frees the
 * mpipe buffer and completes the original bio.
 */
static int
dmtc_crypto_cb_read_done(struct cryptop *crp)
{
	struct dmtc_helper *dmtc;
	struct bio *bio, *obio;
	int n;

	/* Driver asked for a retry */
	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	bio = (struct bio *)crp->crp_opaque;
	KKASSERT(bio != NULL);

	/*
	 * Cumulative error
	 */
	if (crp->crp_etype) {
		kprintf("dm_target_crypt: dmtc_crypto_cb_read_done "
			"crp_etype = %d\n",
			crp->crp_etype);
		bio->bio_buf->b_error = crp->crp_etype;
	}

	/*
	 * On the last chunk of the decryption we do any required copybacks
	 * and complete the I/O.
	 */
	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
#if 0
	kprintf("dmtc_crypto_cb_read_done %p, n = %d\n", bio, n);
#endif

	KTR_LOG(dmcrypt_crypto_cb_read_done, crp, bio->bio_buf, n);

	if (n == 1) {
		/*
		 * For the B_HASBOGUS case we didn't decrypt in place,
		 * so we need to copy stuff back into the buf.
		 *
		 * (disabled for now).
		 */
		dmtc = bio->bio_caller_info2.ptr;
		if (bio->bio_buf->b_error) {
			bio->bio_buf->b_flags |= B_ERROR;
		}
#if 0
		else if (bio->bio_buf->b_flags & B_HASBOGUS) {
			memcpy(bio->bio_buf->b_data, dmtc->data_buf,
			       bio->bio_buf->b_bcount);
		}
#endif
		mpipe_free(&dmtc->priv->read_mpipe, dmtc->free_addr);
		obio = pop_bio(bio);
		biodone(obio);
	}
	return 0;
}
/* END OF STRATEGY READ SECTION */
1112 
1113 /*
1114  * STRATEGY WRITE PATH PART 1/3
1115  */
1116 
1117 static void
1118 dmtc_crypto_write_retry(void *arg1, void *arg2)
1119 {
1120 	dm_target_crypt_config_t *priv = arg1;
1121 	struct bio *bio = arg2;
1122 
1123 	KTR_LOG(dmcrypt_crypto_write_retry, bio->bio_buf);
1124 
1125 	dmtc_crypto_write_start(priv, bio);
1126 }
1127 
/*
 * Start encryption for a write bio.  A scratch area is allocated from
 * the write mpipe, laid out as:
 *
 *	[struct dmtc_helper][crp/crd descriptor array (sz)][data copy]
 *
 * The plaintext is copied into the tail of the scratch area and
 * encrypted out-of-place there, so the caller's buffer is never
 * modified.  One crypto operation is queued per DEV_BSIZE sector.
 *
 * If the mpipe is exhausted, mpipe_alloc_callback() registers
 * dmtc_crypto_write_retry() and we return; the retry callback
 * re-enters this function when space frees up.
 */
static void
dmtc_crypto_write_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Use b_bcount for consistency
	 */
	bytes = bio->bio_buf->b_bcount;

	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	/* Scratch bytes needed for the per-sector crp/crd descriptors. */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For writes and reads with bogus page don't decrypt in place.
	 */
	space = mpipe_alloc_callback(&priv->write_mpipe,
				     dmtc_crypto_write_retry, priv, bio);
	if (space == NULL)
		return;

	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	/* Copy the plaintext past the descriptor array (see layout above). */
	memcpy(space + sz, bio->bio_buf->b_data, bytes);

	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	dmtc->orig_buf = bio->bio_buf->b_data;
	dmtc->data_buf = space + sz;
	dmtc->priv = priv;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	bio->bio_caller_info3.value = sectors;
	/*
	 * Make the sector countdown and helper fields globally visible
	 * before any crypto op can complete on another cpu.
	 */
	cpu_sfence();
#if 0
	kprintf("Write, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof (*crd);

		/* Encrypt out-of-place in the scratch data copy. */
		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_write_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */

		KTR_LOG(dmcrypt_crypto_write_start, crp, bio->bio_buf,
		    i, sectors);

		/*
		 * NOTE(review): no explicit crypto_dispatch() here; the
		 * ivgen appears responsible for dispatching crp after
		 * generating the IV — confirm against the ivgen impls.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1225 
1226 /*
1227  * STRATEGY WRITE PATH PART 2/3
1228  */
1229 static int
1230 dmtc_crypto_cb_write_done(struct cryptop *crp)
1231 {
1232 	struct dmtc_helper *dmtc;
1233 	dm_target_crypt_config_t *priv;
1234 	struct bio *bio, *obio;
1235 	int n;
1236 
1237 	if (crp->crp_etype == EAGAIN)
1238 		return crypto_dispatch(crp);
1239 
1240 	bio = (struct bio *)crp->crp_opaque;
1241 	KKASSERT(bio != NULL);
1242 
1243 	/*
1244 	 * Cumulative error
1245 	 */
1246 	if (crp->crp_etype != 0) {
1247 		kprintf("dm_target_crypt: dmtc_crypto_cb_write_done "
1248 			"crp_etype = %d\n",
1249 		crp->crp_etype);
1250 		bio->bio_buf->b_error = crp->crp_etype;
1251 	}
1252 
1253 	/*
1254 	 * On the last chunk of the encryption we issue the write
1255 	 */
1256 	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
1257 #if 0
1258 	kprintf("dmtc_crypto_cb_write_done %p, n = %d\n", bio, n);
1259 #endif
1260 
1261 	KTR_LOG(dmcrypt_crypto_cb_write_done, crp, bio->bio_buf, n);
1262 
1263 	if (n == 1) {
1264 		dmtc = bio->bio_caller_info2.ptr;
1265 		priv = (dm_target_crypt_config_t *)bio->bio_caller_info1.ptr;
1266 
1267 		if (bio->bio_buf->b_error) {
1268 			bio->bio_buf->b_flags |= B_ERROR;
1269 			mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);
1270 			obio = pop_bio(bio);
1271 			biodone(obio);
1272 		} else {
1273 			dmtc->orig_buf = bio->bio_buf->b_data;
1274 			bio->bio_buf->b_data = dmtc->data_buf;
1275 			bio->bio_done = dmtc_bio_write_done;
1276 			vn_strategy(priv->pdev->pdev_vnode, bio);
1277 		}
1278 	}
1279 	return 0;
1280 }
1281 
1282 /*
1283  * STRATEGY WRITE PATH PART 3/3
1284  */
1285 static void
1286 dmtc_bio_write_done(struct bio *bio)
1287 {
1288 	struct dmtc_helper *dmtc;
1289 	struct bio *obio;
1290 
1291 	dmtc = bio->bio_caller_info2.ptr;
1292 	bio->bio_buf->b_data = dmtc->orig_buf;
1293 	mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);
1294 
1295 	KTR_LOG(dmcrypt_bio_write_done, bio->bio_buf);
1296 
1297 	obio = pop_bio(bio);
1298 	biodone(obio);
1299 }
1300 /* END OF STRATEGY WRITE SECTION */
1301 
1302 
1303 
1304 /* DUMPING MAGIC */
1305 
1306 extern int tsleep_crypto_dump;
1307 
/*
 * Crash-dump entry point for the crypt target.  Encrypts `length'
 * bytes from `data' and writes the ciphertext to the backing device
 * at `offset'.  The crypto request is made to appear synchronous via
 * the id/tsleep handshake below.
 *
 * Static locals are used to avoid stack/heap use in dump context;
 * presumably the dump path is single-threaded — TODO confirm.
 */
static int
dm_target_crypt_dump(dm_table_entry_t *table_en, void *data, size_t length, off_t offset)
{
	static struct dmtc_dump_helper dump_helper;
	dm_target_crypt_config_t *priv;
	int id;	/* completion flag: set to 1 by the crypto callback */
	static int first_call = 1;

	priv = table_en->target_config;

	if (first_call) {
		first_call = 0;
		dump_reactivate_cpus();
	}

	/* Magically enable tsleep */
	tsleep_crypto_dump = 1;
	id = 0;

	/*
	 * 0 length means flush buffers and return
	 */
	if (length == 0) {
		if (priv->pdev->pdev_vnode->v_rdev == NULL) {
			tsleep_crypto_dump = 0;
			return ENXIO;
		}
		/* Pass the flush (length 0) straight to the device. */
		dev_ddump(priv->pdev->pdev_vnode->v_rdev,
		    data, 0, offset, 0);
		tsleep_crypto_dump = 0;
		return 0;
	}

	bzero(&dump_helper, sizeof(dump_helper));
	dump_helper.priv = priv;
	dump_helper.data = data;
	dump_helper.length = length;
	/* Bias by the target's block offset into the backing store. */
	dump_helper.offset = offset +
	    priv->block_offset * DEV_BSIZE;
	dump_helper.ident = &id;
	dmtc_crypto_dump_start(priv, &dump_helper);

	/*
	 * Hackery to make stuff appear synchronous. The crypto callback will
	 * set id to 1 and call wakeup on it. If the request completed
	 * synchronously, id will be 1 and we won't bother to sleep. If not,
	 * the crypto request will complete asynchronously and we sleep until
	 * it's done.
	 */
	if (id == 0)
		tsleep(&dump_helper, 0, "cryptdump", 0);

	dump_helper.offset = dm_pdev_correct_dump_offset(priv->pdev,
	    dump_helper.offset);

	/* Write the encrypted copy held in the helper's static space. */
	dev_ddump(priv->pdev->pdev_vnode->v_rdev,
	    dump_helper.space, 0, dump_helper.offset,
	    dump_helper.length);

	tsleep_crypto_dump = 0;
	return 0;
}
1370 
/*
 * Encrypt the dump data described by *dump_helper, one crypto op per
 * DEV_BSIZE sector.  Unlike the strategy write path, no mpipe
 * allocation is done here: the crp/crd descriptors and the bounce
 * space are embedded in the (static) dump helper, so the KKASSERT
 * below presumably matches the size of dump_helper->space — TODO
 * confirm against struct dmtc_dump_helper.
 */
static void
dmtc_crypto_dump_start(dm_target_crypt_config_t *priv, struct dmtc_dump_helper *dump_helper)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors;
	off_t isector;

	bytes = dump_helper->length;

	isector = dump_helper->offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	/* Countdown consumed by dmtc_crypto_cb_dump_done(). */
	dump_helper->sectors = sectors;
#if 0
	kprintf("Dump, bytes = %d, "
		"sectors = %d, LENGTH=%zu\n", bytes, sectors, dump_helper->length);
#endif
	KKASSERT(dump_helper->length <= 65536);

	/* Encrypt out-of-place: work on a copy in the helper's space. */
	memcpy(dump_helper->space, dump_helper->data, bytes);

	/*
	 * Make the sector countdown visible before any crypto op can
	 * complete on another cpu.
	 */
	cpu_sfence();

	for (i = 0; i < sectors; i++) {
		crp = &dump_helper->crp[i];
		crd = &dump_helper->crd[i];

		crp->crp_buf = dump_helper->space + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)dump_helper;

		crp->crp_callback = dmtc_crypto_cb_dump_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1430 
1431 static int
1432 dmtc_crypto_cb_dump_done(struct cryptop *crp)
1433 {
1434 	struct dmtc_dump_helper *dump_helper;
1435 	int n;
1436 
1437 	if (crp->crp_etype == EAGAIN)
1438 		return crypto_dispatch(crp);
1439 
1440 	dump_helper = (struct dmtc_dump_helper *)crp->crp_opaque;
1441 	KKASSERT(dump_helper != NULL);
1442 
1443 	if (crp->crp_etype != 0) {
1444 		kprintf("dm_target_crypt: dmtc_crypto_cb_dump_done "
1445 			"crp_etype = %d\n",
1446 		crp->crp_etype);
1447 		return crp->crp_etype;
1448 	}
1449 
1450 	/*
1451 	 * On the last chunk of the encryption we return control
1452 	 */
1453 	n = atomic_fetchadd_int(&dump_helper->sectors, -1);
1454 
1455 	if (n == 1) {
1456 		atomic_add_int(dump_helper->ident, 1);
1457 		wakeup(dump_helper);
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 static int
1464 dmtc_mod_handler(module_t mod, int type, void *unused)
1465 {
1466 	dm_target_t *dmt = NULL;
1467 	int err = 0;
1468 
1469 	switch (type) {
1470 	case MOD_LOAD:
1471 		if ((dmt = dm_target_lookup("crypt")) != NULL) {
1472 			dm_target_unbusy(dmt);
1473 			return EEXIST;
1474 		}
1475 		dmt = dm_target_alloc("crypt");
1476 		dmt->version[0] = 1;
1477 		dmt->version[1] = 6;
1478 		dmt->version[2] = 0;
1479 		dmt->init = &dm_target_crypt_init;
1480 		dmt->destroy = &dm_target_crypt_destroy;
1481 		dmt->strategy = &dm_target_crypt_strategy;
1482 		dmt->table = &dm_target_crypt_table;
1483 		dmt->dump = &dm_target_crypt_dump;
1484 
1485 		err = dm_target_insert(dmt);
1486 		if (!err)
1487 			kprintf("dm_target_crypt: Successfully initialized\n");
1488 		break;
1489 
1490 	case MOD_UNLOAD:
1491 		err = dm_target_remove("crypt");
1492 		if (err == 0) {
1493 			kprintf("dm_target_crypt: unloaded\n");
1494 		}
1495 		break;
1496 	}
1497 
1498 	return err;
1499 }
1500 
/* Register the "crypt" target module with the device-mapper framework. */
DM_TARGET_MODULE(dm_target_crypt, dmtc_mod_handler);
1502