/* $NetBSD: cgd_crypto.c,v 1.26 2020/06/29 23:44:01 riastradh Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *  Crypto Framework For cgd.c
 *
 *	This framework is temporary and awaits a more complete
 *	kernel wide crypto implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd_crypto.c,v 1.26 2020/06/29 23:44:01 riastradh Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <dev/cgd_crypto.h>

#include <crypto/adiantum/adiantum.h>
#include <crypto/aes/aes.h>
#include <crypto/blowfish/blowfish.h>
#include <crypto/des/des.h>

/*
 * The general framework provides only one generic function.
 * It takes the name of an algorithm and returns a struct cryptfuncs *
 * for it.  It is up to the initialisation routines of the algorithm
 * to check key size and block size.  (An illustrative usage sketch
 * follows cryptfuncs_find() below.)
 */

static cfunc_init		cgd_cipher_aes_cbc_init;
static cfunc_destroy		cgd_cipher_aes_cbc_destroy;
static cfunc_cipher		cgd_cipher_aes_cbc;

static cfunc_init		cgd_cipher_aes_xts_init;
static cfunc_destroy		cgd_cipher_aes_xts_destroy;
static cfunc_cipher		cgd_cipher_aes_xts;

static cfunc_init		cgd_cipher_3des_init;
static cfunc_destroy		cgd_cipher_3des_destroy;
static cfunc_cipher		cgd_cipher_3des_cbc;

static cfunc_init		cgd_cipher_bf_init;
static cfunc_destroy		cgd_cipher_bf_destroy;
static cfunc_cipher		cgd_cipher_bf_cbc;

static cfunc_init		cgd_cipher_adiantum_init;
static cfunc_destroy		cgd_cipher_adiantum_destroy;
static cfunc_cipher		cgd_cipher_adiantum_crypt;

static const struct cryptfuncs cf[] = {
	{
		.cf_name	= "aes-xts",
		.cf_init	= cgd_cipher_aes_xts_init,
		.cf_destroy	= cgd_cipher_aes_xts_destroy,
		.cf_cipher	= cgd_cipher_aes_xts,
	},
	{
		.cf_name	= "aes-cbc",
		.cf_init	= cgd_cipher_aes_cbc_init,
		.cf_destroy	= cgd_cipher_aes_cbc_destroy,
		.cf_cipher	= cgd_cipher_aes_cbc,
	},
	{
		.cf_name	= "3des-cbc",
		.cf_init	= cgd_cipher_3des_init,
		.cf_destroy	= cgd_cipher_3des_destroy,
		.cf_cipher	= cgd_cipher_3des_cbc,
	},
	{
		.cf_name	= "blowfish-cbc",
		.cf_init	= cgd_cipher_bf_init,
		.cf_destroy	= cgd_cipher_bf_destroy,
		.cf_cipher	= cgd_cipher_bf_cbc,
	},
	{
		.cf_name	= "adiantum",
		.cf_init	= cgd_cipher_adiantum_init,
		.cf_destroy	= cgd_cipher_adiantum_destroy,
		.cf_cipher	= cgd_cipher_adiantum_crypt,
	},
};
const struct cryptfuncs *
cryptfuncs_find(const char *alg)
{

	for (size_t i = 0; i < __arraycount(cf); i++)
		if (strcmp(cf[i].cf_name, alg) == 0)
			return &cf[i];

	return NULL;
}
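
/*
 * Illustrative sketch only (not part of the original source): how a
 * caller such as cgd.c might drive the cryptfuncs table above.  Key
 * lengths and block sizes are given in bits, and passing (size_t)-1
 * as the block size asks the cipher for its default.  The function
 * and variable names here are hypothetical, and the key, block
 * number, and sector contents are placeholders.
 */
#if 0
static int
example_encrypt_one_sector(void)
{
	const struct cryptfuncs *cf;
	uint8_t key[64];	/* 512-bit aes-xts key (two AES-256 keys) */
	uint8_t blkno[16];	/* 128-bit block-number block for the tweak */
	uint8_t sector[512];	/* one disk sector, encrypted in place */
	size_t blocksize = (size_t)-1;	/* accept the cipher's default */
	void *priv;

	cf = cryptfuncs_find("aes-xts");
	if (cf == NULL)
		return EINVAL;

	memset(key, 0, sizeof(key));	/* a real caller derives this from the user's key */
	memset(blkno, 0, sizeof(blkno));
	memset(sector, 0, sizeof(sector));

	priv = cf->cf_init(/*keylen (bits)*/512, key, &blocksize);
	if (priv == NULL)
		return EINVAL;

	/* Encrypt the sector, keyed to its block number. */
	cf->cf_cipher(priv, sector, sector, sizeof(sector), blkno,
	    CGD_CIPHER_ENCRYPT);

	cf->cf_destroy(priv);
	return 0;
}
#endif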

/*
 *  AES Framework
 */

struct aes_privdata {
	struct aesenc	ap_enckey;
	struct aesdec	ap_deckey;
	uint32_t	ap_nrounds;
};

static void *
cgd_cipher_aes_cbc_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	aes_privdata *ap;

	if (!blocksize)
		return NULL;
	if (keylen != 128 && keylen != 192 && keylen != 256)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;
	ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
	switch (keylen) {
	case 128:
		aes_setenckey128(&ap->ap_enckey, key);
		aes_setdeckey128(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_128_NROUNDS;
		break;
	case 192:
		aes_setenckey192(&ap->ap_enckey, key);
		aes_setdeckey192(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_192_NROUNDS;
		break;
	case 256:
		aes_setenckey256(&ap->ap_enckey, key);
		aes_setdeckey256(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_256_NROUNDS;
		break;
	}
	return ap;
}

static void
cgd_cipher_aes_cbc_destroy(void *data)
{
	struct aes_privdata *apd = data;

	explicit_memset(apd, 0, sizeof(*apd));
	kmem_free(apd, sizeof(*apd));
}

static void
cgd_cipher_aes_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct aes_privdata	*apd = privdata;
	uint8_t iv[CGD_AES_BLOCK_SIZE] __aligned(CGD_AES_BLOCK_SIZE) = {0};

	/* Compute the CBC IV as AES_k(blkno).  */
	aes_enc(&apd->ap_enckey, blkno, iv, apd->ap_nrounds);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		aes_cbc_enc(&apd->ap_enckey, src, dst, nbytes, iv,
		    apd->ap_nrounds);
		break;
	case CGD_CIPHER_DECRYPT:
		aes_cbc_dec(&apd->ap_deckey, src, dst, nbytes, iv,
		    apd->ap_nrounds);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * AES-XTS
 */

struct aesxts {
	struct aesenc	ax_enckey;
	struct aesdec	ax_deckey;
	struct aesenc	ax_tweakkey;
	uint32_t	ax_nrounds;
};

static void *
cgd_cipher_aes_xts_init(size_t keylen, const void *xtskey, size_t *blocksize)
{
	struct aesxts *ax;
	const char *key, *key2; /* XTS key is two AES keys; worked example below. */

	if (!blocksize)
		return NULL;
	if (keylen != 256 && keylen != 512)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;

	ax = kmem_zalloc(sizeof(*ax), KM_SLEEP);
	keylen /= 2;
	key = xtskey;
	key2 = key + keylen / CHAR_BIT;

	switch (keylen) {
	case 128:
		aes_setenckey128(&ax->ax_enckey, key);
		aes_setdeckey128(&ax->ax_deckey, key);
		aes_setenckey128(&ax->ax_tweakkey, key2);
		ax->ax_nrounds = AES_128_NROUNDS;
		break;
	case 256:
		aes_setenckey256(&ax->ax_enckey, key);
		aes_setdeckey256(&ax->ax_deckey, key);
		aes_setenckey256(&ax->ax_tweakkey, key2);
		ax->ax_nrounds = AES_256_NROUNDS;
		break;
	}

	return ax;
}
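
/*
 * Worked example of the key split above (illustrative, not from the
 * original source): a 512-bit "aes-xts" key is two concatenated AES
 * keys, so keylen /= 2 gives 256 bits per half and
 * key2 = key + 256 / CHAR_BIT = key + 32 bytes; the first half keys
 * the data cipher and the second half keys the tweak.  For a 256-bit
 * XTS key the halves are AES-128 keys and key2 = key + 16 bytes.
 */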

static void
cgd_cipher_aes_xts_destroy(void *cookie)
{
	struct aesxts *ax = cookie;

	explicit_memset(ax, 0, sizeof(*ax));
	kmem_free(ax, sizeof(*ax));
}

static void
cgd_cipher_aes_xts(void *cookie, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct aesxts *ax = cookie;
	uint8_t tweak[CGD_AES_BLOCK_SIZE];

	/* Compute the initial tweak as AES_k(blkno).  */
	aes_enc(&ax->ax_tweakkey, blkno, tweak, ax->ax_nrounds);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		aes_xts_enc(&ax->ax_enckey, src, dst, nbytes, tweak,
		    ax->ax_nrounds);
		break;
	case CGD_CIPHER_DECRYPT:
		aes_xts_dec(&ax->ax_deckey, src, dst, nbytes, tweak,
		    ax->ax_nrounds);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * 3DES Framework
 */

struct c3des_privdata {
	des_key_schedule	cp_key1;
	des_key_schedule	cp_key2;
	des_key_schedule	cp_key3;
};

static void *
cgd_cipher_3des_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	c3des_privdata *cp;
	int	error = 0;
	des_cblock *block;

	if (!blocksize)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 64;
	if (keylen != (DES_KEY_SZ * 3 * 8) || *blocksize != 64)
		return NULL;
	cp = kmem_zalloc(sizeof(*cp), KM_SLEEP);
	block = __UNCONST(key);
	error  = des_key_sched(block, cp->cp_key1);
	error |= des_key_sched(block + 1, cp->cp_key2);
	error |= des_key_sched(block + 2, cp->cp_key3);
	if (error) {
		explicit_memset(cp, 0, sizeof(*cp));
		kmem_free(cp, sizeof(*cp));
		return NULL;
	}
	return cp;
}

static void
cgd_cipher_3des_destroy(void *data)
{
	struct c3des_privdata *cp = data;

	explicit_memset(cp, 0, sizeof(*cp));
	kmem_free(cp, sizeof(*cp));
}

static void
cgd_cipher_3des_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct	c3des_privdata *cp = privdata;
	des_cblock zero;
	uint8_t iv[CGD_3DES_BLOCK_SIZE];

	/* Compute the CBC IV as 3DES_k(blkno) = 3DES-CBC_k(iv=blkno, 0).  */
	memset(&zero, 0, sizeof(zero));
	des_ede3_cbc_encrypt(blkno, iv, CGD_3DES_BLOCK_SIZE,
	    cp->cp_key1, cp->cp_key2, cp->cp_key3, &zero, /*encrypt*/1);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		des_ede3_cbc_encrypt(src, dst, nbytes,
		    cp->cp_key1, cp->cp_key2, cp->cp_key3,
		    (des_cblock *)iv, /*encrypt*/1);
		break;
	case CGD_CIPHER_DECRYPT:
		des_ede3_cbc_encrypt(src, dst, nbytes,
		    cp->cp_key1, cp->cp_key2, cp->cp_key3,
		    (des_cblock *)iv, /*encrypt*/0);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * Blowfish Framework
 */

struct bf_privdata {
	BF_KEY	bp_key;
};

struct bf_encdata {
	BF_KEY		*be_key;
	uint8_t		 be_iv[CGD_BF_BLOCK_SIZE];
};

static void *
cgd_cipher_bf_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	bf_privdata *bp;

	if (!blocksize)
		return NULL;
	if (keylen < 40 || keylen > 448 || (keylen % 8 != 0))
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 64;
	if (*blocksize != 64)
		return NULL;
	bp = kmem_zalloc(sizeof(*bp), KM_SLEEP);
	if (!bp)
		return NULL;
	BF_set_key(&bp->bp_key, keylen / 8, key);
	return bp;
}

static void
cgd_cipher_bf_destroy(void *data)
{
	struct	bf_privdata *bp = data;

	explicit_memset(bp, 0, sizeof(*bp));
	kmem_free(bp, sizeof(*bp));
}

static void
cgd_cipher_bf_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct	bf_privdata *bp = privdata;
	uint8_t zero[CGD_BF_BLOCK_SIZE], iv[CGD_BF_BLOCK_SIZE];

	/* Compute the CBC IV as Blowfish_k(blkno) = BF_CBC_k(blkno, 0).  */
	memset(zero, 0, sizeof(zero));
	BF_cbc_encrypt(blkno, iv, CGD_BF_BLOCK_SIZE, &bp->bp_key, zero,
	    /*encrypt*/1);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
		    /*encrypt*/1);
		break;
	case CGD_CIPHER_DECRYPT:
		BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
		    /*encrypt*/0);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * Adiantum
 */

static void *
cgd_cipher_adiantum_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct adiantum *A;

	if (!blocksize)
		return NULL;
	if (keylen != 256)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;

	A = kmem_zalloc(sizeof(*A), KM_SLEEP);
	adiantum_init(A, key);

	return A;
}

static void
cgd_cipher_adiantum_destroy(void *cookie)
{
	struct adiantum *A = cookie;

	explicit_memset(A, 0, sizeof(*A));
	kmem_free(A, sizeof(*A));
}

static void
cgd_cipher_adiantum_crypt(void *cookie, void *dst, const void *src,
    size_t nbytes, const void *blkno, int dir)
{
	/*
	 * Treat the block number as a 128-bit block.  This is more
	 * than twice as big as the largest number of reasonable
	 * blocks, but it doesn't hurt (it would be rounded up to a
	 * 128-bit input anyway).  (A standalone sketch of these
	 * Adiantum calls follows this function.)
	 */
	const unsigned tweaklen = 16;
	struct adiantum *A = cookie;

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		adiantum_enc(dst, src, nbytes, blkno, tweaklen, A);
		break;
	case CGD_CIPHER_DECRYPT:
		adiantum_dec(dst, src, nbytes, blkno, tweaklen, A);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}
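
/*
 * Illustrative sketch only (not from the original source): a direct
 * Adiantum round trip using the same 16-byte block-number tweak as
 * above.  The function name and the key, tweak, and sector contents
 * are placeholders; cgd itself always goes through the cryptfuncs
 * table rather than calling these routines directly.
 */
#if 0
static void
example_adiantum_roundtrip(void)
{
	static uint8_t key[32];		/* 256-bit Adiantum key */
	static uint8_t blkno[16];	/* 128-bit block-number tweak */
	static uint8_t pt[512], ct[512];	/* one disk sector */
	struct adiantum *A;

	A = kmem_zalloc(sizeof(*A), KM_SLEEP);
	adiantum_init(A, key);

	adiantum_enc(ct, pt, sizeof(pt), blkno, /*tweaklen*/16, A);
	adiantum_dec(pt, ct, sizeof(ct), blkno, /*tweaklen*/16, A);
	/* pt is now back to the original plaintext. */

	explicit_memset(A, 0, sizeof(*A));
	kmem_free(A, sizeof(*A));
}
#endif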