/* $NetBSD: cgd_crypto.c,v 1.27 2020/07/25 22:14:35 riastradh Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *  Crypto Framework For cgd.c
 *
 *	This framework is temporary and awaits a more complete
 *	kernel-wide crypto implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd_crypto.c,v 1.27 2020/07/25 22:14:35 riastradh Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <dev/cgd_crypto.h>

#include <crypto/adiantum/adiantum.h>
#include <crypto/aes/aes.h>
#include <crypto/aes/aes_cbc.h>
#include <crypto/aes/aes_xts.h>
#include <crypto/blowfish/blowfish.h>
#include <crypto/des/des.h>

/*
 * The general framework provides only one generic function.
 * It takes the name of an algorithm and returns a struct cryptfuncs *
 * for it.  It is up to the initialisation routines of the algorithm
 * to check key size and block size.
 */
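
/*
 * Conventions used by the per-algorithm hooks below (summarising the
 * implementations, not adding requirements): cf_init takes the key
 * length in bits and a pointer to the block size in bits, where
 * (size_t)-1 selects the algorithm's default; it returns an opaque
 * private-data pointer, or NULL if the key or block size is not
 * supported.  cf_cipher transforms nbytes from src to dst, deriving
 * its IV or tweak from the block number, with dir being either
 * CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT.  cf_destroy zeroes and
 * frees the private data.
 */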

static cfunc_init		cgd_cipher_aes_cbc_init;
static cfunc_destroy		cgd_cipher_aes_cbc_destroy;
static cfunc_cipher		cgd_cipher_aes_cbc;

static cfunc_init		cgd_cipher_aes_xts_init;
static cfunc_destroy		cgd_cipher_aes_xts_destroy;
static cfunc_cipher		cgd_cipher_aes_xts;

static cfunc_init		cgd_cipher_3des_init;
static cfunc_destroy		cgd_cipher_3des_destroy;
static cfunc_cipher		cgd_cipher_3des_cbc;

static cfunc_init		cgd_cipher_bf_init;
static cfunc_destroy		cgd_cipher_bf_destroy;
static cfunc_cipher		cgd_cipher_bf_cbc;

static cfunc_init		cgd_cipher_adiantum_init;
static cfunc_destroy		cgd_cipher_adiantum_destroy;
static cfunc_cipher		cgd_cipher_adiantum_crypt;

static const struct cryptfuncs cf[] = {
	{
		.cf_name	= "aes-xts",
		.cf_init	= cgd_cipher_aes_xts_init,
		.cf_destroy	= cgd_cipher_aes_xts_destroy,
		.cf_cipher	= cgd_cipher_aes_xts,
	},
	{
		.cf_name	= "aes-cbc",
		.cf_init	= cgd_cipher_aes_cbc_init,
		.cf_destroy	= cgd_cipher_aes_cbc_destroy,
		.cf_cipher	= cgd_cipher_aes_cbc,
	},
	{
		.cf_name	= "3des-cbc",
		.cf_init	= cgd_cipher_3des_init,
		.cf_destroy	= cgd_cipher_3des_destroy,
		.cf_cipher	= cgd_cipher_3des_cbc,
	},
	{
		.cf_name	= "blowfish-cbc",
		.cf_init	= cgd_cipher_bf_init,
		.cf_destroy	= cgd_cipher_bf_destroy,
		.cf_cipher	= cgd_cipher_bf_cbc,
	},
	{
		.cf_name	= "adiantum",
		.cf_init	= cgd_cipher_adiantum_init,
		.cf_destroy	= cgd_cipher_adiantum_destroy,
		.cf_cipher	= cgd_cipher_adiantum_crypt,
	},
};

const struct cryptfuncs *
cryptfuncs_find(const char *alg)
{

	for (size_t i = 0; i < __arraycount(cf); i++)
		if (strcmp(cf[i].cf_name, alg) == 0)
			return &cf[i];

	return NULL;
}
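
/*
 * Illustrative sketch only: roughly how a consumer such as cgd.c is
 * expected to drive the table above.  The key, src/dst buffers, block
 * number, 512-byte request size, and error values here are
 * placeholders assumed for the example, not part of this file.
 */
#if 0
	const struct cryptfuncs *cfuncs;
	size_t blocksize = (size_t)-1;		/* take the algorithm default */
	void *priv;

	cfuncs = cryptfuncs_find("aes-xts");
	if (cfuncs == NULL)
		return ENOENT;
	priv = cfuncs->cf_init(/*keylen (bits)*/256, key, &blocksize);
	if (priv == NULL)
		return EINVAL;
	cfuncs->cf_cipher(priv, dst, src, /*nbytes*/512, blkno,
	    CGD_CIPHER_ENCRYPT);
	cfuncs->cf_destroy(priv);
#endif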

/*
 *  AES Framework
 */

struct aes_privdata {
	struct aesenc	ap_enckey;
	struct aesdec	ap_deckey;
	uint32_t	ap_nrounds;
};

static void *
cgd_cipher_aes_cbc_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	aes_privdata *ap;

	if (!blocksize)
		return NULL;
	if (keylen != 128 && keylen != 192 && keylen != 256)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;
	ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
	switch (keylen) {
	case 128:
		aes_setenckey128(&ap->ap_enckey, key);
		aes_setdeckey128(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_128_NROUNDS;
		break;
	case 192:
		aes_setenckey192(&ap->ap_enckey, key);
		aes_setdeckey192(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_192_NROUNDS;
		break;
	case 256:
		aes_setenckey256(&ap->ap_enckey, key);
		aes_setdeckey256(&ap->ap_deckey, key);
		ap->ap_nrounds = AES_256_NROUNDS;
		break;
	}
	return ap;
}

static void
cgd_cipher_aes_cbc_destroy(void *data)
{
	struct aes_privdata *apd = data;

	explicit_memset(apd, 0, sizeof(*apd));
	kmem_free(apd, sizeof(*apd));
}

static void
cgd_cipher_aes_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct aes_privdata	*apd = privdata;
	uint8_t iv[CGD_AES_BLOCK_SIZE] __aligned(CGD_AES_BLOCK_SIZE) = {0};

	/* Compute the CBC IV as AES_k(blkno).  */
	aes_enc(&apd->ap_enckey, blkno, iv, apd->ap_nrounds);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		aes_cbc_enc(&apd->ap_enckey, src, dst, nbytes, iv,
		    apd->ap_nrounds);
		break;
	case CGD_CIPHER_DECRYPT:
		aes_cbc_dec(&apd->ap_deckey, src, dst, nbytes, iv,
		    apd->ap_nrounds);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * AES-XTS
 */

struct aesxts {
	struct aesenc	ax_enckey;
	struct aesdec	ax_deckey;
	struct aesenc	ax_tweakkey;
	uint32_t	ax_nrounds;
};

static void *
cgd_cipher_aes_xts_init(size_t keylen, const void *xtskey, size_t *blocksize)
{
	struct aesxts *ax;
	const char *key, *key2; /* XTS key is made of two AES keys. */

	if (!blocksize)
		return NULL;
	if (keylen != 256 && keylen != 512)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;

	ax = kmem_zalloc(sizeof(*ax), KM_SLEEP);
	keylen /= 2;
	key = xtskey;
	key2 = key + keylen / CHAR_BIT;

	switch (keylen) {
	case 128:
		aes_setenckey128(&ax->ax_enckey, key);
		aes_setdeckey128(&ax->ax_deckey, key);
		aes_setenckey128(&ax->ax_tweakkey, key2);
		ax->ax_nrounds = AES_128_NROUNDS;
		break;
	case 256:
		aes_setenckey256(&ax->ax_enckey, key);
		aes_setdeckey256(&ax->ax_deckey, key);
		aes_setenckey256(&ax->ax_tweakkey, key2);
		ax->ax_nrounds = AES_256_NROUNDS;
		break;
	}

	return ax;
}

static void
cgd_cipher_aes_xts_destroy(void *cookie)
{
	struct aesxts *ax = cookie;

	explicit_memset(ax, 0, sizeof(*ax));
	kmem_free(ax, sizeof(*ax));
}

static void
cgd_cipher_aes_xts(void *cookie, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct aesxts *ax = cookie;
	uint8_t tweak[CGD_AES_BLOCK_SIZE];

	/* Compute the initial tweak as AES_k(blkno).  */
	aes_enc(&ax->ax_tweakkey, blkno, tweak, ax->ax_nrounds);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		aes_xts_enc(&ax->ax_enckey, src, dst, nbytes, tweak,
		    ax->ax_nrounds);
		break;
	case CGD_CIPHER_DECRYPT:
		aes_xts_dec(&ax->ax_deckey, src, dst, nbytes, tweak,
		    ax->ax_nrounds);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * 3DES Framework
 */

struct c3des_privdata {
	des_key_schedule	cp_key1;
	des_key_schedule	cp_key2;
	des_key_schedule	cp_key3;
};

static void *
cgd_cipher_3des_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	c3des_privdata *cp;
	int	error = 0;
	des_cblock *block;

	if (!blocksize)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 64;
	if (keylen != (DES_KEY_SZ * 3 * 8) || *blocksize != 64)
		return NULL;
	cp = kmem_zalloc(sizeof(*cp), KM_SLEEP);
	block = __UNCONST(key);
	error  = des_key_sched(block, cp->cp_key1);
	error |= des_key_sched(block + 1, cp->cp_key2);
	error |= des_key_sched(block + 2, cp->cp_key3);
	if (error) {
		explicit_memset(cp, 0, sizeof(*cp));
		kmem_free(cp, sizeof(*cp));
		return NULL;
	}
	return cp;
}

static void
cgd_cipher_3des_destroy(void *data)
{
	struct c3des_privdata *cp = data;

	explicit_memset(cp, 0, sizeof(*cp));
	kmem_free(cp, sizeof(*cp));
}

static void
cgd_cipher_3des_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct	c3des_privdata *cp = privdata;
	des_cblock zero;
	uint8_t iv[CGD_3DES_BLOCK_SIZE];

	/* Compute the CBC IV as 3DES_k(blkno) = 3DES-CBC_k(iv=blkno, 0).  */
	memset(&zero, 0, sizeof(zero));
	des_ede3_cbc_encrypt(blkno, iv, CGD_3DES_BLOCK_SIZE,
	    cp->cp_key1, cp->cp_key2, cp->cp_key3, &zero, /*encrypt*/1);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		des_ede3_cbc_encrypt(src, dst, nbytes,
		    cp->cp_key1, cp->cp_key2, cp->cp_key3,
		    (des_cblock *)iv, /*encrypt*/1);
		break;
	case CGD_CIPHER_DECRYPT:
		des_ede3_cbc_encrypt(src, dst, nbytes,
		    cp->cp_key1, cp->cp_key2, cp->cp_key3,
		    (des_cblock *)iv, /*encrypt*/0);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * Blowfish Framework
 */

struct bf_privdata {
	BF_KEY	bp_key;
};

struct bf_encdata {
	BF_KEY		*be_key;
	uint8_t		 be_iv[CGD_BF_BLOCK_SIZE];
};

static void *
cgd_cipher_bf_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct	bf_privdata *bp;

	if (!blocksize)
		return NULL;
	if (keylen < 40 || keylen > 448 || (keylen % 8 != 0))
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 64;
	if (*blocksize != 64)
		return NULL;
	bp = kmem_zalloc(sizeof(*bp), KM_SLEEP);
	if (!bp)
		return NULL;
	BF_set_key(&bp->bp_key, keylen / 8, key);
	return bp;
}

static void
cgd_cipher_bf_destroy(void *data)
{
	struct	bf_privdata *bp = data;

	explicit_memset(bp, 0, sizeof(*bp));
	kmem_free(bp, sizeof(*bp));
}

static void
cgd_cipher_bf_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
    const void *blkno, int dir)
{
	struct	bf_privdata *bp = privdata;
	uint8_t zero[CGD_BF_BLOCK_SIZE], iv[CGD_BF_BLOCK_SIZE];

	/* Compute the CBC IV as Blowfish_k(blkno) = BF_CBC_k(blkno, 0).  */
	memset(zero, 0, sizeof(zero));
	BF_cbc_encrypt(blkno, iv, CGD_BF_BLOCK_SIZE, &bp->bp_key, zero,
	    /*encrypt*/1);

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
		    /*encrypt*/1);
		break;
	case CGD_CIPHER_DECRYPT:
		BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
		    /*encrypt*/0);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}

/*
 * Adiantum
 */

static void *
cgd_cipher_adiantum_init(size_t keylen, const void *key, size_t *blocksize)
{
	struct adiantum *A;

	if (!blocksize)
		return NULL;
	if (keylen != 256)
		return NULL;
	if (*blocksize == (size_t)-1)
		*blocksize = 128;
	if (*blocksize != 128)
		return NULL;

	A = kmem_zalloc(sizeof(*A), KM_SLEEP);
	adiantum_init(A, key);

	return A;
}

static void
cgd_cipher_adiantum_destroy(void *cookie)
{
	struct adiantum *A = cookie;

	explicit_memset(A, 0, sizeof(*A));
	kmem_free(A, sizeof(*A));
}

static void
cgd_cipher_adiantum_crypt(void *cookie, void *dst, const void *src,
    size_t nbytes, const void *blkno, int dir)
{
	/*
	 * Treat the block number as a 128-bit block.  This is more
	 * than twice as big as the largest number of reasonable
	 * blocks, but it doesn't hurt (it would be rounded up to a
	 * 128-bit input anyway).
	 */
	const unsigned tweaklen = 16;
	struct adiantum *A = cookie;

	switch (dir) {
	case CGD_CIPHER_ENCRYPT:
		adiantum_enc(dst, src, nbytes, blkno, tweaklen, A);
		break;
	case CGD_CIPHER_DECRYPT:
		adiantum_dec(dst, src, nbytes, blkno, tweaklen, A);
		break;
	default:
		panic("%s: unrecognised direction %d", __func__, dir);
	}
}