1 /* $OpenBSD: cryptox.c,v 1.6 2022/01/01 18:52:36 kettenis Exp $ */
2 /*
3 * Copyright (c) 2003 Jason Wright
4 * Copyright (c) 2003, 2004 Theo de Raadt
5 * Copyright (c) 2010 Thordur I. Bjornsson
6 * Copyright (c) 2010 Mike Belopuhov
7 * Copyright (c) 2020 Tobias Heider
8 * All rights reserved.
9 *
10 * Permission to use, copy, modify, and distribute this software for any
11 * purpose with or without fee is hereby granted, provided that the above
12 * copyright notice and this permission notice appear in all copies.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 */
22
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/atomic.h>
26 #include <sys/malloc.h>
27 #include <sys/pool.h>
28 #include <sys/mbuf.h>
29 #include <sys/smr.h>
30
31 #include <crypto/cryptodev.h>
32 #include <crypto/aes.h>
33 #include <crypto/gmac.h>
34 #include <crypto/xform.h>
35 #include <crypto/cryptosoft.h>
36
37 #include <machine/fpu.h>
38
/*
 * Expanded AES key schedule as consumed by the aes_v8_* assembly
 * routines: one 4-word round key per round plus the initial key.
 */
struct cryptox_aes_key {
	uint32_t rd_key[4 *(AES_MAXROUNDS + 1)];
	int rounds;		/* number of rounds (depends on key length) */
};
43
/*
 * Per-session state.  Sessions live on the softc's SMR list; lookups
 * (cryptox_get) run lock-free inside an SMR read section, teardown is
 * deferred through ses_smr (see cryptox_freesession/cryptox_free_smr).
 */
struct cryptox_session {
	struct cryptox_aes_key ses_ekey;	/* expanded encrypt key */
	struct cryptox_aes_key ses_dkey;	/* expanded decrypt key */
	uint32_t ses_klen;			/* key length in bytes */
	int ses_sid;				/* session id handed to callers */
	struct swcr_data *ses_swd;		/* software HMAC state, or NULL */
	SMR_LIST_ENTRY(cryptox_session)
	    ses_entries;
	uint8_t *ses_buf;			/* bounce buffer for payload */
	size_t ses_buflen;			/* size of ses_buf (0 if NULL) */
	struct smr_entry ses_smr;		/* deferred-free linkage */
};
56
/*
 * Driver-global state, allocated once in cryptox_setup().  sc_mtx
 * protects sc_sid and writers of sc_sessions; readers use SMR.
 */
struct cryptox_softc {
	int32_t sc_cid;		/* crypto framework driver id */
	uint32_t sc_sid;	/* last session id handed out */
	struct mutex sc_mtx;
	SMR_LIST_HEAD(, cryptox_session)
	    sc_sessions;
} *cryptox_sc;

/* Pool backing cryptox_session allocations. */
struct pool cryptoxpl;

/* Statistic: number of cipher operations performed (not atomic). */
uint32_t cryptox_ops;
68
/*
 * ARMv8 Cryptography Extension AES primitives, implemented in assembly
 * (aes_v8_*).  Callers must bracket these with fpu_kernel_enter()/
 * fpu_kernel_exit() since they use SIMD registers.
 * NOTE(review): key-length argument is in bits — confirm against the
 * assembly source.
 */
extern int aes_v8_set_encrypt_key(const uint8_t *user_key, const int bits,
    struct cryptox_aes_key *key);
extern int aes_v8_set_decrypt_key(const uint8_t *user_key, const int bits,
    struct cryptox_aes_key *key);
extern void aes_v8_encrypt(const uint8_t *in, uint8_t *out,
    const struct cryptox_aes_key *key);
extern void aes_v8_decrypt(const uint8_t *in, uint8_t *out,
    const struct cryptox_aes_key *key);
extern void aes_v8_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
    const struct cryptox_aes_key *key, uint8_t *ivec, const int enc);
extern void aes_v8_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
    size_t len, const struct cryptox_aes_key *key,
    const uint8_t ivec[16]);

/* Crypto framework entry points. */
void cryptox_setup(void);
int cryptox_newsession(u_int32_t *, struct cryptoini *);
int cryptox_freesession(u_int64_t);
int cryptox_process(struct cryptop *);

/* Session lookup and teardown helpers. */
struct cryptox_session *
    cryptox_get(uint32_t);
void cryptox_free(struct cryptox_session *);
void cryptox_free_smr(void *);

/* Software-crypto fallback for the HMAC side of a request. */
int cryptox_swauth(struct cryptop *, struct cryptodesc *, struct swcr_data *,
    caddr_t);

int cryptox_encdec(struct cryptop *, struct cryptodesc *,
    struct cryptox_session *);
98
/*
 * One-time driver initialization: allocate the softc, register the
 * supported algorithms with the crypto framework, and set up the
 * session pool.  On any failure cryptox_sc is left NULL and the driver
 * simply stays unregistered.
 */
void
cryptox_setup(void)
{
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	cryptox_sc = malloc(sizeof(*cryptox_sc), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cryptox_sc == NULL)
		return;

	bzero(algs, sizeof(algs));

	/* Encryption algorithms. */
	algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* HMACs needed for IPsec, uses software crypto. */
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* IPsec Extended Sequence Numbers. */
	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;

	cryptox_sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_MPSAFE);
	if (cryptox_sc->sc_cid < 0) {
		/* Framework refused us; undo the allocation. */
		free(cryptox_sc, M_DEVBUF, sizeof(*cryptox_sc));
		cryptox_sc = NULL;
		return;
	}

	pool_init(&cryptoxpl, sizeof(struct cryptox_session), 16, IPL_VM, 0,
	    "cryptox", NULL);
	pool_setlowat(&cryptoxpl, 2);

	mtx_init(&cryptox_sc->sc_mtx, IPL_VM);

	crypto_register(cryptox_sc->sc_cid, algs, cryptox_newsession,
	    cryptox_freesession, cryptox_process);
}
140
141 int
cryptox_newsession(u_int32_t * sidp,struct cryptoini * cri)142 cryptox_newsession(u_int32_t *sidp, struct cryptoini *cri)
143 {
144 struct cryptox_session *ses = NULL;
145 struct cryptoini *c;
146 const struct auth_hash *axf;
147 struct swcr_data *swd;
148 int i;
149
150 if (sidp == NULL || cri == NULL)
151 return (EINVAL);
152
153 ses = pool_get(&cryptoxpl, PR_NOWAIT | PR_ZERO);
154 if (!ses)
155 return (ENOMEM);
156 smr_init(&ses->ses_smr);
157
158 ses->ses_buf = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
159 if (ses->ses_buf != NULL)
160 ses->ses_buflen = PAGE_SIZE;
161
162 for (c = cri; c != NULL; c = c->cri_next) {
163 switch (c->cri_alg) {
164 case CRYPTO_AES_CBC:
165 ses->ses_klen = c->cri_klen / 8;
166 fpu_kernel_enter();
167 aes_v8_set_encrypt_key(c->cri_key, c->cri_klen, &ses->ses_ekey);
168 aes_v8_set_decrypt_key(c->cri_key, c->cri_klen, &ses->ses_dkey);
169 fpu_kernel_exit();
170 break;
171
172 case CRYPTO_MD5_HMAC:
173 axf = &auth_hash_hmac_md5_96;
174 goto authcommon;
175 case CRYPTO_SHA1_HMAC:
176 axf = &auth_hash_hmac_sha1_96;
177 goto authcommon;
178 case CRYPTO_RIPEMD160_HMAC:
179 axf = &auth_hash_hmac_ripemd_160_96;
180 goto authcommon;
181 case CRYPTO_SHA2_256_HMAC:
182 axf = &auth_hash_hmac_sha2_256_128;
183 goto authcommon;
184 case CRYPTO_SHA2_384_HMAC:
185 axf = &auth_hash_hmac_sha2_384_192;
186 goto authcommon;
187 case CRYPTO_SHA2_512_HMAC:
188 axf = &auth_hash_hmac_sha2_512_256;
189 authcommon:
190 swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
191 M_NOWAIT|M_ZERO);
192 if (swd == NULL) {
193 cryptox_free(ses);
194 return (ENOMEM);
195 }
196 ses->ses_swd = swd;
197
198 swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
199 M_NOWAIT);
200 if (swd->sw_ictx == NULL) {
201 cryptox_free(ses);
202 return (ENOMEM);
203 }
204
205 swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
206 M_NOWAIT);
207 if (swd->sw_octx == NULL) {
208 cryptox_free(ses);
209 return (ENOMEM);
210 }
211
212 for (i = 0; i < c->cri_klen / 8; i++)
213 c->cri_key[i] ^= HMAC_IPAD_VAL;
214
215 axf->Init(swd->sw_ictx);
216 axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
217 axf->Update(swd->sw_ictx, hmac_ipad_buffer,
218 axf->blocksize - (c->cri_klen / 8));
219
220 for (i = 0; i < c->cri_klen / 8; i++)
221 c->cri_key[i] ^= (HMAC_IPAD_VAL ^
222 HMAC_OPAD_VAL);
223
224 axf->Init(swd->sw_octx);
225 axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
226 axf->Update(swd->sw_octx, hmac_opad_buffer,
227 axf->blocksize - (c->cri_klen / 8));
228
229 for (i = 0; i < c->cri_klen / 8; i++)
230 c->cri_key[i] ^= HMAC_OPAD_VAL;
231
232 swd->sw_axf = axf;
233 swd->sw_alg = c->cri_alg;
234
235 break;
236
237 case CRYPTO_ESN:
238 /* nothing to do */
239 break;
240
241 default:
242 cryptox_free(ses);
243 return (EINVAL);
244 }
245 }
246
247 mtx_enter(&cryptox_sc->sc_mtx);
248 ses->ses_sid = ++cryptox_sc->sc_sid;
249 SMR_LIST_INSERT_HEAD_LOCKED(&cryptox_sc->sc_sessions, ses, ses_entries);
250 mtx_leave(&cryptox_sc->sc_mtx);
251
252 *sidp = ses->ses_sid;
253 return (0);
254 }
255
256 int
cryptox_freesession(u_int64_t tid)257 cryptox_freesession(u_int64_t tid)
258 {
259 struct cryptox_session *ses;
260 u_int32_t sid = (u_int32_t)tid;
261
262 mtx_enter(&cryptox_sc->sc_mtx);
263 SMR_LIST_FOREACH_LOCKED(ses, &cryptox_sc->sc_sessions, ses_entries) {
264 if (ses->ses_sid == sid) {
265 SMR_LIST_REMOVE_LOCKED(ses, ses_entries);
266 break;
267 }
268 }
269 mtx_leave(&cryptox_sc->sc_mtx);
270
271 if (ses == NULL)
272 return (EINVAL);
273
274 smr_call(&ses->ses_smr, cryptox_free_smr, ses);
275
276 return (0);
277 }
278
279 void
cryptox_free(struct cryptox_session * ses)280 cryptox_free(struct cryptox_session *ses)
281 {
282 struct swcr_data *swd;
283 const struct auth_hash *axf;
284
285 if (ses->ses_swd) {
286 swd = ses->ses_swd;
287 axf = swd->sw_axf;
288
289 if (swd->sw_ictx) {
290 explicit_bzero(swd->sw_ictx, axf->ctxsize);
291 free(swd->sw_ictx, M_CRYPTO_DATA, axf->ctxsize);
292 }
293 if (swd->sw_octx) {
294 explicit_bzero(swd->sw_octx, axf->ctxsize);
295 free(swd->sw_octx, M_CRYPTO_DATA, axf->ctxsize);
296 }
297 free(swd, M_CRYPTO_DATA, sizeof(*swd));
298 }
299
300 if (ses->ses_buf) {
301 explicit_bzero(ses->ses_buf, ses->ses_buflen);
302 free(ses->ses_buf, M_DEVBUF, ses->ses_buflen);
303 }
304
305 explicit_bzero(ses, sizeof (*ses));
306 pool_put(&cryptoxpl, ses);
307 }
308
/*
 * SMR callback: invoked once the grace period has expired, when no
 * reader can still hold a reference to the session.
 */
void
cryptox_free_smr(void *arg)
{
	cryptox_free((struct cryptox_session *)arg);
}
316
317 struct cryptox_session *
cryptox_get(uint32_t sid)318 cryptox_get(uint32_t sid)
319 {
320 struct cryptox_session *ses = NULL;
321
322 SMR_ASSERT_CRITICAL();
323 SMR_LIST_FOREACH(ses, &cryptox_sc->sc_sessions, ses_entries) {
324 if (ses->ses_sid == sid)
325 break;
326 }
327 return (ses);
328 }
329
330 int
cryptox_swauth(struct cryptop * crp,struct cryptodesc * crd,struct swcr_data * sw,caddr_t buf)331 cryptox_swauth(struct cryptop *crp, struct cryptodesc *crd,
332 struct swcr_data *sw, caddr_t buf)
333 {
334 int type;
335
336 if (crp->crp_flags & CRYPTO_F_IMBUF)
337 type = CRYPTO_BUF_MBUF;
338 else
339 type = CRYPTO_BUF_IOV;
340
341 return (swcr_authcompute(crp, crd, sw, buf, type));
342 }
343
/*
 * Perform the AES-CBC encrypt/decrypt part of a request.  The payload
 * is copied out of the mbuf/uio into the session's contiguous bounce
 * buffer, transformed in place with the ARMv8 AES instructions, and
 * copied back.  Returns 0 or ENOMEM.
 */
int
cryptox_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct cryptox_session *ses)
{
	int err, ivlen, iskip, oskip, rlen;
	uint8_t iv[EALG_MAX_BLOCK_LEN];
	uint8_t *buf = ses->ses_buf;

	rlen = err = iskip = oskip = 0;

	/*
	 * Grow the bounce buffer if this request doesn't fit (also
	 * covers ses_buf == NULL, since ses_buflen is 0 then).
	 */
	if (crd->crd_len > ses->ses_buflen) {
		if (buf != NULL) {
			explicit_bzero(buf, ses->ses_buflen);
			free(buf, M_DEVBUF, ses->ses_buflen);
		}

		ses->ses_buflen = 0;
		rlen = roundup(crd->crd_len, EALG_MAX_BLOCK_LEN);
		ses->ses_buf = buf = malloc(rlen, M_DEVBUF, M_NOWAIT |
		    M_ZERO);
		if (buf == NULL)
			return (ENOMEM);
		ses->ses_buflen = rlen;
	}

	/* CBC uses 16 */
	ivlen = 16;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* Caller-supplied IV, or a fresh random one. */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF) {
				if (m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv, M_NOWAIT)) {
					err = ENOMEM;
					goto out;
				}
			} else
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
		}
	} else {
		/* Decrypt: take the IV from crd or from the packet. */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
			else
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, ivlen, iv);
		}
	}

	/* Copy data to be processed to the buffer */
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);
	else
		cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);

	/* Apply cipher (SIMD registers require an FPU section). */
	fpu_kernel_enter();
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			aes_v8_cbc_encrypt(buf, buf, crd->crd_len, &ses->ses_ekey, iv, 1);
		else
			aes_v8_cbc_encrypt(buf, buf, crd->crd_len, &ses->ses_dkey, iv, 0);
		break;
	}
	fpu_kernel_exit();

	cryptox_ops++;

	/* Copy back the result */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (m_copyback((struct mbuf *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf, M_NOWAIT)) {
			err = ENOMEM;
			goto out;
		}
	} else
		cuio_copyback((struct uio *)crp->crp_buf, crd->crd_skip,
		    crd->crd_len, buf);

 out:
	/* Don't leave plaintext/ciphertext lying around in the buffer. */
	explicit_bzero(buf, roundup(crd->crd_len, EALG_MAX_BLOCK_LEN));
	return (err);
}
441
/*
 * Crypto framework dispatch entry point: look up the session for this
 * request inside an SMR read section and run each descriptor through
 * either the AES-CBC fast path or the software HMAC fallback.
 * Returns 0, or EINVAL/ENOMEM on the first failing descriptor.
 */
int
cryptox_process(struct cryptop *crp)
{
	struct cryptox_session *ses;
	struct cryptodesc *crd, *crde;
	int err = 0;
	int i;

	KASSERT(crp->crp_ndesc >= 1);

	/* Hold the SMR read section for the whole operation so the
	 * session cannot be freed out from under us. */
	smr_read_enter();
	ses = cryptox_get(crp->crp_sid & 0xffffffff);
	if (!ses) {
		err = EINVAL;
		goto out;
	}

	crde = NULL;
	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
			err = cryptox_encdec(crp, crd, ses);
			if (err != 0)
				goto out;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			err = cryptox_swauth(crp, crd, ses->ses_swd,
			    crp->crp_buf);
			if (err != 0)
				goto out;
			break;

		default:
			err = EINVAL;
			goto out;
		}
	}

 out:
	smr_read_leave();
	return (err);
}
490