xref: /dpdk/examples/ipsec-secgw/sa.c (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"

#define IPDEFTTL 64

#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
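/* i.e. the full prefix lengths in bits: 32 for IPv4, 128 for IPv6 */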

#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t iv_len;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};

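/*
 * Note: for the CTR, GMAC and AEAD entries below, key_len includes a
 * trailing 4-byte salt/nonce that parse_sa_tokens() strips off the key
 * (the "key_len -= 4" handling) and stores in the SA's salt field.
 */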
const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-192-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 24
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "aes-192-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 28
	},
	{
		.keyword = "aes-256-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 36
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	},
	{
		.keyword = "sha384-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.digest_len = 24,
		.key_len = 48
	},
	{
		.keyword = "sha512-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.digest_len = 32,
		.key_len = 64
	},
	{
		.keyword = "aes-gmac",
		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_len = 8,
		.digest_len = 16,
		.key_len = 20
	},
	{
		.keyword = "aes-xcbc-mac-96",
		.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.digest_len = 12,
		.key_len = 16
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-192-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 28,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-256-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-128-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-192-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 28,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-256-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "chacha20-poly1305",
		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_len = 12,
		.block_size = 64,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	}
};

#define SA_INIT_NB	128
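/* initial capacity of the sa_in/sa_out arrays; extend_sa_arr() doubles it */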

static uint32_t nb_crypto_sessions;
struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;

struct ipsec_sa *sa_in;
uint32_t nb_sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  parse an x:x:x:x... string of hex byte values into uint8_t *key
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
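/*
 * Example (illustrative): "c0:fe:01" yields key = {0xc0, 0xfe, 0x01} and
 * returns 3; any group longer than two hex digits makes the parse fail.
 */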
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}

static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
	if (*sa_tbl == NULL) {
		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
		if (*sa_tbl == NULL)
			return -1;
		*cur_sz = SA_INIT_NB;
		return 0;
	}

	if (cur_cnt >= *cur_sz) {
		struct ipsec_sa *tbl;

		/* use a temporary so the old table is not leaked on failure */
		tbl = realloc(*sa_tbl,
			*cur_sz * sizeof(struct ipsec_sa) * 2);
		if (tbl == NULL)
			return -1;
		*sa_tbl = tbl;
		/* clean reallocated extra space */
		memset(&(*sa_tbl)[*cur_sz], 0,
			*cur_sz * sizeof(struct ipsec_sa));
		*cur_sz *= 2;
	}

	return 0;
}
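
/*
 * The tables grow geometrically (doubling), so appending rules while the
 * config file is parsed stays amortized O(1) per rule.
 */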

void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /* token index */
	uint32_t *ri; /* rule index */
	struct ipsec_sa_cnt *sa_cnt;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;
	int16_t status_p = 0;
	uint16_t udp_encap_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;
		sa_cnt = &sa_in_cnt;
		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
			return;
		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;
		sa_cnt = &sa_out_cnt;
		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
			return;
		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->flags = 0;
	rule->spi = atoi(tokens[1]);
	rule->portid = UINT16_MAX;
	ips = ipsec_get_primary_session(rule);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
				sa_cnt->nb_v4++;
				rule->flags |= IP4_TUNNEL;
			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
				sa_cnt->nb_v6++;
				rule->flags |= IP6_TUNNEL;
			} else if (strcmp(tokens[ti], "transport") == 0) {
				sa_cnt->nb_v4++;
				sa_cnt->nb_v6++;
				rule->flags |= TRANSPORT;
			} else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "telemetry") == 0) {
			rule->flags |= SA_TELEMETRY_ENABLE;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				key_len -= 4;
				rule->auth_key_len = key_len;
				rule->iv_len = algo->iv_len;
				memcpy(&rule->salt,
					&rule->auth_key[key_len], 4);
			}

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
						tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (rule->portid == UINT16_MAX)
				rule->portid = atoi(tokens[ti]);
			else if (rule->portid != atoi(tokens[ti])) {
				APP_CHECK(0, status,
					"portid %s does not match the already assigned portid %u",
					tokens[ti], rule->portid);
				return;
			}
			portid_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "mss") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->mss = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}

		if (strcmp(tokens[ti], "esn") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->esn = atoll(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}

		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session is allowed only when the "
				"primary session type is inline-crypto-offload.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			nb_crypto_sessions++;
			fallback_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "flow-direction") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				rule->fdir_flag = 1;
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (rule->portid == UINT16_MAX)
					rule->portid = atoi(tokens[ti]);
				else if (rule->portid != atoi(tokens[ti])) {
					APP_CHECK(0, status,
						"portid %s does not match the already assigned portid %u",
						tokens[ti], rule->portid);
					return;
				}
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				rule->fdir_qid = atoi(tokens[ti]);
				/* validate port id and queue id */
				status_p = check_flow_params(rule->portid,
						rule->fdir_qid);
				if (status_p < 0) {
					printf("port id %u / queue id %u is "
						"not valid\n", rule->portid,
						rule->fdir_qid);
				}
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			default:
				APP_CHECK(0, status,
					"flow director not supported for security session type %d",
					ips->type);
				return;
			}
			continue;
		}
		if (strcmp(tokens[ti], "udp-encap") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
						   status);
				if (status->status < 0)
					return;

				rule->udp_encap = 1;
				app_sa_prm.udp_encap = 1;
				udp_encap_p = 1;
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
				rule->udp_encap = 1;
				rule->udp.sport = 0;
				rule->udp.dport = 4500;
				break;
			default:
				APP_CHECK(0, status,
					"UDP encapsulation not supported for "
					"security session type %d",
					ips->type);
				return;
			}
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
				"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
				"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || (!portid_p && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
	}

	nb_crypto_sessions++;
	*ri = *ri + 1;
}
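
/*
 * Illustrative cfg-file rule handled by the parser above (the leading
 * "sa" keyword is consumed by the cfg-file reader, so tokens[0] here is
 * "out"):
 *   sa out 5 aead_algo aes-128-gcm aead_key <20 hex bytes: 16-byte key
 *   plus 4-byte salt> mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 */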

static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo &&
				aead_algos[i].key_len - 4 == sa->cipher_key_len) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");
	if (sa->udp_encap)
		printf("UDP encapsulated ");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		printf("cpu-crypto-accelerated ");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		switch (fallback_ips->type) {
		case RTE_SECURITY_ACTION_TYPE_NONE:
			printf("lookaside-none");
			break;
		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			printf("cpu-crypto-accelerated");
			break;
		default:
			printf("invalid");
			break;
		}
	}
	if (sa->fdir_flag == 1)
		printf("flow-direction port %d queue %d", sa->portid,
				sa->fdir_qid);

	printf("\n");
}
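
/*
 * Illustrative output of print_one_sa_rule() for the example rule above:
 *   spi_out(  5):aes-128-gcm mode:IP4Tunnel 172.16.1.5 172.16.2.5 type:no-offload
 */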

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%d", name, socket_id);

	/* Create SA context */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			nb_sa, socket_id);

	mz_size = sizeof(struct ipsec_xf) * nb_sa;
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA XFORM memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);

	if (sa_ctx == NULL) {
		printf("Failed to allocate SA CTX memory\n");
		rte_errno = ENOMEM;
		rte_memzone_free(mz);
		return NULL;
	}

	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
	sa_ctx->nb_sa = nb_sa;

	return sa_ctx;
}

static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
		if (tso && (dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TCP TSO offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4 (%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}

/*
 * Helper function for getting source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses
 * are not provided in the config file for that mode. It checks whether
 * an SP exists for the current SA and, depending on the protocol that
 * lookup returns, stores the appropriate addresses from the SP in the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv4 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
				ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
				ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv6 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}

static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = i;
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];

		if (inbound) {
			rc = ipsec_sad_add(&sa_ctx->sad, sa);
			if (rc != 0)
				return rc;
		}

		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
				return -EINVAL;
		}

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}


		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {

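			/*
			 * AEAD nonce length for ESP: 11 bytes for CCM
			 * (RFC 4309), 12 for GCM (RFC 4106) and
			 * ChaCha20-Poly1305 (RFC 7634).
			 */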
			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
				iv_length = 11;
			else
				iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = sa->iv_len;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			/* AES_GMAC uses salt like AEAD algorithms */
			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
				iv_length = 12;

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
				sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.auth.iv.length = iv_length;

			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
				sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.auth.iv.length = iv_length;

			}

			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				sa->xforms = inbound ?
					&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
				sa->xforms->next = NULL;

			} else {
				sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
				sa_ctx->xf[idx].b.next = NULL;
				sa->xforms = &sa_ctx->xf[idx].a;
			}
		}

		if (ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			rc = create_inline_session(skt_ctx, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_inline_session() failed\n");
				return -EINVAL;
			}
		}

		if (sa->fdir_flag && inbound) {
			rc = create_ipsec_esp_flow(sa);
			if (rc != 0)
				RTE_LOG(ERR, IPSEC_ESP,
					"create_ipsec_esp_flow() failed\n");
		}
		print_one_sa_rule(sa, inbound);
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}

/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}

static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the SPI's next proto by searching for that SPI in the
	 * SPD. Probably not the optimal way, but nothing better seems
	 * available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
		ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4  = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	if (lsa->flags & SA_TELEMETRY_ENABLE)
		rte_ipsec_telemetry_sa_add(sa);

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1)
		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);

	return rc;
}

/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = 0;
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = i;

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}

static int
sa_cmp(const void *p, const void *q)
{
	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;

	/* three-way compare; (int)(spi1 - spi2) could overflow for
	 * widely spaced SPIs and break qsort()/bsearch() ordering
	 */
	return (spi1 < spi2) ? -1 : (spi1 > spi2);
}

/*
 * Binary-search the SPI-sorted SA rules (see sa_sort_arr()) for an SA
 * with the given SPI
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
	uint32_t num;
	struct ipsec_sa *sa;
	struct ipsec_sa tmpl;
	const struct ipsec_sa *sar;

	sar = sa_ctx->sa;
	if (inbound != 0)
		num = nb_sa_in;
	else
		num = nb_sa_out;

	tmpl.spi = spi;

	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
	if (sa != NULL)
		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);

	return -ENOENT;
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
				&sa_in_cnt);
		if (rc != 0)
			rte_exit(EXIT_FAILURE, "failed to init SAD\n");

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa_arr[], uint16_t nb_pkts)
{
	uint32_t i;
	void *result_sa;
	struct ipsec_sa *sa;

	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);

	/*
	 * Mark the need for an inline offload fallback in the LSB of the SA
	 * pointer. Thanks to the packet grouping mechanism that
	 * ipsec_process uses, packets marked for fallback processing will
	 * form a separate group.
	 *
	 * Because the marked pointer is not safe to dereference as an SA,
	 * it is cast to a generic pointer to prevent unintentional use. Use
	 * ipsec_mask_saptr to get back a valid struct pointer.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (sa_arr[i] == NULL)
			continue;

		result_sa = sa = sa_arr[i];
		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
			sa->fallback_sessions > 0) {
			uintptr_t intsa = (uintptr_t)sa;
			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
			result_sa = (void *)intsa;
		}
		sa_arr[i] = result_sa;
	}
}
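
/*
 * Consumers recover the real pointer with ipsec_mask_saptr() (ipsec.h),
 * which clears IPSEC_SA_OFFLOAD_FALLBACK_FLAG from the low bit again.
 */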

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id) {
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
			if (rule->mss)
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
		}
	}
	return 0;
}

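/* keep both tables sorted by SPI so sa_spi_present() can bsearch() them */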
void
sa_sort_arr(void)
{
	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}

uint32_t
get_nb_crypto_sessions(void)
{
	return nb_crypto_sessions;
}