xref: /dpdk/examples/ipsec-secgw/sa.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 /*
6  * Security Associations
7  */
8 #include <stdlib.h>
9 #include <sys/types.h>
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
13 
14 #include <rte_memzone.h>
15 #include <rte_crypto.h>
16 #include <rte_security.h>
17 #include <rte_cryptodev.h>
18 #include <rte_byteorder.h>
19 #include <rte_errno.h>
20 #include <rte_ip.h>
21 #include <rte_udp.h>
22 #include <rte_random.h>
23 #include <rte_ethdev.h>
24 #include <rte_malloc.h>
25 
26 #include "ipsec.h"
27 #include "esp.h"
28 #include "parser.h"
29 #include "sad.h"
30 
31 #define IPDEFTTL 64
32 
33 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
34 
35 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
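
/*
 * The two masks above are full prefix lengths in bits: sizeof() of the
 * address field times CHAR_BIT, i.e. 4 * 8 = 32 for IPv4 and 16 * 8 = 128
 * for IPv6.
 */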
36 
37 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
38 
39 struct supported_cipher_algo {
40 	const char *keyword;
41 	enum rte_crypto_cipher_algorithm algo;
42 	uint16_t iv_len;
43 	uint16_t block_size;
44 	uint16_t key_len;
45 };
46 
47 struct supported_auth_algo {
48 	const char *keyword;
49 	enum rte_crypto_auth_algorithm algo;
50 	uint16_t iv_len;
51 	uint16_t digest_len;
52 	uint16_t key_len;
53 	uint8_t key_not_req;
54 };
55 
56 struct supported_aead_algo {
57 	const char *keyword;
58 	enum rte_crypto_aead_algorithm algo;
59 	uint16_t iv_len;
60 	uint16_t block_size;
61 	uint16_t digest_len;
62 	uint16_t key_len;
63 	uint8_t aad_len;
64 };
65 
66 
67 const struct supported_cipher_algo cipher_algos[] = {
68 	{
69 		.keyword = "null",
70 		.algo = RTE_CRYPTO_CIPHER_NULL,
71 		.iv_len = 0,
72 		.block_size = 4,
73 		.key_len = 0
74 	},
75 	{
76 		.keyword = "aes-128-cbc",
77 		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
78 		.iv_len = 16,
79 		.block_size = 16,
80 		.key_len = 16
81 	},
82 	{
83 		.keyword = "aes-192-cbc",
84 		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
85 		.iv_len = 16,
86 		.block_size = 16,
87 		.key_len = 24
88 	},
89 	{
90 		.keyword = "aes-256-cbc",
91 		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
92 		.iv_len = 16,
93 		.block_size = 16,
94 		.key_len = 32
95 	},
96 	{
97 		.keyword = "aes-128-ctr",
98 		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
99 		/* Per packet IV length */
100 		.iv_len = 8,
101 		.block_size = 4,
102 		.key_len = 20
103 	},
104 	{
105 		.keyword = "aes-192-ctr",
106 		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
107 		.iv_len = 16,
108 		.block_size = 16,
109 		.key_len = 28
110 	},
111 	{
112 		.keyword = "aes-256-ctr",
113 		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
114 		.iv_len = 16,
115 		.block_size = 16,
116 		.key_len = 36
117 	},
118 	{
119 		.keyword = "3des-cbc",
120 		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
121 		.iv_len = 8,
122 		.block_size = 8,
123 		.key_len = 24
124 	},
125 	{
126 		.keyword = "des-cbc",
127 		.algo = RTE_CRYPTO_CIPHER_DES_CBC,
128 		.iv_len = 8,
129 		.block_size = 8,
130 		.key_len = 8
131 	}
132 };
133 
134 const struct supported_auth_algo auth_algos[] = {
135 	{
136 		.keyword = "null",
137 		.algo = RTE_CRYPTO_AUTH_NULL,
138 		.digest_len = 0,
139 		.key_len = 0,
140 		.key_not_req = 1
141 	},
142 	{
143 		.keyword = "sha1-hmac",
144 		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
145 		.digest_len = 12,
146 		.key_len = 20
147 	},
148 	{
149 		.keyword = "sha256-hmac",
150 		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
151 		.digest_len = 16,
152 		.key_len = 32
153 	},
154 	{
155 		.keyword = "sha384-hmac",
156 		.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
157 		.digest_len = 24,
158 		.key_len = 48
159 	},
160 	{
161 		.keyword = "sha512-hmac",
162 		.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
163 		.digest_len = 32,
164 		.key_len = 64
165 	},
166 	{
167 		.keyword = "aes-gmac",
168 		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
169 		.iv_len = 8,
170 		.digest_len = 16,
171 		.key_len = 20
172 	},
173 	{
174 		.keyword = "aes-xcbc-mac-96",
175 		.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
176 		.digest_len = 12,
177 		.key_len = 16
178 	}
179 };
180 
181 const struct supported_aead_algo aead_algos[] = {
182 	{
183 		.keyword = "aes-128-gcm",
184 		.algo = RTE_CRYPTO_AEAD_AES_GCM,
185 		.iv_len = 8,
186 		.block_size = 4,
187 		.key_len = 20,
188 		.digest_len = 16,
189 		.aad_len = 8,
190 	},
191 	{
192 		.keyword = "aes-192-gcm",
193 		.algo = RTE_CRYPTO_AEAD_AES_GCM,
194 		.iv_len = 8,
195 		.block_size = 4,
196 		.key_len = 28,
197 		.digest_len = 16,
198 		.aad_len = 8,
199 	},
200 	{
201 		.keyword = "aes-256-gcm",
202 		.algo = RTE_CRYPTO_AEAD_AES_GCM,
203 		.iv_len = 8,
204 		.block_size = 4,
205 		.key_len = 36,
206 		.digest_len = 16,
207 		.aad_len = 8,
208 	},
209 	{
210 		.keyword = "aes-128-ccm",
211 		.algo = RTE_CRYPTO_AEAD_AES_CCM,
212 		.iv_len = 8,
213 		.block_size = 4,
214 		.key_len = 20,
215 		.digest_len = 16,
216 		.aad_len = 8,
217 	},
218 	{
219 		.keyword = "aes-192-ccm",
220 		.algo = RTE_CRYPTO_AEAD_AES_CCM,
221 		.iv_len = 8,
222 		.block_size = 4,
223 		.key_len = 28,
224 		.digest_len = 16,
225 		.aad_len = 8,
226 	},
227 	{
228 		.keyword = "aes-256-ccm",
229 		.algo = RTE_CRYPTO_AEAD_AES_CCM,
230 		.iv_len = 8,
231 		.block_size = 4,
232 		.key_len = 36,
233 		.digest_len = 16,
234 		.aad_len = 8,
235 	},
236 	{
237 		.keyword = "chacha20-poly1305",
238 		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
239 		.iv_len = 12,
240 		.block_size = 64,
241 		.key_len = 36,
242 		.digest_len = 16,
243 		.aad_len = 8,
244 	}
245 };
246 
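/*
 * Illustrative configuration lines consumed by parse_sa_tokens() below
 * (format follows the ipsec-secgw sample configuration; SPIs, addresses
 * and key bytes are examples only). Note the extra trailing key bytes for
 * salted algorithms, e.g. 16 + 4 = 20 bytes for aes-128-gcm:
 *
 *   sa out 5 aead_algo aes-128-gcm aead_key 2b:7e:15:16:28:ae:d2:a6:
 *       ab:f7:15:88:09:cf:4f:3c:de:ad:be:ef mode ipv4-tunnel
 *       src 172.16.1.5 dst 172.16.2.5
 *   sa in 6 cipher_algo aes-128-cbc cipher_key <16 bytes>
 *       auth_algo sha1-hmac auth_key <20 bytes> mode ipv4-tunnel
 *       src 172.16.2.5 dst 172.16.1.5
 *
 * (Each rule is a single line in the cfg file; wrapped here for
 * readability.)
 */
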
247 #define SA_INIT_NB	128
248 
249 static uint32_t nb_crypto_sessions;
250 struct ipsec_sa *sa_out;
251 uint32_t nb_sa_out;
252 static uint32_t sa_out_sz;
253 static struct ipsec_sa_cnt sa_out_cnt;
254 
255 struct ipsec_sa *sa_in;
256 uint32_t nb_sa_in;
257 static uint32_t sa_in_sz;
258 static struct ipsec_sa_cnt sa_in_cnt;
259 
260 static const struct supported_cipher_algo *
261 find_match_cipher_algo(const char *cipher_keyword)
262 {
263 	size_t i;
264 
265 	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
266 		const struct supported_cipher_algo *algo =
267 			&cipher_algos[i];
268 
269 		if (strcmp(cipher_keyword, algo->keyword) == 0)
270 			return algo;
271 	}
272 
273 	return NULL;
274 }
275 
276 static const struct supported_auth_algo *
277 find_match_auth_algo(const char *auth_keyword)
278 {
279 	size_t i;
280 
281 	for (i = 0; i < RTE_DIM(auth_algos); i++) {
282 		const struct supported_auth_algo *algo =
283 			&auth_algos[i];
284 
285 		if (strcmp(auth_keyword, algo->keyword) == 0)
286 			return algo;
287 	}
288 
289 	return NULL;
290 }
291 
292 static const struct supported_aead_algo *
293 find_match_aead_algo(const char *aead_keyword)
294 {
295 	size_t i;
296 
297 	for (i = 0; i < RTE_DIM(aead_algos); i++) {
298 		const struct supported_aead_algo *algo =
299 			&aead_algos[i];
300 
301 		if (strcmp(aead_keyword, algo->keyword) == 0)
302 			return algo;
303 	}
304 
305 	return NULL;
306 }
307 
308 /** parse_key_string
309  *  parse an x:x:x:x... colon-separated hex byte string into uint8_t *key
310  *  return:
311  *  > 0: number of bytes parsed
312  *  0:   failed
313  */
314 static uint32_t
315 parse_key_string(const char *key_str, uint8_t *key)
316 {
317 	const char *pt_start = key_str, *pt_end = key_str;
318 	uint32_t nb_bytes = 0;
319 
320 	while (pt_end != NULL) {
321 		char sub_str[3] = {0};
322 
323 		pt_end = strchr(pt_start, ':');
324 
325 		if (pt_end == NULL) {
326 			if (strlen(pt_start) > 2)
327 				return 0;
328 			strncpy(sub_str, pt_start, 2);
329 		} else {
330 			if (pt_end - pt_start > 2)
331 				return 0;
332 
333 			strncpy(sub_str, pt_start, pt_end - pt_start);
334 			pt_start = pt_end + 1;
335 		}
336 
337 		key[nb_bytes++] = strtol(sub_str, NULL, 16);
338 	}
339 
340 	return nb_bytes;
341 }
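
/*
 * Usage sketch (illustrative values): parse_key_string("a1:b2:c3", key)
 * writes key[0] = 0xa1, key[1] = 0xb2, key[2] = 0xc3 and returns 3.
 * Any token longer than two hex digits makes it return 0, but non-hex
 * characters are not rejected (strtol() silently yields 0 for them).
 * The caller must size the key buffer for the expected key length.
 */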
342 
343 static int
344 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
345 {
346 	if (*sa_tbl == NULL) {
347 		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
348 		if (*sa_tbl == NULL)
349 			return -1;
350 		*cur_sz = SA_INIT_NB;
351 		return 0;
352 	}
353 
354 	if (cur_cnt >= *cur_sz) {
355 		struct ipsec_sa *tmp = realloc(*sa_tbl,
356 			*cur_sz * sizeof(struct ipsec_sa) * 2);
357 		if (tmp == NULL)
358 			return -1;
359 		/* clean the extra space; commit only on success (no leak) */
360 		memset(&tmp[*cur_sz], 0, *cur_sz * sizeof(struct ipsec_sa));
361 		*sa_tbl = tmp;
362 		*cur_sz *= 2;
363 	}
364 
365 	return 0;
366 }
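
/*
 * Growth sketch: with SA_INIT_NB == 128 the table capacity doubles once
 * full (128, 256, 512, ...), so repeated appends from parse_sa_tokens()
 * stay amortized O(1).
 */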
367 
368 void
369 parse_sa_tokens(char **tokens, uint32_t n_tokens,
370 	struct parse_status *status)
371 {
372 	struct ipsec_sa *rule = NULL;
373 	struct rte_ipsec_session *ips;
374 	uint32_t ti; /* token index */
375 	uint32_t *ri; /* rule index */
376 	struct ipsec_sa_cnt *sa_cnt;
377 	uint32_t cipher_algo_p = 0;
378 	uint32_t auth_algo_p = 0;
379 	uint32_t aead_algo_p = 0;
380 	uint32_t src_p = 0;
381 	uint32_t dst_p = 0;
382 	uint32_t mode_p = 0;
383 	uint32_t type_p = 0;
384 	uint32_t portid_p = 0;
385 	uint32_t fallback_p = 0;
386 	int16_t status_p = 0;
387 	uint16_t udp_encap_p = 0;
388 
389 	if (strcmp(tokens[0], "in") == 0) {
390 		ri = &nb_sa_in;
391 		sa_cnt = &sa_in_cnt;
392 		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
393 			return;
394 		rule = &sa_in[*ri];
395 		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
396 	} else {
397 		ri = &nb_sa_out;
398 		sa_cnt = &sa_out_cnt;
399 		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
400 			return;
401 		rule = &sa_out[*ri];
402 		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
403 	}
404 
405 	/* spi number */
406 	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
407 	if (status->status < 0)
408 		return;
409 	if (atoi(tokens[1]) == INVALID_SPI)
410 		return;
411 	rule->flags = 0;
412 	rule->spi = atoi(tokens[1]);
413 	rule->portid = UINT16_MAX;
414 	ips = ipsec_get_primary_session(rule);
415 
416 	for (ti = 2; ti < n_tokens; ti++) {
417 		if (strcmp(tokens[ti], "mode") == 0) {
418 			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
419 			if (status->status < 0)
420 				return;
421 
422 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
423 			if (status->status < 0)
424 				return;
425 
426 			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
427 				sa_cnt->nb_v4++;
428 				rule->flags |= IP4_TUNNEL;
429 			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
430 				sa_cnt->nb_v6++;
431 				rule->flags |= IP6_TUNNEL;
432 			} else if (strcmp(tokens[ti], "transport") == 0) {
433 				sa_cnt->nb_v4++;
434 				sa_cnt->nb_v6++;
435 				rule->flags |= TRANSPORT;
436 			} else {
437 				APP_CHECK(0, status, "unrecognized "
438 					"input \"%s\"", tokens[ti]);
439 				return;
440 			}
441 
442 			mode_p = 1;
443 			continue;
444 		}
445 
446 		if (strcmp(tokens[ti], "telemetry") == 0) {
447 			rule->flags |= SA_TELEMETRY_ENABLE;
448 			continue;
449 		}
450 
451 		if (strcmp(tokens[ti], "cipher_algo") == 0) {
452 			const struct supported_cipher_algo *algo;
453 			uint32_t key_len;
454 
455 			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
456 				status);
457 			if (status->status < 0)
458 				return;
459 
460 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
461 			if (status->status < 0)
462 				return;
463 
464 			algo = find_match_cipher_algo(tokens[ti]);
465 
466 			APP_CHECK(algo != NULL, status, "unrecognized "
467 				"input \"%s\"", tokens[ti]);
468 
469 			if (status->status < 0)
470 				return;
471 
472 			rule->cipher_algo = algo->algo;
473 			rule->block_size = algo->block_size;
474 			rule->iv_len = algo->iv_len;
475 			rule->cipher_key_len = algo->key_len;
476 
477 			/* for NULL algorithm, no cipher key required */
478 			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
479 				cipher_algo_p = 1;
480 				continue;
481 			}
482 
483 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
484 			if (status->status < 0)
485 				return;
486 
487 			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
488 				status, "unrecognized input \"%s\", "
489 				"expect \"cipher_key\"", tokens[ti]);
490 			if (status->status < 0)
491 				return;
492 
493 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
494 			if (status->status < 0)
495 				return;
496 
497 			key_len = parse_key_string(tokens[ti],
498 				rule->cipher_key);
499 			APP_CHECK(key_len == rule->cipher_key_len, status,
500 				"unrecognized input \"%s\"", tokens[ti]);
501 			if (status->status < 0)
502 				return;
503 
504 			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
505 				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
506 				rule->salt = (uint32_t)rte_rand();
507 
508 			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
509 				key_len -= 4;
510 				rule->cipher_key_len = key_len;
511 				memcpy(&rule->salt,
512 					&rule->cipher_key[key_len], 4);
513 			}
514 
515 			cipher_algo_p = 1;
516 			continue;
517 		}
518 
519 		if (strcmp(tokens[ti], "auth_algo") == 0) {
520 			const struct supported_auth_algo *algo;
521 			uint32_t key_len;
522 
523 			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
524 				status);
525 			if (status->status < 0)
526 				return;
527 
528 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
529 			if (status->status < 0)
530 				return;
531 
532 			algo = find_match_auth_algo(tokens[ti]);
533 			APP_CHECK(algo != NULL, status, "unrecognized "
534 				"input \"%s\"", tokens[ti]);
535 
536 			if (status->status < 0)
537 				return;
538 
539 			rule->auth_algo = algo->algo;
540 			rule->auth_key_len = algo->key_len;
541 			rule->digest_len = algo->digest_len;
542 
543 			/* NULL algorithm and combined algos do not
544 			 * require auth key
545 			 */
546 			if (algo->key_not_req) {
547 				auth_algo_p = 1;
548 				continue;
549 			}
550 
551 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
552 			if (status->status < 0)
553 				return;
554 
555 			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
556 				status, "unrecognized input \"%s\", "
557 				"expect \"auth_key\"", tokens[ti]);
558 			if (status->status < 0)
559 				return;
560 
561 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
562 			if (status->status < 0)
563 				return;
564 
565 			key_len = parse_key_string(tokens[ti],
566 				rule->auth_key);
567 			APP_CHECK(key_len == rule->auth_key_len, status,
568 				"unrecognized input \"%s\"", tokens[ti]);
569 			if (status->status < 0)
570 				return;
571 
572 			if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
573 				key_len -= 4;
574 				rule->auth_key_len = key_len;
575 				rule->iv_len = algo->iv_len;
576 				memcpy(&rule->salt,
577 					&rule->auth_key[key_len], 4);
578 			}
579 
580 			auth_algo_p = 1;
581 			continue;
582 		}
583 
584 		if (strcmp(tokens[ti], "aead_algo") == 0) {
585 			const struct supported_aead_algo *algo;
586 			uint32_t key_len;
587 
588 			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
589 				status);
590 			if (status->status < 0)
591 				return;
592 
593 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
594 			if (status->status < 0)
595 				return;
596 
597 			algo = find_match_aead_algo(tokens[ti]);
598 
599 			APP_CHECK(algo != NULL, status, "unrecognized "
600 				"input \"%s\"", tokens[ti]);
601 
602 			if (status->status < 0)
603 				return;
604 
605 			rule->aead_algo = algo->algo;
606 			rule->cipher_key_len = algo->key_len;
607 			rule->digest_len = algo->digest_len;
608 			rule->aad_len = algo->aad_len;
609 			rule->block_size = algo->block_size;
610 			rule->iv_len = algo->iv_len;
611 
612 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
613 			if (status->status < 0)
614 				return;
615 
616 			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
617 				status, "unrecognized input \"%s\", "
618 				"expect \"aead_key\"", tokens[ti]);
619 			if (status->status < 0)
620 				return;
621 
622 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
623 			if (status->status < 0)
624 				return;
625 
626 			key_len = parse_key_string(tokens[ti],
627 				rule->cipher_key);
628 			APP_CHECK(key_len == rule->cipher_key_len, status,
629 				"unrecognized input \"%s\"", tokens[ti]);
630 			if (status->status < 0)
631 				return;
632 
633 			key_len -= 4;
634 			rule->cipher_key_len = key_len;
635 			memcpy(&rule->salt,
636 				&rule->cipher_key[key_len], 4);
637 
638 			aead_algo_p = 1;
639 			continue;
640 		}
641 
642 		if (strcmp(tokens[ti], "src") == 0) {
643 			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
644 			if (status->status < 0)
645 				return;
646 
647 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
648 			if (status->status < 0)
649 				return;
650 
651 			if (IS_IP4_TUNNEL(rule->flags)) {
652 				struct in_addr ip;
653 
654 				APP_CHECK(parse_ipv4_addr(tokens[ti],
655 					&ip, NULL) == 0, status,
656 					"unrecognized input \"%s\", "
657 					"expect valid ipv4 addr",
658 					tokens[ti]);
659 				if (status->status < 0)
660 					return;
661 				rule->src.ip.ip4 = rte_bswap32(
662 					(uint32_t)ip.s_addr);
663 			} else if (IS_IP6_TUNNEL(rule->flags)) {
664 				struct in6_addr ip;
665 
666 				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
667 					NULL) == 0, status,
668 					"unrecognized input \"%s\", "
669 					"expect valid ipv6 addr",
670 					tokens[ti]);
671 				if (status->status < 0)
672 					return;
673 				memcpy(rule->src.ip.ip6.ip6_b,
674 					ip.s6_addr, 16);
675 			} else if (IS_TRANSPORT(rule->flags)) {
676 				APP_CHECK(0, status, "unrecognized input "
677 					"\"%s\"", tokens[ti]);
678 				return;
679 			}
680 
681 			src_p = 1;
682 			continue;
683 		}
684 
685 		if (strcmp(tokens[ti], "dst") == 0) {
686 			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
687 			if (status->status < 0)
688 				return;
689 
690 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
691 			if (status->status < 0)
692 				return;
693 
694 			if (IS_IP4_TUNNEL(rule->flags)) {
695 				struct in_addr ip;
696 
697 				APP_CHECK(parse_ipv4_addr(tokens[ti],
698 					&ip, NULL) == 0, status,
699 					"unrecognized input \"%s\", "
700 					"expect valid ipv4 addr",
701 					tokens[ti]);
702 				if (status->status < 0)
703 					return;
704 				rule->dst.ip.ip4 = rte_bswap32(
705 					(uint32_t)ip.s_addr);
706 			} else if (IS_IP6_TUNNEL(rule->flags)) {
707 				struct in6_addr ip;
708 
709 				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
710 					NULL) == 0, status,
711 					"unrecognized input \"%s\", "
712 					"expect valid ipv6 addr",
713 					tokens[ti]);
714 				if (status->status < 0)
715 					return;
716 				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
717 			} else if (IS_TRANSPORT(rule->flags)) {
718 				APP_CHECK(0, status, "unrecognized "
719 					"input \"%s\"",	tokens[ti]);
720 				return;
721 			}
722 
723 			dst_p = 1;
724 			continue;
725 		}
726 
727 		if (strcmp(tokens[ti], "type") == 0) {
728 			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
729 			if (status->status < 0)
730 				return;
731 
732 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
733 			if (status->status < 0)
734 				return;
735 
736 			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
737 				ips->type =
738 					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
739 			else if (strcmp(tokens[ti],
740 					"inline-protocol-offload") == 0)
741 				ips->type =
742 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
743 			else if (strcmp(tokens[ti],
744 					"lookaside-protocol-offload") == 0)
745 				ips->type =
746 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
747 			else if (strcmp(tokens[ti], "no-offload") == 0)
748 				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
749 			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
750 				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
751 			else {
752 				APP_CHECK(0, status, "Invalid input \"%s\"",
753 						tokens[ti]);
754 				return;
755 			}
756 
757 			type_p = 1;
758 			continue;
759 		}
760 
761 		if (strcmp(tokens[ti], "port_id") == 0) {
762 			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
763 			if (status->status < 0)
764 				return;
765 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
766 			if (status->status < 0)
767 				return;
768 			if (rule->portid == UINT16_MAX)
769 				rule->portid = atoi(tokens[ti]);
770 			else if (rule->portid != atoi(tokens[ti])) {
771 				APP_CHECK(0, status,
772 					"portid %s not matching with already assigned portid %u",
773 					tokens[ti], rule->portid);
774 				return;
775 			}
776 			portid_p = 1;
777 			continue;
778 		}
779 
780 		if (strcmp(tokens[ti], "mss") == 0) {
781 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
782 			if (status->status < 0)
783 				return;
784 			rule->mss = atoi(tokens[ti]);
785 			if (status->status < 0)
786 				return;
787 			continue;
788 		}
789 
790 		if (strcmp(tokens[ti], "reassembly_en") == 0) {
791 			rule->flags |= SA_REASSEMBLY_ENABLE;
792 			continue;
793 		}
794 
795 		if (strcmp(tokens[ti], "esn") == 0) {
796 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
797 			if (status->status < 0)
798 				return;
799 			rule->esn = atoll(tokens[ti]);
800 			if (status->status < 0)
801 				return;
802 			continue;
803 		}
804 
805 		if (strcmp(tokens[ti], "fallback") == 0) {
806 			struct rte_ipsec_session *fb;
807 
808 			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
809 				"not allowed for legacy mode.");
810 			if (status->status < 0)
811 				return;
812 			APP_CHECK(ips->type ==
813 				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
814 				"Fallback session allowed only if primary "
815 				"session is of type inline-crypto-offload.");
816 			if (status->status < 0)
817 				return;
818 			APP_CHECK(rule->direction ==
819 				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
820 				"Fallback session not allowed for egress "
821 				"rule");
822 			if (status->status < 0)
823 				return;
824 			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
825 			if (status->status < 0)
826 				return;
827 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
828 			if (status->status < 0)
829 				return;
830 			fb = ipsec_get_fallback_session(rule);
831 			if (strcmp(tokens[ti], "lookaside-none") == 0)
832 				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
833 			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
834 				fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
835 			else {
836 				APP_CHECK(0, status, "unrecognized fallback "
837 					"type %s.", tokens[ti]);
838 				return;
839 			}
840 
841 			rule->fallback_sessions = 1;
842 			nb_crypto_sessions++;
843 			fallback_p = 1;
844 			continue;
845 		}
846 		if (strcmp(tokens[ti], "flow-direction") == 0) {
847 			switch (ips->type) {
848 			case RTE_SECURITY_ACTION_TYPE_NONE:
849 			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
850 				rule->fdir_flag = 1;
851 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
852 				if (status->status < 0)
853 					return;
854 				if (rule->portid == UINT16_MAX)
855 					rule->portid = atoi(tokens[ti]);
856 				else if (rule->portid != atoi(tokens[ti])) {
857 					APP_CHECK(0, status,
858 						"portid %s not matching with already assigned portid %u",
859 						tokens[ti], rule->portid);
860 					return;
861 				}
862 				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
863 				if (status->status < 0)
864 					return;
865 				rule->fdir_qid = atoi(tokens[ti]);
866 				/* validating portid and queueid */
867 				status_p = check_flow_params(rule->portid,
868 						rule->fdir_qid);
869 				if (status_p < 0) {
870 					printf("port id %u / queue id %u is "
871 						"not valid\n", rule->portid,
872 						 rule->fdir_qid);
873 				}
874 				break;
875 			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
876 			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
877 			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
878 			default:
879 				APP_CHECK(0, status,
880 					"flow director not supported for security session type %d",
881 					ips->type);
882 				return;
883 			}
884 			continue;
885 		}
886 		if (strcmp(tokens[ti], "udp-encap") == 0) {
887 			switch (ips->type) {
888 			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
889 			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
890 				APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
891 						   status);
892 				if (status->status < 0)
893 					return;
894 
895 				rule->udp_encap = 1;
896 				app_sa_prm.udp_encap = 1;
897 				udp_encap_p = 1;
898 				break;
899 			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
900 				rule->udp_encap = 1;
901 				rule->udp.sport = 0;
902 				rule->udp.dport = 4500;
903 				break;
904 			default:
905 				APP_CHECK(0, status,
906 					"UDP encapsulation not supported for "
907 					"security session type %d",
908 					ips->type);
909 				return;
910 			}
911 			continue;
912 		}
913 
914 		/* unrecognizable input */
915 		APP_CHECK(0, status, "unrecognized input \"%s\"",
916 			tokens[ti]);
917 		return;
918 	}
919 
920 	if (aead_algo_p) {
921 		APP_CHECK(cipher_algo_p == 0, status,
922 				"AEAD used, no need for cipher options");
923 		if (status->status < 0)
924 			return;
925 
926 		APP_CHECK(auth_algo_p == 0, status,
927 				"AEAD used, no need for auth options");
928 		if (status->status < 0)
929 			return;
930 	} else {
931 		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
932 		if (status->status < 0)
933 			return;
934 
935 		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
936 		if (status->status < 0)
937 			return;
938 	}
939 
940 	APP_CHECK(mode_p == 1, status, "missing mode option");
941 	if (status->status < 0)
942 		return;
943 
944 	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
945 			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
946 		printf("Missing portid option, falling back to non-offload\n");
947 
948 	if (!type_p || (!portid_p && ips->type !=
949 			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
950 		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
951 	}
952 
953 	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
954 		wrkr_flags |= INL_CR_F;
955 	else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
956 		wrkr_flags |= INL_PR_F;
957 	else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
958 		wrkr_flags |= LA_PR_F;
959 	else
960 		wrkr_flags |= LA_ANY_F;
961 
962 	nb_crypto_sessions++;
963 	*ri = *ri + 1;
964 }
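
/*
 * Key layout note for the salted algorithms parsed above (AES-CTR,
 * AES-GMAC and the AEADs): the configured key string carries the implicit
 * salt in its last four bytes, which the parser strips into rule->salt:
 *
 *   cfg key bytes: [ 0 .. key_len-5 ][ key_len-4 .. key_len-1 ]
 *                    cipher/auth key          32-bit salt
 *
 * e.g. a 20-byte aes-128-gcm cfg key leaves cipher_key_len == 16.
 */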
965 
966 static void
967 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
968 {
969 	uint32_t i;
970 	uint8_t a, b, c, d;
971 	const struct rte_ipsec_session *ips;
972 	const struct rte_ipsec_session *fallback_ips;
973 
974 	printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);
975 
976 	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
977 		if (cipher_algos[i].algo == sa->cipher_algo &&
978 				cipher_algos[i].key_len == sa->cipher_key_len) {
979 			printf("%s ", cipher_algos[i].keyword);
980 			break;
981 		}
982 	}
983 
984 	for (i = 0; i < RTE_DIM(auth_algos); i++) {
985 		if (auth_algos[i].algo == sa->auth_algo) {
986 			printf("%s ", auth_algos[i].keyword);
987 			break;
988 		}
989 	}
990 
991 	for (i = 0; i < RTE_DIM(aead_algos); i++) {
992 		if (aead_algos[i].algo == sa->aead_algo &&
993 				aead_algos[i].key_len-4 == sa->cipher_key_len) {
994 			printf("%s ", aead_algos[i].keyword);
995 			break;
996 		}
997 	}
998 
999 	printf("mode:");
1000 	if (sa->udp_encap)
1001 		printf("UDP encapsulated ");
1002 
1003 	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1004 	case IP4_TUNNEL:
1005 		printf("IP4Tunnel ");
1006 		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
1007 		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
1008 		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
1009 		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
1010 		break;
1011 	case IP6_TUNNEL:
1012 		printf("IP6Tunnel ");
1013 		for (i = 0; i < 16; i++) {
1014 			if (i % 2 && i != 15)
1015 				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
1016 			else
1017 				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
1018 		}
1019 		printf(" ");
1020 		for (i = 0; i < 16; i++) {
1021 			if (i % 2 && i != 15)
1022 				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
1023 			else
1024 				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
1025 		}
1026 		break;
1027 	case TRANSPORT:
1028 		printf("Transport ");
1029 		break;
1030 	}
1031 
1032 	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
1033 	printf(" type:");
1034 	switch (ips->type) {
1035 	case RTE_SECURITY_ACTION_TYPE_NONE:
1036 		printf("no-offload ");
1037 		break;
1038 	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1039 		printf("inline-crypto-offload ");
1040 		break;
1041 	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1042 		printf("inline-protocol-offload ");
1043 		break;
1044 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
1045 		printf("lookaside-protocol-offload ");
1046 		break;
1047 	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
1048 		printf("cpu-crypto-accelerated ");
1049 		break;
1050 	}
1051 
1052 	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
1053 	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
1054 		printf("inline fallback: ");
1055 		switch (fallback_ips->type) {
1056 		case RTE_SECURITY_ACTION_TYPE_NONE:
1057 			printf("lookaside-none");
1058 			break;
1059 		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
1060 			printf("cpu-crypto-accelerated");
1061 			break;
1062 		default:
1063 			printf("invalid");
1064 			break;
1065 		}
1066 	}
1067 	if (sa->fdir_flag == 1)
1068 		printf("flow-direction port %d queue %d", sa->portid,
1069 				sa->fdir_qid);
1070 
1071 	printf("\n");
1072 }
1073 
1074 static struct sa_ctx *
1075 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
1076 {
1077 	char s[PATH_MAX];
1078 	struct sa_ctx *sa_ctx;
1079 	uint32_t mz_size;
1080 	const struct rte_memzone *mz;
1081 
1082 	snprintf(s, sizeof(s), "%s_%u", name, socket_id);
1083 
1084 	/* Create SA context */
1085 	printf("Creating SA context with %u maximum entries on socket %d\n",
1086 			nb_sa, socket_id);
1087 
1088 	mz_size = sizeof(struct ipsec_xf) * nb_sa;
1089 	mz = rte_memzone_reserve(s, mz_size, socket_id,
1090 			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
1091 	if (mz == NULL) {
1092 		printf("Failed to allocate SA XFORM memory\n");
1093 		rte_errno = ENOMEM;
1094 		return NULL;
1095 	}
1096 
1097 	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
1098 		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
1099 
1100 	if (sa_ctx == NULL) {
1101 		printf("Failed to allocate SA CTX memory\n");
1102 		rte_errno = ENOMEM;
1103 		rte_memzone_free(mz);
1104 		return NULL;
1105 	}
1106 
1107 	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
1108 	sa_ctx->nb_sa = nb_sa;
1109 
1110 	return sa_ctx;
1111 }
1112 
1113 static int
1114 check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
1115 {
1116 	struct rte_eth_dev_info dev_info;
1117 	int retval;
1118 
1119 	retval = rte_eth_dev_info_get(portid, &dev_info);
1120 	if (retval != 0) {
1121 		RTE_LOG(ERR, IPSEC,
1122 			"Error getting device (port %u) info: %s\n",
1123 			portid, strerror(-retval));
1124 
1125 		return retval;
1126 	}
1127 
1128 	if (inbound) {
1129 		if ((dev_info.rx_offload_capa &
1130 				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
1131 			RTE_LOG(WARNING, IPSEC,
1132 				"hardware RX IPSec offload is not supported\n");
1133 			return -EINVAL;
1134 		}
1135 
1136 	} else { /* outbound */
1137 		if ((dev_info.tx_offload_capa &
1138 				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
1139 			RTE_LOG(WARNING, IPSEC,
1140 				"hardware TX IPSec offload is not supported\n");
1141 			return -EINVAL;
1142 		}
1143 		if (tso && (dev_info.tx_offload_capa &
1144 				RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
1145 			RTE_LOG(WARNING, IPSEC,
1146 				"hardware TCP TSO offload is not supported\n");
1147 			return -EINVAL;
1148 		}
1149 	}
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Helper function, tries to determine next_proto for SPI
1155  * by searching through SP rules.
1156  */
1157 static int
1158 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1159 		struct ip_addr ip_addr[2], uint32_t mask[2])
1160 {
1161 	int32_t rc4, rc6;
1162 
1163 	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1164 				ip_addr, mask);
1165 	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1166 				ip_addr, mask);
1167 
1168 	if (rc4 >= 0) {
1169 		if (rc6 >= 0) {
1170 			RTE_LOG(ERR, IPSEC,
1171 				"%s: SPI %u used simultaneously by "
1172 				"IPv4(%d) and IPv6 (%d) SP rules\n",
1173 				__func__, spi, rc4, rc6);
1174 			return -EINVAL;
1175 		} else
1176 			return IPPROTO_IPIP;
1177 	} else if (rc6 < 0) {
1178 		RTE_LOG(ERR, IPSEC,
1179 			"%s: SPI %u is not used by any SP rule\n",
1180 			__func__, spi);
1181 		return -EINVAL;
1182 	} else
1183 		return IPPROTO_IPV6;
1184 }
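
/*
 * Outcome summary, where rc4/rc6 are the IPv4/IPv6 SP lookup results:
 *
 *   rc4 >= 0, rc6 >= 0  ->  -EINVAL (SPI used by both address families)
 *   rc4 >= 0, rc6 <  0  ->  IPPROTO_IPIP
 *   rc4 <  0, rc6 >= 0  ->  IPPROTO_IPV6
 *   rc4 <  0, rc6 <  0  ->  -EINVAL (SPI not used by any SP rule)
 */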
1185 
1186 /*
1187  * Helper function for getting source and destination IP addresses
1188  * from the SP. Needed for inline crypto transport mode, as addresses are
1189  * not provided in the config file for that mode. It checks whether an SP
1190  * exists for the current SA and, based on the protocol returned, stores
1191  * the appropriate addresses obtained from the SP into the SA.
1192  */
1193 static int
1194 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1195 {
1196 	int protocol;
1197 	struct ip_addr ip_addr[2];
1198 	uint32_t mask[2];
1199 
1200 	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1201 	if (protocol < 0)
1202 		return protocol;
1203 	else if (protocol == IPPROTO_IPIP) {
1204 		sa->flags |= IP4_TRANSPORT;
1205 		if (mask[0] == IP4_FULL_MASK &&
1206 				mask[1] == IP4_FULL_MASK &&
1207 				ip_addr[0].ip.ip4 != 0 &&
1208 				ip_addr[1].ip.ip4 != 0) {
1209 
1210 			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1211 			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1212 		} else {
1213 			RTE_LOG(ERR, IPSEC,
1214 			"%s: No valid address or mask entry in"
1215 			" IPv4 SP rule for SPI %u\n",
1216 			__func__, sa->spi);
1217 			return -EINVAL;
1218 		}
1219 	} else if (protocol == IPPROTO_IPV6) {
1220 		sa->flags |= IP6_TRANSPORT;
1221 		if (mask[0] == IP6_FULL_MASK &&
1222 				mask[1] == IP6_FULL_MASK &&
1223 				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
1224 				ip_addr[0].ip.ip6.ip6[1] != 0) &&
1225 				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
1226 				ip_addr[1].ip.ip6.ip6[1] != 0)) {
1227 
1228 			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1229 			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1230 		} else {
1231 			RTE_LOG(ERR, IPSEC,
1232 			"%s: No valid address or mask entry in"
1233 			" IPv6 SP rule for SPI %u\n",
1234 			__func__, sa->spi);
1235 			return -EINVAL;
1236 		}
1237 	}
1238 	return 0;
1239 }
1240 
1241 static int
1242 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1243 		uint32_t nb_entries, uint32_t inbound,
1244 		struct socket_ctx *skt_ctx,
1245 		struct ipsec_ctx *ips_ctx[],
1246 		const struct eventmode_conf *em_conf)
1247 {
1248 	struct ipsec_sa *sa;
1249 	uint32_t i, idx;
1250 	uint16_t iv_length, aad_length;
1251 	uint16_t auth_iv_length = 0;
1252 	int inline_status;
1253 	int32_t rc;
1254 	struct rte_ipsec_session *ips;
1255 
1256 	/* for ESN, the upper 32 bits of the SQN also need to be in the AAD */
1257 	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1258 
1259 	for (i = 0; i < nb_entries; i++) {
1260 		idx = i;
1261 		sa = &sa_ctx->sa[idx];
1262 		if (sa->spi != 0) {
1263 			printf("Index %u already in use by SPI %u\n",
1264 					idx, sa->spi);
1265 			return -EINVAL;
1266 		}
1267 		*sa = entries[i];
1268 
1269 		if (inbound) {
1270 			rc = ipsec_sad_add(&sa_ctx->sad, sa);
1271 			if (rc != 0)
1272 				return rc;
1273 		}
1274 
1275 		sa->seq = 0;
1276 		ips = ipsec_get_primary_session(sa);
1277 
1278 		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1279 			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1280 			if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
1281 				return -EINVAL;
1282 		}
1283 
1284 		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1285 		case IP4_TUNNEL:
1286 			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1287 			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1288 			break;
1289 		case TRANSPORT:
1290 			if (ips->type ==
1291 				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1292 				inline_status =
1293 					sa_add_address_inline_crypto(sa);
1294 				if (inline_status < 0)
1295 					return inline_status;
1296 			}
1297 			break;
1298 		}
1299 
1300 
1301 		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
1302 			sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
1303 			sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
1304 
1305 			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
1306 				iv_length = 11;
1307 			else
1308 				iv_length = 12;
1309 
1310 			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1311 			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1312 			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1313 			sa_ctx->xf[idx].a.aead.key.length =
1314 				sa->cipher_key_len;
1315 			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1316 				RTE_CRYPTO_AEAD_OP_DECRYPT :
1317 				RTE_CRYPTO_AEAD_OP_ENCRYPT;
1318 			sa_ctx->xf[idx].a.next = NULL;
1319 			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1320 			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1321 			sa_ctx->xf[idx].a.aead.aad_length =
1322 				sa->aad_len + aad_length;
1323 			sa_ctx->xf[idx].a.aead.digest_length =
1324 				sa->digest_len;
1325 
1326 			sa->xforms = &sa_ctx->xf[idx].a;
1327 		} else {
1328 			switch (sa->cipher_algo) {
1329 			case RTE_CRYPTO_CIPHER_NULL:
1330 			case RTE_CRYPTO_CIPHER_DES_CBC:
1331 			case RTE_CRYPTO_CIPHER_3DES_CBC:
1332 			case RTE_CRYPTO_CIPHER_AES_CBC:
1333 				iv_length = sa->iv_len;
1334 				break;
1335 			case RTE_CRYPTO_CIPHER_AES_CTR:
1336 				/* Length includes the 8B per-packet IV, 4B nonce
1337 				 * and 4B counter as populated in the datapath.
1338 				 */
1339 				iv_length = 16;
1340 				break;
1341 			default:
1342 				RTE_LOG(ERR, IPSEC_ESP,
1343 						"unsupported cipher algorithm %u\n",
1344 						sa->cipher_algo);
1345 				return -EINVAL;
1346 			}
1347 
1348 			/* AES_GMAC uses salt like AEAD algorithms */
1349 			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
1350 				auth_iv_length = 12;
1351 
1352 			if (inbound) {
1353 				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1354 				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1355 				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1356 				sa_ctx->xf[idx].b.cipher.key.length =
1357 					sa->cipher_key_len;
1358 				sa_ctx->xf[idx].b.cipher.op =
1359 					RTE_CRYPTO_CIPHER_OP_DECRYPT;
1360 				sa_ctx->xf[idx].b.next = NULL;
1361 				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1362 				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1363 
1364 				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1365 				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1366 				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1367 				sa_ctx->xf[idx].a.auth.key.length =
1368 					sa->auth_key_len;
1369 				sa_ctx->xf[idx].a.auth.digest_length =
1370 					sa->digest_len;
1371 				sa_ctx->xf[idx].a.auth.op =
1372 					RTE_CRYPTO_AUTH_OP_VERIFY;
1373 				sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
1374 				sa_ctx->xf[idx].a.auth.iv.length = auth_iv_length;
1375 
1376 			} else { /* outbound */
1377 				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1378 				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1379 				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1380 				sa_ctx->xf[idx].a.cipher.key.length =
1381 					sa->cipher_key_len;
1382 				sa_ctx->xf[idx].a.cipher.op =
1383 					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1384 				sa_ctx->xf[idx].a.next = NULL;
1385 				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1386 				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1387 
1388 				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1389 				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1390 				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1391 				sa_ctx->xf[idx].b.auth.key.length =
1392 					sa->auth_key_len;
1393 				sa_ctx->xf[idx].b.auth.digest_length =
1394 					sa->digest_len;
1395 				sa_ctx->xf[idx].b.auth.op =
1396 					RTE_CRYPTO_AUTH_OP_GENERATE;
1397 				sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
1398 				sa_ctx->xf[idx].b.auth.iv.length = auth_iv_length;
1399 
1400 			}
1401 
1402 			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
1403 				sa->xforms = inbound ?
1404 					&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
1405 				sa->xforms->next = NULL;
1406 
1407 			} else {
1408 				sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1409 				sa_ctx->xf[idx].b.next = NULL;
1410 				sa->xforms = &sa_ctx->xf[idx].a;
1411 			}
1412 		}
1413 
1414 		if (ips->type ==
1415 			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1416 			ips->type ==
1417 			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1418 			rc = create_inline_session(skt_ctx, sa, ips);
1419 			if (rc != 0) {
1420 				RTE_LOG(ERR, IPSEC_ESP,
1421 					"create_inline_session() failed\n");
1422 				return -EINVAL;
1423 			}
1424 		} else {
1425 			rc = create_lookaside_session(ips_ctx, skt_ctx,
1426 						      em_conf, sa, ips);
1427 			if (rc != 0) {
1428 				RTE_LOG(ERR, IPSEC_ESP,
1429 					"create_lookaside_session() failed\n");
1430 				return -EINVAL;
1431 			}
1432 		}
1433 
1434 		if (sa->fdir_flag && inbound) {
1435 			rc = create_ipsec_esp_flow(sa);
1436 			if (rc != 0)
1437 				RTE_LOG(ERR, IPSEC_ESP,
1438 					"create_ipsec_esp_flow() failed\n");
1439 		}
1440 		print_one_sa_rule(sa, inbound);
1441 	}
1442 
1443 	return 0;
1444 }
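
/*
 * Transform chains built by sa_add_rules() above for the non-AEAD case:
 *
 *   inbound:   xf[idx].a AUTH (verify)    -> xf[idx].b CIPHER (decrypt)
 *   outbound:  xf[idx].a CIPHER (encrypt) -> xf[idx].b AUTH (generate)
 *
 * AES-GMAC uses only the single auth transform; AEAD algorithms use one
 * combined transform in xf[idx].a.
 */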
1445 
1446 static inline int
1447 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1448 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
1449 		struct ipsec_ctx *ips_ctx[],
1450 		const struct eventmode_conf *em_conf)
1451 {
1452 	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
1453 }
1454 
1455 static inline int
1456 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1457 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
1458 		struct ipsec_ctx *ips_ctx[],
1459 		const struct eventmode_conf *em_conf)
1460 {
1461 	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
1462 }
1463 
1464 /*
1465  * helper function, fills parameters that are identical for all SAs
1466  */
1467 static void
1468 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1469 	const struct app_sa_prm *app_prm)
1470 {
1471 	memset(prm, 0, sizeof(*prm));
1472 
1473 	prm->flags = app_prm->flags;
1474 	prm->ipsec_xform.options.esn = app_prm->enable_esn;
1475 	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1476 }
1477 
1478 static int
1479 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1480 	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1481 {
1482 	int32_t rc;
1483 
1484 	/*
1485 	 * Try to get the next proto for the SPI by searching for that SPI
1486 	 * in the SPD. Probably not the optimal way, but nothing better is
1487 	 * available right now.
1488 	 */
1489 	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1490 	if (rc < 0)
1491 		return rc;
1492 
1493 	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1494 	prm->userdata = (uintptr_t)ss;
1495 
1496 	/* setup ipsec xform */
1497 	prm->ipsec_xform.spi = ss->spi;
1498 	prm->ipsec_xform.salt = ss->salt;
1499 	prm->ipsec_xform.direction = ss->direction;
1500 	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1501 	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1502 		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1503 		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1504 	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
1505 	prm->ipsec_xform.udp.dport = ss->udp.dport;
1506 	prm->ipsec_xform.udp.sport = ss->udp.sport;
1507 	prm->ipsec_xform.options.ecn = 1;
1508 	prm->ipsec_xform.options.copy_dscp = 1;
1509 
1510 	if (ss->esn > 0) {
1511 		prm->ipsec_xform.options.esn = 1;
1512 		prm->ipsec_xform.esn.value = ss->esn;
1513 	}
1514 
1515 	if (IS_IP4_TUNNEL(ss->flags)) {
1516 		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1517 		prm->tun.hdr_len = sizeof(*v4);
1518 		prm->tun.next_proto = rc;
1519 		prm->tun.hdr = v4;
1520 	} else if (IS_IP6_TUNNEL(ss->flags)) {
1521 		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1522 		prm->tun.hdr_len = sizeof(*v6);
1523 		prm->tun.next_proto = rc;
1524 		prm->tun.hdr = v6;
1525 	} else {
1526 		/* transport mode */
1527 		prm->trs.proto = rc;
1528 	}
1529 
1530 	/* setup crypto section */
1531 	prm->crypto_xform = ss->xforms;
1532 	return 0;
1533 }
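
/*
 * Note: for tunnel SAs prm->tun.hdr points at a caller-owned template
 * header (built on the stack in ipsec_sa_init() below); rte_ipsec_sa_init()
 * takes its own copy, so the template only needs to live for the call.
 */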
1534 
1535 static int
1536 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1537 {
1538 	int32_t rc = 0;
1539 
1540 	ss->sa = sa;
1541 
1542 	rc = rte_ipsec_session_prepare(ss);
1543 	if (rc != 0)
1544 		memset(ss, 0, sizeof(*ss));
1545 
1546 	return rc;
1547 }
1548 
1549 /*
1550  * Initialise related rte_ipsec_sa object.
1551  */
1552 static int
1553 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
1554 		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
1555 		const struct eventmode_conf *em_conf)
1556 {
1557 	int rc;
1558 	struct rte_ipsec_sa_prm prm;
1559 	struct rte_ipsec_session *ips;
1560 	struct rte_ipv4_hdr v4  = {
1561 		.version_ihl = IPVERSION << 4 |
1562 			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1563 		.time_to_live = IPDEFTTL,
1564 		.next_proto_id = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
1565 		.src_addr = lsa->src.ip.ip4,
1566 		.dst_addr = lsa->dst.ip.ip4,
1567 	};
1568 	struct rte_ipv6_hdr v6 = {
1569 		.vtc_flow = htonl(IP6_VERSION << 28),
1570 		.proto = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
1571 	};
1572 
1573 	if (IS_IP6_TUNNEL(lsa->flags)) {
1574 		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1575 		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1576 	}
1577 
1578 	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1579 	if (rc == 0)
1580 		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1581 	if (rc < 0)
1582 		return rc;
1583 
1584 	if (lsa->flags & SA_TELEMETRY_ENABLE)
1585 		rte_ipsec_telemetry_sa_add(sa);
1586 
1587 	/* init primary processing session */
1588 	ips = ipsec_get_primary_session(lsa);
1589 	rc = fill_ipsec_session(ips, sa);
1590 	if (rc != 0)
1591 		return rc;
1592 
1593 	/* init inline fallback processing session */
1594 	if (lsa->fallback_sessions == 1) {
1595 		struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
1596 		if (ipfs->security.ses == NULL) {
1597 			rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
1598 			if (rc != 0)
1599 				return rc;
1600 		}
1601 		rc = fill_ipsec_session(ipfs, sa);
1602 	}
1603 
1604 	return rc;
1605 }
1606 
1607 /*
1608  * Allocate space and init rte_ipsec_sa structures,
1609  * one per session.
1610  */
1611 static int
1612 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
1613 		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
1614 		const struct eventmode_conf *em_conf)
1615 {
1616 	int32_t rc, sz;
1617 	uint32_t i, idx;
1618 	size_t tsz;
1619 	struct rte_ipsec_sa *sa;
1620 	struct ipsec_sa *lsa;
1621 	struct rte_ipsec_sa_prm prm;
1622 
1623 	/* determine SA size */
1624 	idx = 0;
1625 	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1626 	sz = rte_ipsec_sa_size(&prm);
1627 	if (sz < 0) {
1628 		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1629 			"failed to determine SA size, error code: %d\n",
1630 			__func__, ctx, nb_ent, socket, sz);
1631 		return sz;
1632 	}
1633 
1634 	tsz = sz * nb_ent;
1635 
1636 	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1637 	if (ctx->satbl == NULL) {
1638 		RTE_LOG(ERR, IPSEC,
1639 			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
1640 			__func__,  ctx, nb_ent, socket, tsz);
1641 		return -ENOMEM;
1642 	}
1643 
1644 	rc = 0;
1645 	for (i = 0; i != nb_ent && rc == 0; i++) {
1646 
1647 		idx = i;
1648 
1649 		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1650 		lsa = ctx->sa + idx;
1651 
1652 		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
1653 	}
1654 
1655 	return rc;
1656 }
1657 
1658 static int
1659 sa_cmp(const void *p, const void *q)
1660 {
1661 	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1662 	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1663 
1664 	return (spi1 < spi2) ? -1 : (spi1 > spi2) ? 1 : 0;
1665 }
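
/*
 * Why not (int)(spi1 - spi2): the unsigned subtraction wraps, e.g.
 * spi1 = 0x80000001, spi2 = 1 yields 0x80000000, which converts to a
 * negative int on the usual two's-complement targets and would invert
 * the ordering; the explicit comparison above is safe for the full
 * 32-bit SPI range.
 */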
1666 
1667 /*
1668  * Binary-search the SA rules for an SA with the given SPI
1669  */
1670 int
1671 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1672 {
1673 	uint32_t num;
1674 	struct ipsec_sa *sa;
1675 	struct ipsec_sa tmpl;
1676 	const struct ipsec_sa *sar;
1677 
1678 	sar = sa_ctx->sa;
1679 	if (inbound != 0)
1680 		num = nb_sa_in;
1681 	else
1682 		num = nb_sa_out;
1683 
1684 	tmpl.spi = spi;
1685 
1686 	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1687 	if (sa != NULL)
1688 		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1689 
1690 	return -ENOENT;
1691 }
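
/*
 * bsearch() requires the rule arrays to be sorted by SPI, so sa_sort_arr()
 * (see below) is expected to run once configuration parsing is complete.
 * Illustrative call:
 *
 *   sa_sort_arr();
 *   idx = sa_spi_present(ctx->sa_in, spi, 1); // >= 0 on hit, else -ENOENT
 */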
1692 
1693 void
1694 sa_init(struct socket_ctx *ctx, int32_t socket_id,
1695 	struct lcore_conf *lcore_conf,
1696 	const struct eventmode_conf *em_conf)
1697 {
1698 	int32_t rc;
1699 	const char *name;
1700 	uint32_t lcore_id;
1701 	struct ipsec_ctx *ipsec_ctx[RTE_MAX_LCORE];
1702 
1703 	if (ctx == NULL)
1704 		rte_exit(EXIT_FAILURE, "NULL context.\n");
1705 
1706 	if (ctx->sa_in != NULL)
1707 		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1708 				"initialized\n", socket_id);
1709 
1710 	if (ctx->sa_out != NULL)
1711 		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1712 				"initialized\n", socket_id);
1713 
1714 	if (nb_sa_in > 0) {
1715 		name = "sa_in";
1716 		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1717 		if (ctx->sa_in == NULL)
1718 			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1719 				"context %s in socket %d\n", rte_errno,
1720 				name, socket_id);
1721 
1722 		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1723 				&sa_in_cnt);
1724 		if (rc != 0)
1725 			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1726 		RTE_LCORE_FOREACH(lcore_id)
1727 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
1728 		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);
1729 
1730 		if (app_sa_prm.enable != 0) {
1731 			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1732 				socket_id, ctx, ipsec_ctx, em_conf);
1733 			if (rc != 0)
1734 				rte_exit(EXIT_FAILURE,
1735 					"failed to init inbound SAs\n");
1736 		}
1737 	} else
1738 		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1739 
1740 	if (nb_sa_out > 0) {
1741 		name = "sa_out";
1742 		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1743 		if (ctx->sa_out == NULL)
1744 			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1745 				"context %s in socket %d\n", rte_errno,
1746 				name, socket_id);
1747 
1748 		RTE_LCORE_FOREACH(lcore_id)
1749 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
1750 		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);
1751 
1752 		if (app_sa_prm.enable != 0) {
1753 			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1754 				socket_id, ctx, ipsec_ctx, em_conf);
1755 			if (rc != 0)
1756 				rte_exit(EXIT_FAILURE,
1757 					"failed to init outbound SAs\n");
1758 		}
1759 	} else
1760 		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1761 			"specified\n");
1762 }
1763 
1764 int
1765 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1766 {
1767 	struct ipsec_mbuf_metadata *priv;
1768 	struct ipsec_sa *sa;
1769 
1770 	priv = get_priv(m);
1771 	sa = priv->sa;
1772 	if (sa != NULL)
1773 		return (sa_ctx->sa[sa_idx].spi == sa->spi);
1774 
1775 	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1776 	return 0;
1777 }
1778 
1779 void
1780 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1781 		void *sa_arr[], uint16_t nb_pkts)
1782 {
1783 	uint32_t i;
1784 	void *result_sa;
1785 	struct ipsec_sa *sa;
1786 
1787 	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1788 
1789 	/*
1790 	 * Mark the need for inline offload fallback on the LSB of the SA
1791 	 * pointer. Thanks to the packet grouping mechanism that ipsec_process
1792 	 * uses, packets marked for fallback processing form a separate group.
1793 	 *
1794 	 * Because the tagged SA pointer is not safe to dereference, it is
1795 	 * cast to a generic pointer to prevent unintentional use. Use
1796 	 * ipsec_mask_saptr to get a valid struct pointer.
1797 	 */
1798 	for (i = 0; i < nb_pkts; i++) {
1799 		if (sa_arr[i] == NULL)
1800 			continue;
1801 
1802 		result_sa = sa = sa_arr[i];
1803 		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1804 			sa->fallback_sessions > 0) {
1805 			uintptr_t intsa = (uintptr_t)sa;
1806 			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1807 			result_sa = (void *)intsa;
1808 		}
1809 		sa_arr[i] = result_sa;
1810 	}
1811 }
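
/*
 * Consumer sketch (assuming the helpers declared in ipsec.h): entries in
 * sa_arr[] may carry IPSEC_SA_OFFLOAD_FALLBACK_FLAG in their LSB, so the
 * pointer must be masked before use:
 *
 *   struct ipsec_sa *sa = ipsec_mask_saptr(sa_arr[i]);
 *   int fallback = ((uintptr_t)sa_arr[i] &
 *                   IPSEC_SA_OFFLOAD_FALLBACK_FLAG) != 0;
 */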
1812 
1813 void
1814 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1815 		void *sa[], uint16_t nb_pkts)
1816 {
1817 	uint32_t i;
1818 
1819 	for (i = 0; i < nb_pkts; i++)
1820 		sa[i] = &sa_ctx->sa[sa_idx[i]];
1821 }
1822 
1823 /*
1824  * Select HW offloads to be used.
1825  */
1826 int
1827 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1828 		uint64_t *tx_offloads, uint8_t *hw_reassembly)
1829 {
1830 	struct ipsec_sa *rule;
1831 	uint32_t idx_sa;
1832 	enum rte_security_session_action_type rule_type;
1833 	struct rte_eth_dev_info dev_info;
1834 	int ret;
1835 
1836 	*rx_offloads = 0;
1837 	*tx_offloads = 0;
1838 	*hw_reassembly = 0;
1839 
1840 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1841 	if (ret != 0)
1842 		rte_exit(EXIT_FAILURE,
1843 			"Error getting device (port %u) info: %s\n",
1844 			port_id, strerror(-ret));
1845 
1846 	/* Check for inbound rules that use offloads and use this port */
1847 	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1848 		rule = &sa_in[idx_sa];
1849 		rule_type = ipsec_get_action_type(rule);
1850 		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1851 				rule_type ==
1852 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1853 				&& rule->portid == port_id)
1854 			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1855 		if (IS_HW_REASSEMBLY_EN(rule->flags)) {
1856 			*tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1857 			*hw_reassembly = 1;
1858 		}
1859 	}
1860 
1861 	/* Check for outbound rules that use offloads and use this port */
1862 	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1863 		rule = &sa_out[idx_sa];
1864 		rule_type = ipsec_get_action_type(rule);
1865 		if (rule->portid == port_id) {
1866 			switch (rule_type) {
1867 			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1868 				/* Checksum offload is not needed for inline
1869 				 * protocol: all processing for outbound IPsec
1870 				 * packets is taken care of implicitly, and
1871 				 * non-IPsec packets have no need of IPv4
1872 				 * checksum offload.
1873 				 */
1874 				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1875 				if (rule->mss)
1876 					*tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
1877 							 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
1878 				break;
1879 			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1880 				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1881 				if (rule->mss)
1882 					*tx_offloads |=
1883 						RTE_ETH_TX_OFFLOAD_TCP_TSO;
1884 				if (dev_info.tx_offload_capa &
1885 						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
1886 					*tx_offloads |=
1887 						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
1888 				break;
1889 			default:
1890 				/* Enable IPv4 checksum offload even if
1891 				 * one of the lookaside SAs is present.
1892 				 */
1893 				if (dev_info.tx_offload_capa &
1894 				    RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
1895 					*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
1896 				break;
1897 			}
1898 		} else {
1899 			if (dev_info.tx_offload_capa &
1900 			    RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
1901 				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
1902 		}
1903 	}
1904 	return 0;
1905 }
1906 
1907 void
1908 sa_sort_arr(void)
1909 {
1910 	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1911 	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1912 }
1913 
1914 uint32_t
1915 get_nb_crypto_sessions(void)
1916 {
1917 	return nb_crypto_sessions;
1918 }
1919