/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"

#define IPDEFTTL 64

#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
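
/*
 * The two masks above are full prefix lengths in bits, derived from the
 * address field sizes in struct ip_addr (defined in ipsec.h): a 4-byte
 * ip4 field gives IP4_FULL_MASK == 4 * 8 == 32, and a 16-byte ip6 field
 * gives IP6_FULL_MASK == 16 * 8 == 128.
 */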

#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};


const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};
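
/*
 * The three tables above define the algorithm keywords accepted by
 * parse_sa_tokens() below, together with the key/IV geometry each keyword
 * implies. As an illustrative example (the exact cfg syntax is described
 * in the ipsec-secgw documentation), an SA rule combining these keywords
 * could look like:
 *
 *   sa out 5 cipher_algo aes-128-cbc \
 *      cipher_key 00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff \
 *      auth_algo sha1-hmac \
 *      auth_key 00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33 \
 *      mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 * where the 16-byte cipher key matches key_len of "aes-128-cbc" and the
 * 20-byte auth key matches key_len of "sha1-hmac".
 */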

static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_out;

static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_in;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  parse an x:x:x:x... string of hex byte values into uint8_t *key
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failure
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
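
/*
 * Example: parse_key_string("a0:b1:c2:d3", key) stores the four bytes
 * 0xa0 0xb1 0xc2 0xd3 into key[] and returns 4. A token longer than two
 * hex digits between ':' separators (e.g. "a0:b1c2") makes it return 0.
 */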

void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /*token index*/
	uint32_t *ri /*rule index*/;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	ips = ipsec_get_primary_session(rule);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"",	tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
						tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session allowed if primary session "
				"is of type inline-crypto-offload only.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0) {
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			} else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			fallback_p = 1;
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
				"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
				"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}
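
/*
 * A minimal sketch of the input this parser expects (assuming the
 * whitespace tokenization performed by the cfg-file reader in parser.c):
 * each SA line arrives here as a tokens[] array such as
 *
 *   { "in", "10", "aead_algo", "aes-128-gcm", "aead_key",
 *     "de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:00:00:00:01",
 *     "mode", "ipv4-tunnel", "src", "172.16.2.5", "dst", "172.16.1.5" }
 *
 * For this example the 20-byte aead_key is split, per the code above,
 * into a 16-byte AES-GCM key plus a 4-byte salt taken from its tail.
 */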

static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
			printf("lookaside-none");
		else
			printf("invalid");
	}
	printf("\n");
}

struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	union {
		struct {
			struct rte_crypto_sym_xform a;
			struct rte_crypto_sym_xform b;
		};
	} xf[IPSEC_SA_MAX_ENTRIES];
};
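
/*
 * Layout note for sa_ctx::xf, inferred from sa_add_rules() below: for
 * non-AEAD SAs the two sym xforms are chained a -> b, with roles chosen
 * per direction (inbound: a = auth/verify, b = cipher/decrypt; outbound:
 * a = cipher/encrypt, b = auth/generate), while AEAD SAs use xf[idx].a
 * alone.
 */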

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			IPSEC_SA_MAX_ENTRIES, socket_id);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}
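
/*
 * Example: sa_create("sa_in", 0) reserves a memzone named "sa_in_0"
 * sized for a single struct sa_ctx. Memzones are never freed in this
 * sample application, so the context lives for the lifetime of the
 * process.
 */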

static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}
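
/*
 * get_spi_proto() return values at a glance:
 *   SPI present in IPv4 SP rules only  -> IPPROTO_IPIP
 *   SPI present in IPv6 SP rules only  -> IPPROTO_IPV6
 *   SPI present in both, or in neither -> -EINVAL
 */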

/*
 * Helper function for getting source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses are
 * not provided in the config file for that mode. It checks whether an SP
 * exists for the current SA and, based on the protocol type returned,
 * stores the appropriate addresses from the SP into the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv4 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
				ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
				ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv6 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}

static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}


		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			struct rte_ipsec_session *ips;
			iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			ips = ipsec_get_primary_session(sa);
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
				ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				rc = create_inline_session(skt_ctx, sa, ips);
				if (rc != 0) {
					RTE_LOG(ERR, IPSEC_ESP,
						"create_inline_session() failed\n");
					return -EINVAL;
				}
			}
			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}
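
/*
 * Note on indexing in sa_add_rules() above: each entry is stored at
 * SPI2IDX(spi) (a helper from ipsec.h that maps an SPI onto a slot of
 * the fixed-size table), so two configured SPIs that map to the same
 * slot are rejected with "Index %u already in use". Configurations
 * should therefore use SPIs that remain distinct under this mapping.
 */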

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}

/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}

static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the next proto for the SPI by searching for that SPI
	 * in the SPD. Probably not the optimal way, but nothing better
	 * seems available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
		ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4  = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};
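	/*
	 * Note: the templates above encode their version fields inline.
	 * For IPv4, version_ihl = (IPVERSION << 4) | (sizeof(v4) / 4),
	 * i.e. (4 << 4) | 5 == 0x45 (version 4, five 32-bit header words).
	 * For IPv6, vtc_flow = htonl(6 << 28) puts version 6 into the top
	 * nibble of the first word.
	 */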

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1)
		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);

	return rc;
}

/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
	uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = SPI2IDX(ent[0].spi);
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__,  ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = SPI2IDX(ent[i].spi);

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}
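
/*
 * Resulting layout: ctx->satbl is a flat array of nb_ent fixed-size
 * rte_ipsec_sa objects addressed by entry order (sz * i), while the
 * companion ipsec_sa entries live in ctx->sa[] addressed by SPI2IDX();
 * ipsec_sa_init() ties each such pair together.
 */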

/*
 * Walk through all SA rules to find an SA with given SPI
 */
int
sa_spi_present(uint32_t spi, int inbound)
{
	uint32_t i, num;
	const struct ipsec_sa *sar;

	if (inbound != 0) {
		sar = sa_in;
		num = nb_sa_in;
	} else {
		sar = sa_out;
		num = nb_sa_out;
	}

	for (i = 0; i != num; i++) {
		if (sar[i].spi == spi)
			return i;
	}

	return -ENOENT;
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		void **sa_ret)
{
	struct rte_esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;
	void *result_sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	esp = rte_pktmbuf_mtod_offset(pkt, struct rte_esp_hdr *, pkt->l3_len);

	if (esp->spi == INVALID_SPI)
		return;

	result_sa = sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	/*
	 * Mark the need for inline offload fallback in the LSB of the SA
	 * pointer. Thanks to the packet grouping mechanism that
	 * ipsec_process uses, packets marked for fallback processing will
	 * form a separate group.
	 *
	 * Because the tagged SA pointer is not safe to use directly, it is
	 * cast to a generic pointer to prevent unintentional use. Use
	 * ipsec_mask_saptr to get a valid struct pointer.
	 */
	if (MBUF_NO_SEC_OFFLOAD(pkt) && sa->fallback_sessions > 0) {
		uintptr_t intsa = (uintptr_t)sa;
		intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
		result_sa = (void *)intsa;
	}

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = result_sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = result_sa;
		break;
	case TRANSPORT:
		*sa_ret = result_sa;
	}
}
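
/*
 * A minimal sketch of how the tagged pointer produced above is meant to
 * be consumed on the processing path (ipsec_mask_saptr() in ipsec.h does
 * the unmasking; the snippet below is illustrative only):
 *
 *   uintptr_t v = (uintptr_t)sa_ptr_from_lookup;
 *   int fallback = v & IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
 *   struct ipsec_sa *sa =
 *       (struct ipsec_sa *)(v & ~(uintptr_t)IPSEC_SA_OFFLOAD_FALLBACK_FLAG);
 *
 * i.e. the flag bit is stripped before the pointer is dereferenced.
 */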

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}
1508