/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"

#define IPDEFTTL 64

#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)

#define MBUF_NO_SEC_OFFLOAD(m) (((m)->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};


const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};
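
/*
 * Note: for aes-128-ctr and aes-128-gcm the key_len above (20 bytes) is
 * the 16-byte cipher key followed by a 4-byte salt; parse_sa_tokens()
 * below strips the trailing 4 bytes into rule->salt. An illustrative
 * config fragment (made-up key material) matching the AEAD entry:
 *
 *	aead_algo aes-128-gcm
 *	aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 */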

#define SA_INIT_NB	128

static struct ipsec_sa *sa_out;
static uint32_t sa_out_sz;
static uint32_t nb_sa_out;
static struct ipsec_sa_cnt sa_out_cnt;

static struct ipsec_sa *sa_in;
static uint32_t sa_in_sz;
static uint32_t nb_sa_in;
static struct ipsec_sa_cnt sa_in_cnt;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  Parse an "x:x:x:x..." hex-byte key string into uint8_t *key.
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
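
/*
 * Example (illustrative): parse_key_string("de:ad:be:ef", key) returns 4
 * and fills key[] with {0xde, 0xad, 0xbe, 0xef}; any token longer than
 * two hex digits between ':' separators makes it return 0.
 */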

static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
	if (*sa_tbl == NULL) {
		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
		if (*sa_tbl == NULL)
			return -1;
		*cur_sz = SA_INIT_NB;
		return 0;
	}

	if (cur_cnt >= *cur_sz) {
		/* use a temporary so the old table is not leaked when
		 * realloc() fails */
		struct ipsec_sa *tmp = realloc(*sa_tbl,
			*cur_sz * sizeof(struct ipsec_sa) * 2);
		if (tmp == NULL)
			return -1;
		*sa_tbl = tmp;
		/* clean reallocated extra space */
		memset(&(*sa_tbl)[*cur_sz], 0,
			*cur_sz * sizeof(struct ipsec_sa));
		*cur_sz *= 2;
	}

	return 0;
}
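
/*
 * Growth sketch (illustrative): the first rule triggers a calloc of
 * SA_INIT_NB (128) zeroed entries; once cur_cnt reaches the current
 * size the table doubles, with the new upper half cleared above.
 */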

void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /* token index */
	uint32_t *ri; /* rule index */
	struct ipsec_sa_cnt *sa_cnt;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;
		sa_cnt = &sa_in_cnt;
		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
			return;
		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;
		sa_cnt = &sa_out_cnt;
		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
			return;
		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	ips = ipsec_get_primary_session(rule);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
				sa_cnt->nb_v4++;
				rule->flags = IP4_TUNNEL;
			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
				sa_cnt->nb_v6++;
				rule->flags = IP6_TUNNEL;
			} else if (strcmp(tokens[ti], "transport") == 0) {
				sa_cnt->nb_v4++;
				sa_cnt->nb_v6++;
				rule->flags = TRANSPORT;
			} else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
						tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session allowed if primary session "
				"is of type inline-crypto-offload only.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0) {
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			} else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			fallback_p = 1;
			continue;
		}

		/* unrecognized input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
				"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
				"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || (!portid_p && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}
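
/*
 * Illustrative rule lines accepted by the parser above (token 0 is the
 * direction, token 1 the SPI; keys and addresses are made up, and each
 * rule is a single line in the config file, wrapped here for width):
 *
 *	out 5 cipher_algo aes-128-cbc
 *	    cipher_key 00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff
 *	    auth_algo sha1-hmac
 *	    auth_key 00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33
 *	    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 *	in 105 aead_algo aes-128-gcm
 *	    aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 *	    mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
 *	    type inline-crypto-offload port_id 0
 */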

static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		printf("cpu-crypto-accelerated");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		switch (fallback_ips->type) {
		case RTE_SECURITY_ACTION_TYPE_NONE:
			printf("lookaside-none");
			break;
		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			printf("cpu-crypto-accelerated");
			break;
		default:
			printf("invalid");
			break;
		}
	}
	printf("\n");
}

struct ipsec_xf {
	struct rte_crypto_sym_xform a;
	struct rte_crypto_sym_xform b;
};

struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sad sad;
	struct ipsec_xf *xf;
	uint32_t nb_sa;
	struct ipsec_sa sa[];
};

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA context */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			nb_sa, socket_id);

	mz_size = sizeof(struct ipsec_xf) * nb_sa;
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA XFORM memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);

	if (sa_ctx == NULL) {
		printf("Failed to allocate SA CTX memory\n");
		rte_errno = ENOMEM;
		rte_memzone_free(mz);
		return NULL;
	}

	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
	sa_ctx->nb_sa = nb_sa;

	return sa_ctx;
}
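
/*
 * Naming sketch (illustrative): sa_create("sa_in", 0, nb) reserves a
 * memzone named "sa_in_0" to hold the nb crypto xforms and zmallocs
 * the sa_ctx with room for nb ipsec_sa entries.
 */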

static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4 (%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}

/*
 * Helper function for getting source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses
 * are not provided in the config file for that mode. It checks whether
 * an SP exists for the current SA and, depending on the protocol
 * returned, stores the addresses obtained from the SP into the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv4 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
				ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
				ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv6 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}

static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = i;
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];

		if (inbound) {
			rc = ipsec_sad_add(&sa_ctx->sad, sa);
			if (rc != 0)
				return rc;
		}

		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;
		}
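		/*
		 * Resulting non-AEAD xform chains (sketch): inbound is
		 * auth (VERIFY) -> cipher (DECRYPT), outbound is
		 * cipher (ENCRYPT) -> auth (GENERATE); xf[idx].a always
		 * heads the chain and xf[idx].b terminates it.
		 */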

		if (ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			rc = create_inline_session(skt_ctx, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_inline_session() failed\n");
				return -EINVAL;
			}
		}

		print_one_sa_rule(sa, inbound);
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}

/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}

static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the next proto for the SPI by searching for that
	 * SPI in the SPD. Probably not the optimal way, but nothing
	 * better seems available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
		ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1)
		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);

	return rc;
}

/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = 0;
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = i;

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}

static int
sa_cmp(const void *p, const void *q)
{
	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;

	/* compare instead of subtracting: the unsigned difference can
	 * wrap around and misorder large SPI values */
	return (spi1 < spi2) ? -1 : (spi1 > spi2);
}

/*
 * Walk through all SA rules to find an SA with given SPI
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
	uint32_t num;
	struct ipsec_sa *sa;
	struct ipsec_sa tmpl;
	const struct ipsec_sa *sar;

	sar = sa_ctx->sa;
	if (inbound != 0)
		num = nb_sa_in;
	else
		num = nb_sa_out;

	tmpl.spi = spi;

	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
	if (sa != NULL)
		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);

	return -ENOENT;
}
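
/*
 * Usage sketch (illustrative): the bsearch() above assumes the rule
 * array is sorted by SPI, so sa_sort_arr() must have run first:
 *
 *	sa_sort_arr();
 *	int idx = sa_spi_present(ctx->sa_in, spi, 1);
 *	if (idx < 0)
 *		report_missing_sa(spi);	(hypothetical handler)
 */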

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
				&sa_in_cnt);
		if (rc != 0)
			rte_exit(EXIT_FAILURE, "failed to init SAD\n");

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa_arr[], uint16_t nb_pkts)
{
	uint32_t i;
	void *result_sa;
	struct ipsec_sa *sa;

	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);

	/*
	 * Mark the need for an inline offload fallback in the LSB of the
	 * SA pointer. Thanks to the packet grouping mechanism used by
	 * ipsec_process, packets marked for fallback processing will form
	 * a separate group.
	 *
	 * Because such a tagged pointer is not safe to dereference, it is
	 * cast to a generic pointer to prevent unintentional use. Use
	 * ipsec_mask_saptr to recover a valid struct pointer.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (sa_arr[i] == NULL)
			continue;

		result_sa = sa = sa_arr[i];
		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
			sa->fallback_sessions > 0) {
			uintptr_t intsa = (uintptr_t)sa;
			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
			result_sa = (void *)intsa;
		}
		sa_arr[i] = result_sa;
	}
}
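
/*
 * Consumer-side sketch (illustrative): callers receiving the tagged
 * pointers produced above untag them before dereferencing, e.g.:
 *
 *	struct ipsec_sa *sa = ipsec_mask_saptr(sa_arr[i]);
 */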

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}

void
sa_sort_arr(void)
{
	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}