/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"

#define IPDEFTTL 64

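/* prefix length of a fully specified address: 32 for IPv4, 128 for IPv6 */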
#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)

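/* true when the PMD did not apply inline security processing to the mbuf on RX */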
#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};


const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-192-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 24
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-192-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 28,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-256-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	}
};
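
/*
 * Note: for aes-*-ctr and aes-*-gcm the key_len above is 4 bytes larger
 * than the raw cipher key: per RFC 3686/RFC 4106 the configured key
 * material carries a 4-byte nonce/salt at its end, which
 * parse_sa_tokens() strips off into rule->salt.
 */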

#define SA_INIT_NB	128

static uint32_t nb_crypto_sessions;
struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;

struct ipsec_sa *sa_in;
uint32_t nb_sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  Parse a hex byte key string of the form x:x:x:x... into uint8_t *key.
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
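/*
 * Example: parse_key_string("de:ad:be:ef", key) returns 4 and fills
 * key[] with {0xde, 0xad, 0xbe, 0xef}.
 */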
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
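/*
 * Grow one of the sa_in/sa_out arrays: the first call allocates
 * SA_INIT_NB zeroed entries; afterwards the capacity is doubled
 * whenever cur_cnt reaches *cur_sz (128 -> 256 -> 512 -> ...).
 */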
static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
	if (*sa_tbl == NULL) {
		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
		if (*sa_tbl == NULL)
			return -1;
		*cur_sz = SA_INIT_NB;
		return 0;
	}

	if (cur_cnt >= *cur_sz) {
		struct ipsec_sa *tmp;

		/* use a temporary so the old array is not leaked
		 * (and *sa_tbl not clobbered) if realloc() fails
		 */
		tmp = realloc(*sa_tbl,
			*cur_sz * sizeof(struct ipsec_sa) * 2);
		if (tmp == NULL)
			return -1;
		*sa_tbl = tmp;
		/* clean reallocated extra space */
		memset(&(*sa_tbl)[*cur_sz], 0,
			*cur_sz * sizeof(struct ipsec_sa));
		*cur_sz *= 2;
	}

	return 0;
}
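/*
 * Parse one SA rule from the config file. As consumed here, tokens[0]
 * is the direction ("in" or "out"), tokens[1] is the SPI, and the rest
 * are keyword/value pairs: "mode", "cipher_algo"/"cipher_key",
 * "auth_algo"/"auth_key", "aead_algo"/"aead_key", "src", "dst",
 * "type", "port_id", "fallback" and "flow-direction".
 */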
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /*token index*/
	uint32_t *ri /*rule index*/;
	struct ipsec_sa_cnt *sa_cnt;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;
	int16_t status_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;
		sa_cnt = &sa_in_cnt;
		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
			return;
		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;
		sa_cnt = &sa_out_cnt;
		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
			return;
		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	rule->portid = UINT16_MAX;
	ips = ipsec_get_primary_session(rule);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
				sa_cnt->nb_v4++;
				rule->flags = IP4_TUNNEL;
			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
				sa_cnt->nb_v6++;
				rule->flags = IP6_TUNNEL;
			} else if (strcmp(tokens[ti], "transport") == 0) {
				sa_cnt->nb_v4++;
				sa_cnt->nb_v6++;
				rule->flags = TRANSPORT;
			} else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

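			/*
			 * RFC 3686: the last 4 bytes of the configured
			 * AES-CTR key are the nonce ("salt"), kept
			 * separately from the cipher key proper.
			 */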
			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

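			/*
			 * RFC 4106: the last 4 bytes of the configured
			 * AES-GCM key are the salt, stored separately
			 * and prepended to the per-packet IV to form
			 * the 12-byte nonce.
			 */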
			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"",	tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
						tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (rule->portid == UINT16_MAX)
				rule->portid = atoi(tokens[ti]);
			else if (rule->portid != atoi(tokens[ti])) {
				APP_CHECK(0, status,
					"portid %s not matching with already assigned portid %u",
					tokens[ti], rule->portid);
				return;
			}
			portid_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session allowed if primary session "
				"is of type inline-crypto-offload only.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			nb_crypto_sessions++;
			fallback_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "flow-direction") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				rule->fdir_flag = 1;
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (rule->portid == UINT16_MAX)
					rule->portid = atoi(tokens[ti]);
				else if (rule->portid != atoi(tokens[ti])) {
					APP_CHECK(0, status,
						"portid %s not matching with already assigned portid %u",
						tokens[ti], rule->portid);
					return;
				}
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				rule->fdir_qid = atoi(tokens[ti]);
				/* validating portid and queueid */
				status_p = check_flow_params(rule->portid,
						rule->fdir_qid);
				if (status_p < 0) {
					printf("port id %u / queue id %u is "
						"not valid\n", rule->portid,
						 rule->fdir_qid);
				}
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			default:
				APP_CHECK(0, status,
					"flow director not supported for security session type %d",
					ips->type);
				return;
			}
			continue;
		}

		/* unrecognized input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
				"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
				"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || (!portid_p && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
	}

	nb_crypto_sessions++;
	*ri = *ri + 1;
}

static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo &&
				aead_algos[i].key_len-4 == sa->cipher_key_len) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		printf("cpu-crypto-accelerated ");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		switch (fallback_ips->type) {
		case RTE_SECURITY_ACTION_TYPE_NONE:
			printf("lookaside-none");
			break;
		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			printf("cpu-crypto-accelerated");
			break;
		default:
			printf("invalid");
			break;
		}
	}
	if (sa->fdir_flag == 1)
		printf("flow-direction port %d queue %d", sa->portid,
				sa->fdir_qid);

	printf("\n");
}

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA context */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			nb_sa, socket_id);

	mz_size = sizeof(struct ipsec_xf) * nb_sa;
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA XFORM memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);

	if (sa_ctx == NULL) {
		printf("Failed to allocate SA CTX memory\n");
		rte_errno = ENOMEM;
		rte_memzone_free(mz);
		return NULL;
	}

	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
	sa_ctx->nb_sa = nb_sa;

	return sa_ctx;
}

static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Helper function: tries to determine the next_proto for an SPI
 * by searching through the SP rules. Returns IPPROTO_IPIP for an
 * IPv4 match, IPPROTO_IPV6 for an IPv6 match, negative on error.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}

/*
 * Helper function for getting the source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses
 * are not provided in the config file for that mode. It checks whether
 * an SP exists for the current SA and, depending on the protocol
 * returned, stores the appropriate addresses obtained from the SP
 * into the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv4 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
				ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
				ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
			"%s: No valid address or mask entry in"
			" IPv6 SP rule for SPI %u\n",
			__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}

static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN, the upper 32 bits of the SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = i;
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];

		if (inbound) {
			rc = ipsec_sad_add(&sa_ctx->sad, sa);
			if (rc != 0)
				return rc;
		}

		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}

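		/*
		 * RFC 4106: the AES-GCM nonce is the 4-byte salt followed
		 * by the 8-byte per-packet IV, hence the 12-byte IV length
		 * below.
		 */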
		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
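				/*
				 * RFC 3686: the 16-byte counter block is
				 * the 4-byte nonce, the 8-byte IV and a
				 * 4-byte block counter.
				 */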
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

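			/*
			 * Chain the two transforms so that inbound runs
			 * auth (verify) then cipher (decrypt), while
			 * outbound runs cipher (encrypt) then auth
			 * (generate).
			 */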
			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;
		}

		if (ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			rc = create_inline_session(skt_ctx, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_inline_session() failed\n");
				return -EINVAL;
			}
		}

		if (sa->fdir_flag && inbound) {
			rc = create_ipsec_esp_flow(sa);
			if (rc != 0)
				RTE_LOG(ERR, IPSEC_ESP,
					"create_ipsec_esp_flow() failed\n");
		}
		print_one_sa_rule(sa, inbound);
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}

/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}

static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the next proto for the SPI by searching for that SPI
	 * in the SPD. Probably not the optimal way, but nothing better
	 * seems available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
		ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4  = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1)
		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);

	return rc;
}

/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = 0;
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__,  ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = i;

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}

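/*
 * Note: the subtraction below can wrap for SPI values more than
 * 2^31 apart; the rules parsed here are assumed to stay within a
 * range where the comparison remains consistent.
 */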
static int
sa_cmp(const void *p, const void *q)
{
	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;

	return (int)(spi1 - spi2);
}

/*
 * Binary-search the SA rules (sorted by SPI via sa_sort_arr()) for an
 * SA with the given SPI.
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
	uint32_t num;
	struct ipsec_sa *sa;
	struct ipsec_sa tmpl;
	const struct ipsec_sa *sar;

	sar = sa_ctx->sa;
	if (inbound != 0)
		num = nb_sa_in;
	else
		num = nb_sa_out;

	tmpl.spi = spi;

	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
	if (sa != NULL)
		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);

	return -ENOENT;
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
				&sa_in_cnt);
		if (rc != 0)
			rte_exit(EXIT_FAILURE, "failed to init SAD\n");

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa_arr[], uint16_t nb_pkts)
{
	uint32_t i;
	void *result_sa;
	struct ipsec_sa *sa;

	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);

	/*
	 * Mark the need for an inline offload fallback in the LSB of the
	 * SA pointer. Thanks to the packet grouping mechanism used by
	 * ipsec_process, packets marked for fallback processing will form
	 * a separate group.
	 *
	 * Because the result is no longer a safe SA pointer, it is cast
	 * to a generic pointer to prevent unintentional use. Use
	 * ipsec_mask_saptr to get back a valid struct pointer.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (sa_arr[i] == NULL)
			continue;

		result_sa = sa = sa_arr[i];
		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
			sa->fallback_sessions > 0) {
			uintptr_t intsa = (uintptr_t)sa;
			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
			result_sa = (void *)intsa;
		}
		sa_arr[i] = result_sa;
	}
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}

void
sa_sort_arr(void)
{
	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}

uint32_t
get_nb_crypto_sessions(void)
{
	return nb_crypto_sessions;
}