xref: /dpdk/app/test/test_security_inline_proto.c (revision 4677de0a4c2ba803d0e1adc26774f2c6c8b5b6df)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 #include "test_security_proto.h"
16 
17 #ifdef RTE_EXEC_ENV_WINDOWS
18 static int
19 test_inline_ipsec(void)
20 {
21 	printf("Inline ipsec not supported on Windows, skipping test\n");
22 	return TEST_SKIPPED;
23 }
24 
25 static int
26 test_event_inline_ipsec(void)
27 {
28 	printf("Event inline ipsec not supported on Windows, skipping test\n");
29 	return TEST_SKIPPED;
30 }
31 
32 static int
33 test_inline_ipsec_sg(void)
34 {
35 	printf("Inline ipsec SG not supported on Windows, skipping test\n");
36 	return TEST_SKIPPED;
37 }
38 
39 #else
40 
41 #include <rte_eventdev.h>
42 #include <rte_event_eth_rx_adapter.h>
43 #include <rte_event_eth_tx_adapter.h>
44 
45 #define NB_ETHPORTS_USED		1
46 #define MEMPOOL_CACHE_SIZE		32
47 #define MAX_PKT_BURST			32
48 #define RX_DESC_DEFAULT	1024
49 #define TX_DESC_DEFAULT	1024
50 #define RTE_PORT_ALL		(~(uint16_t)0x0)
51 
52 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
53 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
54 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
55 
56 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
57 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
58 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
59 
60 #define MAX_TRAFFIC_BURST		2048
61 #define NB_MBUF				10240
62 
63 #define ENCAP_DECAP_BURST_SZ		33
64 #define APP_REASS_TIMEOUT		10
65 
66 extern struct ipsec_test_data pkt_aes_128_gcm;
67 extern struct ipsec_test_data pkt_aes_192_gcm;
68 extern struct ipsec_test_data pkt_aes_256_gcm;
69 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
70 extern struct ipsec_test_data pkt_aes_128_cbc_null;
71 extern struct ipsec_test_data pkt_null_aes_xcbc;
72 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
73 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
74 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256;
75 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha384;
76 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha512;
77 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256_v6;
78 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256;
79 extern struct ipsec_test_data pkt_des_cbc_hmac_sha384;
80 extern struct ipsec_test_data pkt_des_cbc_hmac_sha512;
81 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256_v6;
82 extern struct ipsec_test_data pkt_aes_128_cbc_md5;
83 
84 static struct rte_mempool *mbufpool;
85 static struct rte_mempool *sess_pool;
86 /* ethernet addresses of ports */
87 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
88 
89 static struct rte_eth_conf port_conf = {
90 	.rxmode = {
91 		.mq_mode = RTE_ETH_MQ_RX_NONE,
92 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
93 			    RTE_ETH_RX_OFFLOAD_SECURITY,
94 	},
95 	.txmode = {
96 		.mq_mode = RTE_ETH_MQ_TX_NONE,
97 		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
98 			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
99 	},
100 	.lpbk_mode = 1,  /* enable loopback */
101 };
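/*
 * Note: lpbk_mode loops Tx back to Rx, so the same port both encrypts
 * (egress) and then receives and decrypts (ingress) the test traffic.
 */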
102 
103 static struct rte_eth_rxconf rx_conf = {
104 	.rx_thresh = {
105 		.pthresh = RX_PTHRESH,
106 		.hthresh = RX_HTHRESH,
107 		.wthresh = RX_WTHRESH,
108 	},
109 	.rx_free_thresh = 32,
110 };
111 
112 static struct rte_eth_txconf tx_conf = {
113 	.tx_thresh = {
114 		.pthresh = TX_PTHRESH,
115 		.hthresh = TX_HTHRESH,
116 		.wthresh = TX_WTHRESH,
117 	},
118 	.tx_free_thresh = 32, /* Use PMD default values */
119 	.tx_rs_thresh = 32, /* Use PMD default values */
120 };
121 
122 static uint16_t port_id;
123 static uint8_t eventdev_id;
124 static uint8_t rx_adapter_id;
125 static uint8_t tx_adapter_id;
126 static uint16_t plaintext_len;
127 static bool sg_mode;
128 
129 static bool event_mode_enabled;
130 
131 static uint64_t link_mbps;
132 
133 static int ip_reassembly_dynfield_offset = -1;
134 
135 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
136 
137 /* Create Inline IPsec session */
138 static int
139 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
140 		void **sess, void **ctx,
141 		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
142 		struct rte_security_session_conf *sess_conf)
143 {
144 	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
145 				0x0000, 0x001a};
146 	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
147 				0xe82c, 0x4887};
148 	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
149 	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
150 	struct rte_security_capability_idx sec_cap_idx;
151 	const struct rte_security_capability *sec_cap;
152 	enum rte_security_ipsec_sa_direction dir;
153 	void *sec_ctx;
154 	uint32_t verify;
155 
156 	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
157 	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
158 	sess_conf->ipsec = sa->ipsec_xform;
159 
160 	dir = sa->ipsec_xform.direction;
161 	verify = flags->tunnel_hdr_verify;
162 
163 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
164 		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
165 			src_v4 += 1;
166 		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
167 			dst_v4 += 1;
168 	}
169 
170 	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
171 		if (sa->ipsec_xform.tunnel.type ==
172 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
173 			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
174 					sizeof(src_v4));
175 			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
176 					sizeof(dst_v4));
177 
178 			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
179 				sess_conf->ipsec.tunnel.ipv4.df = 0;
180 
181 			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
182 				sess_conf->ipsec.tunnel.ipv4.df = 1;
183 
184 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
185 				sess_conf->ipsec.tunnel.ipv4.dscp = 0;
186 
187 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
188 				sess_conf->ipsec.tunnel.ipv4.dscp =
189 						TEST_IPSEC_DSCP_VAL;
190 		} else {
191 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
192 				sess_conf->ipsec.tunnel.ipv6.dscp = 0;
193 
194 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
195 				sess_conf->ipsec.tunnel.ipv6.dscp =
196 						TEST_IPSEC_DSCP_VAL;
197 
198 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
199 				sess_conf->ipsec.tunnel.ipv6.flabel = 0;
200 
201 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
202 				sess_conf->ipsec.tunnel.ipv6.flabel =
203 						TEST_IPSEC_FLABEL_VAL;
204 
205 			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
206 					sizeof(src_v6));
207 			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
208 					sizeof(dst_v6));
209 		}
210 	}
211 
212 	/* Save the SA as userdata for the security session. When
213 	 * a packet is received, this userdata is retrieved using
214 	 * the metadata carried with the packet.
215 	 *
216 	 * The PMD is expected to set similar metadata for other
217 	 * operations tied to the security session, such as
218 	 * rte_eth_event. In those cases, the userdata can be
219 	 * used to uniquely identify the security parameters
220 	 * of the session in question.
221 	 */
222 
223 	sess_conf->userdata = (void *) sa;
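	/*
	 * Illustrative sketch (not part of the test): on the Rx side the
	 * userdata can be recovered from the device-written metadata. The
	 * exact mechanism is PMD-specific; older DPDK releases exposed
	 * rte_security_get_userdata() for this purpose, e.g.:
	 *
	 *   struct ipsec_test_data *sa_ud =
	 *           rte_security_get_userdata(sec_ctx, rx_metadata);
	 *
	 * where rx_metadata is hypothetical device metadata from the mbuf.
	 */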
224 
225 	sec_ctx = rte_eth_dev_get_sec_ctx(portid);
226 	if (sec_ctx == NULL) {
227 		printf("Ethernet device doesn't support security features.\n");
228 		return TEST_SKIPPED;
229 	}
230 
231 	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
232 	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
233 	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
234 	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
235 	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
236 	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
237 	if (sec_cap == NULL) {
238 		printf("No capabilities registered\n");
239 		return TEST_SKIPPED;
240 	}
241 
242 	if (sa->aead || sa->aes_gmac)
243 		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
244 			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
245 
246 	/* Copy cipher session parameters */
247 	if (sa->aead) {
248 		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
249 				sizeof(struct rte_crypto_sym_xform));
250 		sess_conf->crypto_xform->aead.key.data = sa->key.data;
251 		/* Verify crypto capabilities */
252 		if (test_sec_crypto_caps_aead_verify(sec_cap, sess_conf->crypto_xform) != 0) {
253 			RTE_LOG(INFO, USER1,
254 				"Crypto capabilities not supported\n");
255 			return TEST_SKIPPED;
256 		}
257 	} else {
258 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
259 			rte_memcpy(&sess_conf->crypto_xform->cipher,
260 					&sa->xform.chain.cipher.cipher,
261 					sizeof(struct rte_crypto_cipher_xform));
262 
263 			rte_memcpy(&sess_conf->crypto_xform->next->auth,
264 					&sa->xform.chain.auth.auth,
265 					sizeof(struct rte_crypto_auth_xform));
266 			sess_conf->crypto_xform->cipher.key.data =
267 							sa->key.data;
268 			sess_conf->crypto_xform->next->auth.key.data =
269 							sa->auth_key.data;
270 			/* Verify crypto capabilities */
271 			if (test_sec_crypto_caps_cipher_verify(sec_cap,
272 					sess_conf->crypto_xform) != 0) {
273 				RTE_LOG(INFO, USER1,
274 					"Cipher crypto capabilities not supported\n");
275 				return TEST_SKIPPED;
276 			}
277 
278 			if (test_sec_crypto_caps_auth_verify(sec_cap,
279 					sess_conf->crypto_xform->next) != 0) {
280 				RTE_LOG(INFO, USER1,
281 					"Auth crypto capabilities not supported\n");
282 				return TEST_SKIPPED;
283 			}
284 		} else {
285 			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
286 					&sa->xform.chain.cipher.cipher,
287 					sizeof(struct rte_crypto_cipher_xform));
288 			rte_memcpy(&sess_conf->crypto_xform->auth,
289 					&sa->xform.chain.auth.auth,
290 					sizeof(struct rte_crypto_auth_xform));
291 			sess_conf->crypto_xform->auth.key.data =
292 							sa->auth_key.data;
293 			sess_conf->crypto_xform->next->cipher.key.data =
294 							sa->key.data;
295 
296 			/* Verify crypto capabilities */
297 			if (test_sec_crypto_caps_cipher_verify(sec_cap,
298 					sess_conf->crypto_xform->next) != 0) {
299 				RTE_LOG(INFO, USER1,
300 					"Cipher crypto capabilities not supported\n");
301 				return TEST_SKIPPED;
302 			}
303 
304 			if (test_sec_crypto_caps_auth_verify(sec_cap,
305 					sess_conf->crypto_xform) != 0) {
306 				RTE_LOG(INFO, USER1,
307 					"Auth crypto capabilities not supported\n");
308 				return TEST_SKIPPED;
309 			}
310 		}
311 	}
312 
313 	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
314 		return TEST_SKIPPED;
315 
316 	if ((sa->ipsec_xform.direction ==
317 			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
318 			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
319 		/* Set env variable when IV generation is disabled */
320 		char arr[128];
321 		int len = 0, j = 0;
322 		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
323 
324 		for (; j < iv_len; j++)
325 			len += snprintf(arr+len, sizeof(arr) - len,
326 					"0x%x, ", sa->iv.data[j]);
327 		setenv("ETH_SEC_IV_OVR", arr, 1);
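		/*
		 * arr now holds e.g. "0x11, 0x22, ..."; how (and whether)
		 * this test-only hook is consumed is PMD-specific.
		 */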
328 	}
329 
330 	*sess = rte_security_session_create(sec_ctx, sess_conf, sess_pool);
331 	if (*sess == NULL) {
332 		printf("SEC Session init failed.\n");
333 		return TEST_FAILED;
334 	}
335 
336 	*ol_flags = sec_cap->ol_flags;
337 	*ctx = sec_ctx;
338 
339 	return 0;
340 }
341 
342 /* Check the link status of all ports for up to 3s, and print the final status */
343 static void
344 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
345 {
346 #define CHECK_INTERVAL 100 /* 100ms */
347 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
348 	uint16_t portid;
349 	uint8_t count, all_ports_up, print_flag = 0;
350 	struct rte_eth_link link;
351 	int ret;
352 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
353 
354 	printf("Checking link statuses...\n");
355 	fflush(stdout);
356 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
357 		all_ports_up = 1;
358 		for (portid = 0; portid < port_num; portid++) {
359 			if ((port_mask & (1 << portid)) == 0)
360 				continue;
361 			memset(&link, 0, sizeof(link));
362 			ret = rte_eth_link_get_nowait(portid, &link);
363 			if (ret < 0) {
364 				all_ports_up = 0;
365 				if (print_flag == 1)
366 					printf("Port %u link get failed: %s\n",
367 						portid, rte_strerror(-ret));
368 				continue;
369 			}
370 
371 			/* print link status if flag set */
372 			if (print_flag == 1) {
373 				if (link.link_status && link_mbps == 0)
374 					link_mbps = link.link_speed;
375 
376 				rte_eth_link_to_str(link_status,
377 					sizeof(link_status), &link);
378 				printf("Port %d %s\n", portid, link_status);
379 				continue;
380 			}
381 			/* clear all_ports_up flag if any link down */
382 			if (link.link_status == RTE_ETH_LINK_DOWN) {
383 				all_ports_up = 0;
384 				break;
385 			}
386 		}
387 		/* after finally printing all link status, get out */
388 		if (print_flag == 1)
389 			break;
390 
391 		if (all_ports_up == 0) {
392 			fflush(stdout);
393 			rte_delay_ms(CHECK_INTERVAL);
394 		}
395 
396 		/* set the print_flag if all ports up or timeout */
397 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
398 			print_flag = 1;
399 	}
400 }
401 
402 static void
403 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
404 {
405 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
406 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
407 	printf("%s%s", name, buf);
408 }
409 
410 static void
411 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
412 		     struct rte_mbuf *pkt, unsigned int offset)
413 {
414 	unsigned int copied = 0;
415 	unsigned int copy_len;
416 	struct rte_mbuf *seg;
417 	void *seg_buf;
418 
419 	seg = pkt;
420 	while (offset >= rte_pktmbuf_tailroom(seg)) {
421 		offset -= rte_pktmbuf_tailroom(seg);
422 		seg = seg->next;
423 	}
424 	copy_len = seg->buf_len - seg->data_off - offset;
425 	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
426 	while (len > copy_len) {
427 		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
428 		len -= copy_len;
429 		copied += copy_len;
430 		seg->data_len += copy_len;
431 
432 		seg = seg->next;
433 		copy_len = seg->buf_len - seg->data_off;
434 		seg_buf = rte_pktmbuf_mtod(seg, void *);
435 	}
436 	rte_memcpy(seg_buf, buf + copied, (size_t) len);
437 	seg->data_len = len;
438 
439 	pkt->pkt_len += copied + len;
440 }
441 
442 static bool
443 is_outer_ipv4(struct ipsec_test_data *td)
444 {
445 	bool outer_ipv4;
446 
447 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ||
448 	    td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
449 		outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION);
450 	else
451 		outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4);
452 	return outer_ipv4;
453 }
454 
455 static inline struct rte_mbuf *
456 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4)
457 {
458 	struct rte_mbuf *pkt, *tail;
459 	uint16_t space;
460 
461 	pkt = rte_pktmbuf_alloc(mp);
462 	if (pkt == NULL)
463 		return NULL;
464 
465 	if (outer_ipv4) {
466 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
467 				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
468 		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
469 	} else {
470 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
471 				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
472 		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
473 	}
474 	pkt->l2_len = RTE_ETHER_HDR_LEN;
475 
476 	space = rte_pktmbuf_tailroom(pkt);
477 	tail = pkt;
478 	/* Fail if the data does not fit and SG mode is not enabled */
479 	if (!sg_mode && space < len) {
480 		rte_pktmbuf_free(pkt);
481 		return NULL;
482 	}
483 	/* Chain extra segments until there is room for the data */
484 	while (space < len) {
485 		tail->next = rte_pktmbuf_alloc(mp);
486 		if (!tail->next)
487 			goto error;
488 		tail = tail->next;
489 		space += rte_pktmbuf_tailroom(tail);
490 		pkt->nb_segs++;
491 	}
492 
493 	if (pkt->buf_len > len + RTE_ETHER_HDR_LEN)
494 		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
495 	else
496 		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
497 	return pkt;
498 error:
499 	rte_pktmbuf_free(pkt);
500 	return NULL;
501 }
502 
503 static int
504 init_mempools(unsigned int nb_mbuf)
505 {
506 	void *sec_ctx;
507 	uint16_t nb_sess = 512;
508 	uint32_t sess_sz;
509 	char s[64];
510 
511 	if (mbufpool == NULL) {
512 		snprintf(s, sizeof(s), "mbuf_pool");
513 		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
514 				MEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,
515 				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
516 		if (mbufpool == NULL) {
517 			printf("Cannot init mbuf pool\n");
518 			return TEST_FAILED;
519 		}
520 		printf("Allocated mbuf pool\n");
521 	}
522 
523 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
524 	if (sec_ctx == NULL) {
525 		printf("Device does not support Security ctx\n");
526 		return TEST_SKIPPED;
527 	}
528 	sess_sz = rte_security_session_get_size(sec_ctx);
529 	if (sess_pool == NULL) {
530 		snprintf(s, sizeof(s), "sess_pool");
531 		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
532 				MEMPOOL_CACHE_SIZE, 0,
533 				NULL, NULL, NULL, NULL,
534 				SOCKET_ID_ANY, 0);
535 		if (sess_pool == NULL) {
536 			printf("Cannot init sess pool\n");
537 			return TEST_FAILED;
538 		}
539 		printf("Allocated sess pool\n");
540 	}
541 
542 	return 0;
543 }
544 
545 static int
546 create_default_flow(uint16_t portid)
547 {
548 	struct rte_flow_action action[2];
549 	struct rte_flow_item pattern[2];
550 	struct rte_flow_attr attr = {0};
551 	struct rte_flow_error err;
552 	struct rte_flow *flow;
553 	int ret;
554 
555 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
556 
557 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
558 	pattern[0].spec = NULL;
559 	pattern[0].mask = NULL;
560 	pattern[0].last = NULL;
561 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
562 
563 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
564 	action[0].conf = NULL;
565 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
566 	action[1].conf = NULL;
567 
568 	attr.ingress = 1;
569 
570 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
571 	if (ret) {
572 		printf("\nValidate flow failed, ret = %d\n", ret);
573 		return -1;
574 	}
575 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
576 	if (flow == NULL) {
577 		printf("\nDefault flow rule create failed\n");
578 		return -1;
579 	}
580 
581 	default_flow[portid] = flow;
582 
583 	return 0;
584 }
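/*
 * A roughly equivalent testpmd rule, for reference (illustrative):
 *   flow create 0 ingress pattern esp / end actions security / end
 */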
585 
586 static void
587 destroy_default_flow(uint16_t portid)
588 {
589 	struct rte_flow_error err;
590 	int ret;
591 
592 	if (!default_flow[portid])
593 		return;
594 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
595 	if (ret) {
596 		printf("\nDefault flow rule destroy failed\n");
597 		return;
598 	}
599 	default_flow[portid] = NULL;
600 }
601 
602 struct rte_mbuf **tx_pkts_burst;
603 struct rte_mbuf **rx_pkts_burst;
604 
605 static int
606 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
607 {
608 	unsigned int len;
609 	unsigned int nb_segs = m->nb_segs;
610 	unsigned int matched = 0;
611 	struct rte_mbuf *save = m;
612 
613 	while (m) {
614 		len = tot_len;
615 		if (len > m->data_len)
616 			len = m->data_len;
617 		if (len != 0) {
618 			if (memcmp(rte_pktmbuf_mtod(m, char *),
619 					ref + matched, len)) {
620 				printf("\n====Reassembly case failed: Data Mismatch");
621 				rte_hexdump(stdout, "Reassembled",
622 					rte_pktmbuf_mtod(m, char *),
623 					len);
624 				rte_hexdump(stdout, "reference",
625 					ref + matched,
626 					len);
627 				return TEST_FAILED;
628 			}
629 		}
630 		tot_len -= len;
631 		matched += len;
632 		m = m->next;
633 	}
634 
635 	if (tot_len) {
636 		printf("\n====Reassembly case failed: Data Missing %u",
637 		       tot_len);
638 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
639 		rte_pktmbuf_dump(stderr, save, -1);
640 		return TEST_FAILED;
641 	}
642 	return TEST_SUCCESS;
643 }
644 
645 static inline bool
646 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
647 {
648 	static uint64_t ip_reassembly_dynflag;
649 	int ip_reassembly_dynflag_offset;
650 
651 	if (ip_reassembly_dynflag == 0) {
652 		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
653 			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
654 		if (ip_reassembly_dynflag_offset < 0)
655 			return false;
656 		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
657 	}
658 
659 	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
660 }
661 
662 static void
663 free_mbuf(struct rte_mbuf *mbuf)
664 {
665 	rte_eth_ip_reassembly_dynfield_t dynfield;
666 
667 	if (!mbuf)
668 		return;
669 
670 	if (!is_ip_reassembly_incomplete(mbuf)) {
671 		rte_pktmbuf_free(mbuf);
672 	} else {
673 		if (ip_reassembly_dynfield_offset < 0)
674 			return;
675 
676 		while (mbuf) {
677 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
678 					ip_reassembly_dynfield_offset,
679 					rte_eth_ip_reassembly_dynfield_t *);
680 			rte_pktmbuf_free(mbuf);
681 			if (dynfield.nb_frags == 0)
682 				break;
683 			mbuf = dynfield.next_frag;
684 		}
685 	}
686 }
687 
688 
689 static int
690 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
691 				struct reassembly_vector *vector)
692 {
693 	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
694 	int j = 0, ret;
695 	/**
696 	 * IP reassembly offload is incomplete; the fragments are chained
697 	 * via the dynfield and can be reassembled in SW.
698 	 */
699 	printf("\nHW IP reassembly is not complete; attempting SW IP reassembly,"
700 		"\nmatching with original frags.");
701 
702 	if (ip_reassembly_dynfield_offset < 0)
703 		return -1;
704 
705 	printf("\ncomparing frag: %d", j);
706 	/* Skip Ethernet header comparison */
707 	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
708 	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
709 				vector->frags[j]->len);
710 	if (ret)
711 		return ret;
712 	j++;
713 	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
714 					rte_eth_ip_reassembly_dynfield_t *);
715 	printf("\ncomparing frag: %d", j);
716 	/* Skip Ethernet header comparison */
717 	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
718 	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
719 			vector->frags[j]->len);
720 	if (ret)
721 		return ret;
722 
723 	while ((dynfield[j]->nb_frags > 1) &&
724 			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
725 		j++;
726 		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
727 					ip_reassembly_dynfield_offset,
728 					rte_eth_ip_reassembly_dynfield_t *);
729 		printf("\ncomparing frag: %d", j);
730 		/* Skip Ethernet header comparison */
731 		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
732 		ret = compare_pkt_data(dynfield[j]->next_frag,
733 				vector->frags[j]->data, vector->frags[j]->len);
734 		if (ret)
735 			return ret;
736 	}
737 	return ret;
738 }
739 
740 static int
741 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
742 {
743 	struct rte_event ev;
744 	int i, nb_sent = 0;
745 
746 	/* Convert packets to events */
747 	memset(&ev, 0, sizeof(ev));
748 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
749 	for (i = 0; i < nb_pkts; i++) {
750 		ev.mbuf = tx_pkts[i];
751 		ev.mbuf->port = port_id;
752 		nb_sent += rte_event_eth_tx_adapter_enqueue(
753 				eventdev_id, port_id, &ev, 1, 0);
754 	}
755 
756 	return nb_sent;
757 }
758 
759 static int
760 event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
761 {
762 	int nb_ev, nb_rx = 0, j = 0;
763 	const int ms_per_pkt = 5;
764 	struct rte_event ev;
765 
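	/* Poll for up to ~ms_per_pkt milliseconds per expected packet */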
766 	do {
767 		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
768 				&ev, 1, 0);
769 
770 		if (nb_ev == 0) {
771 			rte_delay_ms(1);
772 			continue;
773 		}
774 
775 		/* Get packet from event */
776 		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
777 			printf("Unsupported event type: %i\n",
778 				ev.event_type);
779 			continue;
780 		}
781 		rx_pkts[nb_rx++] = ev.mbuf;
782 	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
783 
784 	return nb_rx;
785 }
786 
787 static int
788 verify_inbound_oop(struct ipsec_test_data *td,
789 		   bool silent, struct rte_mbuf *mbuf)
790 {
791 	int ret = TEST_SUCCESS, rc;
792 	struct rte_mbuf *orig;
793 	uint32_t len;
794 	void *data;
795 
796 	orig = *rte_security_oop_dynfield(mbuf);
797 	if (!orig) {
798 		if (!silent)
799 			printf("\nUnable to get orig buffer OOP session");
800 		return TEST_FAILED;
801 	}
802 
803 	/* Skip Ethernet header comparison */
804 	rte_pktmbuf_adj(orig, RTE_ETHER_HDR_LEN);
805 
806 	len = td->input_text.len;
807 	if (orig->pkt_len != len) {
808 		if (!silent)
809 			printf("\nOriginal packet length mismatch, expected %u, got %u ",
810 			       len, orig->pkt_len);
811 		ret = TEST_FAILED;
812 	}
813 
814 	data = rte_pktmbuf_mtod(orig, void *);
815 	rc = memcmp(data, td->input_text.data, len);
816 	if (rc) {
817 		ret = TEST_FAILED;
818 		if (silent)
819 			goto exit;
820 
821 		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
822 		       "output text not as expected\n");
823 
824 		rte_hexdump(stdout, "expected", td->input_text.data, len);
825 		rte_hexdump(stdout, "actual", data, len);
826 	}
827 exit:
828 	rte_pktmbuf_free(orig);
829 	return ret;
830 }
831 
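/*
 * Rx inject flow: encrypt packets via inline Tx on the loopback port,
 * capture the resulting ESP packets, then feed them back through
 * rte_security_inb_pkt_rx_inject() against the inbound sessions and
 * verify the decrypted output against the original plaintext.
 */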
832 static int
833 test_ipsec_with_rx_inject(struct ip_pkt_vector *vector, const struct ipsec_test_flags *flags)
834 {
835 	struct rte_security_session_conf sess_conf_out = {0};
836 	struct rte_security_session_conf sess_conf_in = {0};
837 	uint32_t nb_tx, burst_sz, nb_sent = 0, nb_inj = 0;
838 	struct rte_crypto_sym_xform cipher_out = {0};
839 	struct rte_crypto_sym_xform cipher_in = {0};
840 	struct rte_crypto_sym_xform auth_out = {0};
841 	struct rte_crypto_sym_xform aead_out = {0};
842 	struct rte_crypto_sym_xform auth_in = {0};
843 	struct rte_crypto_sym_xform aead_in = {0};
844 	void *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
845 	void *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
846 	uint32_t i, j, nb_rx = 0, nb_inj_rx = 0;
847 	struct rte_mbuf **inj_pkts_burst;
848 	struct ipsec_test_data sa_data;
849 	uint32_t ol_flags;
850 	bool outer_ipv4;
851 	int ret = 0;
852 	void *ctx;
853 
854 	inj_pkts_burst = calloc(MAX_TRAFFIC_BURST, sizeof(void *));
855 	if (!inj_pkts_burst)
856 		return TEST_FAILED;
857 
858 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
859 	nb_tx = burst_sz;
860 
861 	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
862 	sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
863 	outer_ipv4 = is_outer_ipv4(&sa_data);
864 
865 	for (i = 0; i < nb_tx; i++) {
866 		tx_pkts_burst[i] = init_packet(mbufpool, vector->full_pkt->data,
867 					       vector->full_pkt->len, outer_ipv4);
868 		if (tx_pkts_burst[i] == NULL) {
869 			ret = -1;
870 			printf("\n packed init failed\n");
871 			goto out;
872 		}
873 	}
874 
875 	for (i = 0; i < burst_sz; i++) {
876 		memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
877 		/* Update SPI for every new SA */
878 		sa_data.ipsec_xform.spi += i;
879 		sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
880 		if (sa_data.aead) {
881 			sess_conf_out.crypto_xform = &aead_out;
882 		} else {
883 			sess_conf_out.crypto_xform = &cipher_out;
884 			sess_conf_out.crypto_xform->next = &auth_out;
885 		}
886 
887 		/* Create Inline IPsec outbound session. */
888 		ret = create_inline_ipsec_session(&sa_data, port_id, &out_ses[i], &ctx, &ol_flags,
889 						  flags, &sess_conf_out);
890 		if (ret) {
891 			printf("\nInline outbound session create failed\n");
892 			goto out;
893 		}
894 	}
895 
896 	for (i = 0; i < nb_tx; i++) {
897 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
898 			rte_security_set_pkt_metadata(ctx,
899 				out_ses[i], tx_pkts_burst[i], NULL);
900 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
901 	}
902 
903 	for (i = 0; i < burst_sz; i++) {
904 		memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
905 		/* Update SPI for every new SA */
906 		sa_data.ipsec_xform.spi += i;
907 		sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
908 
909 		if (sa_data.aead) {
910 			sess_conf_in.crypto_xform = &aead_in;
911 		} else {
912 			sess_conf_in.crypto_xform = &auth_in;
913 			sess_conf_in.crypto_xform->next = &cipher_in;
914 		}
915 		/* Create Inline IPsec inbound session. */
916 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i], &ctx, &ol_flags,
917 						  flags, &sess_conf_in);
918 		if (ret) {
919 			printf("\nInline inbound session create failed\n");
920 			goto out;
921 		}
922 	}
923 
924 	rte_delay_ms(1);
925 	/* Create and receive encrypted packets */
926 	if (event_mode_enabled)
927 		nb_sent = event_tx_burst(tx_pkts_burst, nb_tx);
928 	else
929 		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
930 	if (nb_sent != nb_tx) {
931 		ret = -1;
932 		printf("\nFailed to tx %u pkts", nb_tx);
933 		goto out;
934 	}
935 
936 	rte_delay_ms(1);
937 
938 	/* Retry a few times before giving up */
939 	nb_rx = 0;
940 	j = 0;
941 	if (event_mode_enabled)
942 		nb_rx = event_rx_burst(rx_pkts_burst, nb_tx);
943 	else
944 		do {
945 			nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
946 						  nb_tx - nb_rx);
947 			j++;
948 			if (nb_rx >= nb_tx)
949 				break;
950 			rte_delay_ms(1);
951 		} while (j < 5 || !nb_rx);
952 
953 	/* Check for minimum number of Rx packets expected */
954 	if (nb_rx != nb_tx) {
955 		printf("\nReceived less Rx pkts(%u)\n", nb_rx);
956 		ret = TEST_FAILED;
957 		goto out;
958 	}
959 
960 	for (i = 0; i < nb_rx; i++) {
961 		if (!(rx_pkts_burst[i]->packet_type & RTE_PTYPE_TUNNEL_ESP)) {
962 			printf("\nNot received ESP packet, pytpe=%x\n",
963 					rx_pkts_burst[i]->packet_type);
964 			goto out;
965 		}
966 		rx_pkts_burst[i]->l2_len = RTE_ETHER_HDR_LEN;
967 	}
968 
969 	/* Inject Packets */
970 	if (flags->rx_inject)
971 		nb_inj = rte_security_inb_pkt_rx_inject(ctx, rx_pkts_burst, in_ses, nb_rx);
972 	else {
973 		printf("\nInject flag disabled, Failed to Inject %u pkts", nb_rx);
974 		goto out;
975 	}
976 	if (nb_inj != nb_rx) {
977 		ret = -1;
978 		printf("\nFailed to Inject %u pkts", nb_rx);
979 		goto out;
980 	}
981 
982 	rte_delay_ms(1);
983 
984 	/* Retry a few times before giving up */
985 	nb_inj_rx = 0;
986 	j = 0;
987 	if (event_mode_enabled)
988 		nb_inj_rx = event_rx_burst(inj_pkts_burst, nb_inj);
989 	else
990 		do {
991 			nb_inj_rx += rte_eth_rx_burst(port_id, 0, &inj_pkts_burst[nb_inj_rx],
992 						      nb_inj - nb_inj_rx);
993 			j++;
994 			if (nb_inj_rx >= nb_inj)
995 				break;
996 			rte_delay_ms(1);
997 		} while (j < 5 || !nb_inj_rx);
998 
999 	/* Check for minimum number of Rx packets expected */
1000 	if (nb_inj_rx != nb_inj) {
1001 		printf("\nReceived less Rx pkts(%u)\n", nb_inj_rx);
1002 		ret = TEST_FAILED;
1003 		goto out;
1004 	}
1005 
1006 	for (i = 0; i < nb_inj_rx; i++) {
1007 		if (inj_pkts_burst[i]->ol_flags &
1008 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
1009 		    !(inj_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
1010 			printf("\nsecurity offload failed\n");
1011 			ret = TEST_FAILED;
1012 			break;
1013 		}
1014 
1015 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
1016 		    inj_pkts_burst[i]->pkt_len) {
1017 			printf("\nreassembled/decrypted packet length mismatch\n");
1018 			ret = TEST_FAILED;
1019 			break;
1020 		}
1021 		rte_pktmbuf_adj(inj_pkts_burst[i], RTE_ETHER_HDR_LEN);
1022 		ret = compare_pkt_data(inj_pkts_burst[i], vector->full_pkt->data,
1023 				       vector->full_pkt->len);
1024 		if (ret != TEST_SUCCESS)
1025 			break;
1026 	}
1027 
1028 out:
1029 	/* Clear session data. */
1030 	for (i = 0; i < burst_sz; i++) {
1031 		if (out_ses[i])
1032 			rte_security_session_destroy(ctx, out_ses[i]);
1033 		if (in_ses[i])
1034 			rte_security_session_destroy(ctx, in_ses[i]);
1035 	}
1036 
1037 	for (i = nb_sent; i < nb_tx; i++)
1038 		free_mbuf(tx_pkts_burst[i]);
1039 	for (i = 0; i < nb_rx; i++)
1040 		free_mbuf(rx_pkts_burst[i]);
1041 	for (i = 0; i < nb_inj_rx; i++)
1042 		free_mbuf(inj_pkts_burst[i]);
1043 	free(inj_pkts_burst);
1044 
1045 	return ret;
1046 }
1047 
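/*
 * Reassembly flow: fragments are encrypted inline (one SA per burst
 * element, SPI incremented per SA), and on Rx the PMD's IP reassembly
 * offload is expected to rebuild the full packet; incomplete
 * reassemblies are verified fragment by fragment via the dynfield.
 */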
1048 static int
1049 test_ipsec_with_reassembly(struct reassembly_vector *vector,
1050 		const struct ipsec_test_flags *flags)
1051 {
1052 	void *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
1053 	void *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
1054 	struct rte_eth_ip_reassembly_params reass_capa = {0};
1055 	struct rte_security_session_conf sess_conf_out = {0};
1056 	struct rte_security_session_conf sess_conf_in = {0};
1057 	unsigned int nb_tx, burst_sz, nb_sent = 0;
1058 	struct rte_crypto_sym_xform cipher_out = {0};
1059 	struct rte_crypto_sym_xform auth_out = {0};
1060 	struct rte_crypto_sym_xform aead_out = {0};
1061 	struct rte_crypto_sym_xform cipher_in = {0};
1062 	struct rte_crypto_sym_xform auth_in = {0};
1063 	struct rte_crypto_sym_xform aead_in = {0};
1064 	struct ipsec_test_data sa_data;
1065 	void *ctx;
1066 	unsigned int i, nb_rx = 0, j;
1067 	uint32_t ol_flags;
1068 	bool outer_ipv4;
1069 	int ret = 0;
1070 
1071 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
1072 	nb_tx = vector->nb_frags * burst_sz;
1073 
1074 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
1075 	if (reass_capa.max_frags < vector->nb_frags)
1076 		return TEST_SKIPPED;
1077 
1078 	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
1079 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
1080 
1081 	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
1082 	sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
1083 	outer_ipv4 = is_outer_ipv4(&sa_data);
1084 
1085 	for (i = 0; i < nb_tx; i += vector->nb_frags) {
1086 		for (j = 0; j < vector->nb_frags; j++) {
1087 			tx_pkts_burst[i+j] = init_packet(mbufpool,
1088 						vector->frags[j]->data,
1089 						vector->frags[j]->len, outer_ipv4);
1090 			if (tx_pkts_burst[i+j] == NULL) {
1091 				ret = -1;
1092 				printf("\n packed init failed\n");
1093 				goto out;
1094 			}
1095 		}
1096 	}
1097 
1098 	for (i = 0; i < burst_sz; i++) {
1099 		memcpy(&sa_data, vector->sa_data,
1100 				sizeof(struct ipsec_test_data));
1101 		/* Update SPI for every new SA */
1102 		sa_data.ipsec_xform.spi += i;
1103 		sa_data.ipsec_xform.direction =
1104 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
1105 		if (sa_data.aead) {
1106 			sess_conf_out.crypto_xform = &aead_out;
1107 		} else {
1108 			sess_conf_out.crypto_xform = &cipher_out;
1109 			sess_conf_out.crypto_xform->next = &auth_out;
1110 		}
1111 
1112 		/* Create Inline IPsec outbound session. */
1113 		ret = create_inline_ipsec_session(&sa_data, port_id,
1114 				&out_ses[i], &ctx, &ol_flags, flags,
1115 				&sess_conf_out);
1116 		if (ret) {
1117 			printf("\nInline outbound session create failed\n");
1118 			goto out;
1119 		}
1120 	}
1121 
1122 	j = 0;
1123 	for (i = 0; i < nb_tx; i++) {
1124 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1125 			rte_security_set_pkt_metadata(ctx,
1126 				out_ses[j], tx_pkts_burst[i], NULL);
1127 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1128 
1129 		/* Move to next SA after nb_frags */
1130 		if ((i + 1) % vector->nb_frags == 0)
1131 			j++;
1132 	}
1133 
1134 	for (i = 0; i < burst_sz; i++) {
1135 		memcpy(&sa_data, vector->sa_data,
1136 				sizeof(struct ipsec_test_data));
1137 		/* Update SPI for every new SA */
1138 		sa_data.ipsec_xform.spi += i;
1139 		sa_data.ipsec_xform.direction =
1140 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
1141 
1142 		if (sa_data.aead) {
1143 			sess_conf_in.crypto_xform = &aead_in;
1144 		} else {
1145 			sess_conf_in.crypto_xform = &auth_in;
1146 			sess_conf_in.crypto_xform->next = &cipher_in;
1147 		}
1148 		/* Create Inline IPsec inbound session. */
1149 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
1150 				&ctx, &ol_flags, flags, &sess_conf_in);
1151 		if (ret) {
1152 			printf("\nInline inbound session create failed\n");
1153 			goto out;
1154 		}
1155 	}
1156 
1157 	/* Retrieve reassembly dynfield offset if available */
1158 	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
1159 		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
1160 				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
1161 
1162 
1163 	ret = create_default_flow(port_id);
1164 	if (ret)
1165 		goto out;
1166 
1167 	if (event_mode_enabled)
1168 		nb_sent = event_tx_burst(tx_pkts_burst, nb_tx);
1169 	else
1170 		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
1171 	if (nb_sent != nb_tx) {
1172 		ret = -1;
1173 		printf("\nFailed to tx %u pkts", nb_tx);
1174 		goto out;
1175 	}
1176 
1177 	rte_delay_ms(1);
1178 
1179 	/* Retry a few times before giving up */
1180 	nb_rx = 0;
1181 	j = 0;
1182 	if (event_mode_enabled)
1183 		nb_rx = event_rx_burst(rx_pkts_burst, nb_tx);
1184 	else
1185 		do {
1186 			nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
1187 						  nb_tx - nb_rx);
1188 			j++;
1189 			if (nb_rx >= nb_tx)
1190 				break;
1191 			rte_delay_ms(1);
1192 		} while (j < 5 || !nb_rx);
1193 
1194 	/* Check for minimum number of Rx packets expected */
1195 	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
1196 	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
1197 		printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
1198 		ret = TEST_FAILED;
1199 		goto out;
1200 	}
1201 
1202 	for (i = 0; i < nb_rx; i++) {
1203 		if (vector->nb_frags > 1 &&
1204 		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
1205 			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
1206 							      vector);
1207 			if (ret != TEST_SUCCESS)
1208 				break;
1209 			continue;
1210 		}
1211 
1212 		if (rx_pkts_burst[i]->ol_flags &
1213 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
1214 		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
1215 			printf("\nsecurity offload failed\n");
1216 			ret = TEST_FAILED;
1217 			break;
1218 		}
1219 
1220 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
1221 				rx_pkts_burst[i]->pkt_len) {
1222 			printf("\nreassembled/decrypted packet length mismatch\n");
1223 			ret = TEST_FAILED;
1224 			break;
1225 		}
1226 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
1227 		ret = compare_pkt_data(rx_pkts_burst[i],
1228 				       vector->full_pkt->data,
1229 				       vector->full_pkt->len);
1230 		if (ret != TEST_SUCCESS)
1231 			break;
1232 	}
1233 
1234 out:
1235 	destroy_default_flow(port_id);
1236 
1237 	/* Clear session data. */
1238 	for (i = 0; i < burst_sz; i++) {
1239 		if (out_ses[i])
1240 			rte_security_session_destroy(ctx, out_ses[i]);
1241 		if (in_ses[i])
1242 			rte_security_session_destroy(ctx, in_ses[i]);
1243 	}
1244 
1245 	for (i = nb_sent; i < nb_tx; i++)
1246 		free_mbuf(tx_pkts_burst[i]);
1247 	for (i = 0; i < nb_rx; i++)
1248 		free_mbuf(rx_pkts_burst[i]);
1249 	return ret;
1250 }
1251 
1252 static int
1253 test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
1254 		enum rte_eth_event_type type, void *param, void *ret_param)
1255 {
1256 	struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param;
1257 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
1258 
1259 	RTE_SET_USED(port_id);
1260 
1261 	if (type != RTE_ETH_EVENT_IPSEC)
1262 		return -1;
1263 
1264 	event_desc = ret_param;
1265 	if (event_desc == NULL) {
1266 		printf("Event descriptor not set\n");
1267 		return -1;
1268 	}
1269 	vector->notify_event = true;
1270 	if (event_desc->metadata != (uint64_t)vector->sa_data) {
1271 		printf("Mismatch in event specific metadata\n");
1272 		return -1;
1273 	}
1274 	switch (event_desc->subtype) {
1275 	case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY:
1276 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1277 		break;
1278 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY:
1279 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1280 		break;
1281 	case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY:
1282 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1283 		break;
1284 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY:
1285 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1286 		break;
1287 	default:
1288 		printf("Invalid IPsec event reported\n");
1289 		return -1;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 static enum rte_eth_event_ipsec_subtype
1296 test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector,
1297 		const struct ipsec_test_flags *flags,
1298 		struct ipsec_test_data *tdata)
1299 {
1300 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1301 
1302 	vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1303 	vector->notify_event = false;
1304 	vector->sa_data = (void *)tdata;
1305 	if (flags->sa_expiry_pkts_soft)
1306 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1307 	else if (flags->sa_expiry_bytes_soft)
1308 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1309 	else if (flags->sa_expiry_pkts_hard)
1310 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1311 	else
1312 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1313 	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
1314 		       test_ipsec_inline_sa_exp_event_callback, vector);
1315 
1316 	return event;
1317 }
1318 
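/*
 * Core test path: build the test packets, push them through the inline
 * session on the loopback port, then post-process and verify. Negative
 * cases expect RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED instead of valid output.
 */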
1319 static int
1320 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
1321 		struct ipsec_test_data *res_d,
1322 		int nb_pkts,
1323 		bool silent,
1324 		const struct ipsec_test_flags *flags)
1325 {
1326 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1327 	struct rte_security_session_conf sess_conf = {0};
1328 	struct rte_crypto_sym_xform cipher = {0};
1329 	struct rte_crypto_sym_xform auth = {0};
1330 	struct rte_crypto_sym_xform aead = {0};
1331 	struct sa_expiry_vector vector = {0};
1332 	void *ctx;
1333 	int nb_rx = 0, nb_sent;
1334 	uint32_t ol_flags;
1335 	int i, j = 0, ret;
1336 	bool outer_ipv4;
1337 	void *ses;
1338 
1339 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
1340 
1341 	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
1342 		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
1343 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1344 			return TEST_SUCCESS;
1345 		event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td);
1346 	}
1347 
1348 	if (td->aead) {
1349 		sess_conf.crypto_xform = &aead;
1350 	} else {
1351 		if (td->ipsec_xform.direction ==
1352 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1353 			sess_conf.crypto_xform = &cipher;
1354 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1355 			sess_conf.crypto_xform->next = &auth;
1356 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1357 		} else {
1358 			sess_conf.crypto_xform = &auth;
1359 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1360 			sess_conf.crypto_xform->next = &cipher;
1361 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1362 		}
1363 	}
1364 
1365 	/* Create Inline IPsec session. */
1366 	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
1367 					  &ol_flags, flags, &sess_conf);
1368 	if (ret)
1369 		return ret;
1370 
1371 	if (flags->inb_oop && rte_security_oop_dynfield_offset < 0) {
1372 		printf("\nDynamic field not available for inline inbound OOP");
1373 		ret = TEST_FAILED;
1374 		goto out;
1375 	}
1376 
1377 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1378 		ret = create_default_flow(port_id);
1379 		if (ret)
1380 			goto out;
1381 	}
1382 	outer_ipv4 = is_outer_ipv4(td);
1383 
1384 	for (i = 0; i < nb_pkts; i++) {
1385 		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
1386 						td->input_text.len, outer_ipv4);
1387 		if (tx_pkts_burst[i] == NULL) {
1388 			while (i--)
1389 				rte_pktmbuf_free(tx_pkts_burst[i]);
1390 			ret = TEST_FAILED;
1391 			goto out;
1392 		}
1393 
1394 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
1395 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1396 			while (i--)
1397 				rte_pktmbuf_free(tx_pkts_burst[i]);
1398 			ret = TEST_FAILED;
1399 			goto out;
1400 		}
1401 
1402 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1403 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1404 				rte_security_set_pkt_metadata(ctx, ses,
1405 						tx_pkts_burst[i], NULL);
1406 			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1407 		}
1408 	}
1409 	/* Send packet to ethdev for inline IPsec processing. */
1410 	if (event_mode_enabled)
1411 		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
1412 	else
1413 		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
1414 
1415 	if (nb_sent != nb_pkts) {
1416 		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
1417 		for ( ; nb_sent < nb_pkts; nb_sent++)
1418 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
1419 		ret = TEST_FAILED;
1420 		goto out;
1421 	}
1422 
1423 	rte_pause();
1424 
1425 	/* Receive back packet on loopback interface. */
1426 	if (event_mode_enabled)
1427 		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
1428 	else
1429 		do {
1430 			rte_delay_ms(1);
1431 			nb_rx += rte_eth_rx_burst(port_id, 0,
1432 					&rx_pkts_burst[nb_rx],
1433 					nb_sent - nb_rx);
1434 			if (nb_rx >= nb_sent)
1435 				break;
1436 		} while (j++ < 5 || nb_rx == 0);
1437 
1438 	if (!flags->sa_expiry_pkts_hard &&
1439 			!flags->sa_expiry_bytes_hard &&
1440 			(nb_rx != nb_sent)) {
1441 		printf("\nUnable to RX all %d packets, received(%i)",
1442 				nb_sent, nb_rx);
1443 		while (--nb_rx >= 0)
1444 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
1445 		ret = TEST_FAILED;
1446 		goto out;
1447 	}
1448 
1449 	for (i = 0; i < nb_rx; i++) {
1450 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
1451 
1452 		/* For tests where an error status indicates success,
1453 		 * skip verification
1454 		 */
1455 		if (td->ipsec_xform.direction ==
1456 		    RTE_SECURITY_IPSEC_SA_DIR_INGRESS && (flags->icv_corrupt ||
1457 		    flags->sa_expiry_pkts_hard || flags->tunnel_hdr_verify ||
1458 		    td->ar_packet)) {
1459 			if (!(rx_pkts_burst[i]->ol_flags &
1460 			    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
1461 				rte_pktmbuf_free(rx_pkts_burst[i]);
1462 				rx_pkts_burst[i] = NULL;
1463 				return TEST_FAILED;
1464 			}
1465 		} else {
1466 			ret = test_ipsec_post_process(rx_pkts_burst[i], td,
1467 						      res_d, silent, flags);
1468 			if (ret != TEST_SUCCESS) {
1469 				for ( ; i < nb_rx; i++)
1470 					rte_pktmbuf_free(rx_pkts_burst[i]);
1471 				goto out;
1472 			}
1473 		}
1474 
1475 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1476 					td->ipsec_xform.direction);
1477 		if (ret != TEST_SUCCESS) {
1478 			for ( ; i < nb_rx; i++)
1479 				rte_pktmbuf_free(rx_pkts_burst[i]);
1480 			goto out;
1481 		}
1482 
1483 		if (flags->inb_oop) {
1484 			ret = verify_inbound_oop(td, silent, rx_pkts_burst[i]);
1485 			if (ret != TEST_SUCCESS) {
1486 				for ( ; i < nb_rx; i++)
1487 					rte_pktmbuf_free(rx_pkts_burst[i]);
1488 				goto out;
1489 			}
1490 		}
1491 
1492 		rte_pktmbuf_free(rx_pkts_burst[i]);
1493 		rx_pkts_burst[i] = NULL;
1494 	}
1495 
1496 out:
1497 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1498 		destroy_default_flow(port_id);
1499 	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
1500 		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
1501 		if (vector.notify_event && (vector.event == event))
1502 			ret = TEST_SUCCESS;
1503 		else
1504 			ret = TEST_FAILED;
1505 
1506 		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
1507 			test_ipsec_inline_sa_exp_event_callback, &vector);
1508 	}
1509 
1510 	/* Destroy session so that other cases can create the session again */
1511 	rte_security_session_destroy(ctx, ses);
1512 	ses = NULL;
1513 
1514 	return ret;
1515 }
1516 
1517 static int
1518 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1519 {
1520 	struct ipsec_test_data td_outb;
1521 	struct ipsec_test_data td_inb;
1522 	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
1523 	int ret;
1524 
1525 	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1526 			flags->sa_expiry_bytes_soft ||
1527 			flags->sa_expiry_bytes_hard ||
1528 			flags->sa_expiry_pkts_hard)
1529 		nb_pkts = TEST_SEC_PKTS_MAX;
1530 
1531 	for (i = 0; i < RTE_DIM(sec_alg_list); i++) {
1532 		test_ipsec_td_prepare(sec_alg_list[i].param1,
1533 				      sec_alg_list[i].param2,
1534 				      flags, &td_outb, 1);
1535 
1536 		if (!td_outb.aead) {
1537 			enum rte_crypto_cipher_algorithm cipher_alg;
1538 			enum rte_crypto_auth_algorithm auth_alg;
1539 
1540 			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1541 			auth_alg = td_outb.xform.chain.auth.auth.algo;
1542 
1543 			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1544 				continue;
1545 
1546 			/* ICV is not applicable for NULL auth */
1547 			if (flags->icv_corrupt &&
1548 			    auth_alg == RTE_CRYPTO_AUTH_NULL)
1549 				continue;
1550 
1551 			/* IV is not applicable for NULL cipher */
1552 			if (flags->iv_gen &&
1553 			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1554 				continue;
1555 		}
1556 
1557 		if (flags->udp_encap)
1558 			td_outb.ipsec_xform.options.udp_encap = 1;
1559 
1560 		if (flags->sa_expiry_bytes_soft)
1561 			td_outb.ipsec_xform.life.bytes_soft_limit =
1562 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1563 				  * nb_pkts) >> 3) - 1;
1564 		if (flags->sa_expiry_pkts_hard)
1565 			td_outb.ipsec_xform.life.packets_hard_limit = TEST_SEC_PKTS_MAX - 1;
1566 		if (flags->sa_expiry_bytes_hard)
1567 			td_outb.ipsec_xform.life.bytes_hard_limit =
1568 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1569 				  * nb_pkts) >> 3) - 1;
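		/*
		 * Byte limits are ~1/8 of the total bytes to be sent, so
		 * the expiry event fires while the burst is still in flight.
		 */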
1570 
1571 		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1572 						false, flags);
1573 		if (ret == TEST_SKIPPED)
1574 			continue;
1575 
1576 		if (ret == TEST_FAILED) {
1577 			printf("\n TEST FAILED");
1578 			test_sec_alg_display(sec_alg_list[i].param1, sec_alg_list[i].param2);
1579 			fail_cnt++;
1580 			continue;
1581 		}
1582 
1583 		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1584 
1585 		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1586 						false, flags);
1587 		if (ret == TEST_SKIPPED)
1588 			continue;
1589 
1590 		if (ret == TEST_FAILED) {
1591 			printf("\n TEST FAILED");
1592 			test_sec_alg_display(sec_alg_list[i].param1, sec_alg_list[i].param2);
1593 			fail_cnt++;
1594 			continue;
1595 		}
1596 
1597 		if (flags->display_alg)
1598 			test_sec_alg_display(sec_alg_list[i].param1, sec_alg_list[i].param2);
1599 
1600 		pass_cnt++;
1601 	}
1602 
1603 	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1604 	if (fail_cnt > 0)
1605 		return TEST_FAILED;
1606 	if (pass_cnt > 0)
1607 		return TEST_SUCCESS;
1608 	else
1609 		return TEST_SKIPPED;
1610 }
1611 
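/*
 * ESN/anti-replay variant: packets are sent one at a time so that, for
 * anti-replay cases, the session's ESN value can be refreshed between
 * packets via rte_security_session_update().
 */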
1612 static int
1613 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
1614 		struct ipsec_test_data res_d[],
1615 		int nb_pkts,
1616 		bool silent,
1617 		const struct ipsec_test_flags *flags)
1618 {
1619 	struct rte_security_session_conf sess_conf = {0};
1620 	struct ipsec_test_data *res_d_tmp = NULL;
1621 	struct rte_crypto_sym_xform cipher = {0};
1622 	struct rte_crypto_sym_xform auth = {0};
1623 	struct rte_crypto_sym_xform aead = {0};
1624 	struct rte_mbuf *rx_pkt = NULL;
1625 	struct rte_mbuf *tx_pkt = NULL;
1626 	int nb_rx, nb_sent;
1627 	void *ses;
1628 	void *ctx;
1629 	uint32_t ol_flags;
1630 	bool outer_ipv4;
1631 	int i, ret;
1632 
1633 	if (td[0].aead) {
1634 		sess_conf.crypto_xform = &aead;
1635 	} else {
1636 		if (td[0].ipsec_xform.direction ==
1637 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1638 			sess_conf.crypto_xform = &cipher;
1639 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1640 			sess_conf.crypto_xform->next = &auth;
1641 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1642 		} else {
1643 			sess_conf.crypto_xform = &auth;
1644 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1645 			sess_conf.crypto_xform->next = &cipher;
1646 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1647 		}
1648 	}
1649 
1650 	/* Create Inline IPsec session. */
1651 	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
1652 					  &ol_flags, flags, &sess_conf);
1653 	if (ret)
1654 		return ret;
1655 
1656 	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1657 		ret = create_default_flow(port_id);
1658 		if (ret)
1659 			goto out;
1660 	}
1661 	outer_ipv4 = is_outer_ipv4(td);
1662 
1663 	for (i = 0; i < nb_pkts; i++) {
1664 		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
1665 					td[i].input_text.len, outer_ipv4);
1666 		if (tx_pkt == NULL) {
1667 			ret = TEST_FAILED;
1668 			goto out;
1669 		}
1670 
1671 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
1672 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1673 			ret = TEST_FAILED;
1674 			goto out;
1675 		}
1676 
1677 		if (td[i].ipsec_xform.direction ==
1678 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1679 			if (flags->antireplay) {
1680 				sess_conf.ipsec.esn.value =
1681 						td[i].ipsec_xform.esn.value;
1682 				ret = rte_security_session_update(ctx, ses,
1683 						&sess_conf);
1684 				if (ret) {
1685 					printf("Could not update ESN in session\n");
1686 					rte_pktmbuf_free(tx_pkt);
1687 					ret = TEST_SKIPPED;
1688 					goto out;
1689 				}
1690 			}
1691 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1692 				rte_security_set_pkt_metadata(ctx, ses,
1693 						tx_pkt, NULL);
1694 			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1695 		}
1696 
1697 		/* Send packet to ethdev for inline IPsec processing. */
1698 		if (event_mode_enabled)
1699 			nb_sent = event_tx_burst(&tx_pkt, 1);
1700 		else
1701 			nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
1702 
1703 		if (nb_sent != 1) {
1704 			printf("\nUnable to TX packets");
1705 			rte_pktmbuf_free(tx_pkt);
1706 			ret = TEST_FAILED;
1707 			goto out;
1708 		}
1709 
1710 		rte_pause();
1711 
1712 		/* Receive back packet on loopback interface. */
1713 		if (event_mode_enabled)
1714 			nb_rx = event_rx_burst(&rx_pkt, nb_sent);
1715 		else {
1716 			do {
1717 				rte_delay_ms(1);
1718 				nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
1719 			} while (nb_rx == 0);
1720 		}
1721 		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);
1722 
1723 		if (res_d != NULL)
1724 			res_d_tmp = &res_d[i];
1725 
1726 		ret = test_ipsec_post_process(rx_pkt, &td[i],
1727 					      res_d_tmp, silent, flags);
1728 		if (ret != TEST_SUCCESS) {
1729 			rte_pktmbuf_free(rx_pkt);
1730 			goto out;
1731 		}
1732 
1733 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1734 					td->ipsec_xform.direction);
1735 		if (ret != TEST_SUCCESS) {
1736 			rte_pktmbuf_free(rx_pkt);
1737 			goto out;
1738 		}
1739 
1740 		rte_pktmbuf_free(rx_pkt);
1741 		rx_pkt = NULL;
1742 		tx_pkt = NULL;
1743 	}
1744 
1745 out:
1746 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1747 		destroy_default_flow(port_id);
1748 
1749 	/* Destroy session so that other cases can create the session again */
1750 	rte_security_session_destroy(ctx, ses);
1751 	ses = NULL;
1752 
1753 	return ret;
1754 }
1755 
1756 static int
1757 ut_setup_inline_ipsec_reassembly(void)
1758 {
1759 	struct rte_eth_ip_reassembly_params reass_capa = {0};
1760 	int ret;
1761 
1762 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
1763 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
1764 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
1765 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
1766 	}
1767 
1768 	/* Start event devices */
1769 	if (event_mode_enabled) {
1770 		ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
1771 		if (ret < 0) {
1772 			printf("Failed to start rx adapter %d\n", ret);
1773 			return ret;
1774 		}
1775 
1776 		ret = rte_event_dev_start(eventdev_id);
1777 		if (ret < 0) {
1778 			printf("Failed to start event device %d\n", ret);
1779 			return ret;
1780 		}
1781 	}
1782 
1783 	/* Start device */
1784 	ret = rte_eth_dev_start(port_id);
1785 	if (ret < 0) {
1786 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1787 			ret, port_id);
1788 		return ret;
1789 	}
1790 	/* always enable promiscuous */
1791 	ret = rte_eth_promiscuous_enable(port_id);
1792 	if (ret != 0) {
1793 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1794 			rte_strerror(-ret), port_id);
1795 		return ret;
1796 	}
1797 
1798 	check_all_ports_link_status(1, RTE_PORT_ALL);
1799 
1800 	return 0;
1801 }
1802 
1803 static void
1804 ut_teardown_inline_ipsec_reassembly(void)
1805 {
1806 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1807 	uint16_t portid;
1808 	int ret;
1809 
1810 	/* Stop event devices */
1811 	if (event_mode_enabled)
1812 		rte_event_dev_stop(eventdev_id);
1813 
1814 	/* port tear down */
1815 	RTE_ETH_FOREACH_DEV(portid) {
1816 		ret = rte_eth_dev_stop(portid);
1817 		if (ret != 0)
1818 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1819 			       rte_strerror(-ret), portid);
1820 
1821 		/* Clear reassembly configuration */
1822 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1823 	}
1824 }
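
/*
 * Per-case setup for Rx inject tests: enable the security context's
 * Rx inject path on the port before the usual device start sequence.
 */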
1825 static int
1826 ut_setup_inline_ipsec_rx_inj(void)
1827 {
1828 	void *sec_ctx;
1829 	int ret;
1830 
1831 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
1832 	ret = rte_security_rx_inject_configure(sec_ctx, port_id, true);
1833 	if (ret) {
1834 		printf("Could not enable Rx inject\n");
1835 		return TEST_SKIPPED;
1836 	}
1837 
1838 	/* Start event devices */
1839 	if (event_mode_enabled) {
1840 		ret = rte_event_dev_start(eventdev_id);
1841 		if (ret < 0) {
1842 			printf("Failed to start event device %d\n", ret);
1843 			return ret;
1844 		}
1845 	}
1846 
1847 	/* Start device */
1848 	ret = rte_eth_dev_start(port_id);
1849 	if (ret < 0) {
1850 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1851 			ret, port_id);
1852 		return ret;
1853 	}
1854 	/* always enable promiscuous */
1855 	ret = rte_eth_promiscuous_enable(port_id);
1856 	if (ret != 0) {
1857 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1858 			rte_strerror(-ret), port_id);
1859 		return ret;
1860 	}
1861 
1862 	check_all_ports_link_status(1, RTE_PORT_ALL);
1863 
1864 	return 0;
1865 }
1866 
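/* Common per-case setup: start the event device (if enabled), then the port. */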
1867 static int
1868 ut_setup_inline_ipsec(void)
1869 {
1870 	int ret;
1871 
1872 	/* Start event devices */
1873 	if (event_mode_enabled) {
1874 		ret = rte_event_dev_start(eventdev_id);
1875 		if (ret < 0) {
1876 			printf("Failed to start event device %d\n", ret);
1877 			return ret;
1878 		}
1879 	}
1880 
1881 	/* Start device */
1882 	ret = rte_eth_dev_start(port_id);
1883 	if (ret < 0) {
1884 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1885 			ret, port_id);
1886 		return ret;
1887 	}
1888 	/* always enable promiscuous */
1889 	ret = rte_eth_promiscuous_enable(port_id);
1890 	if (ret != 0) {
1891 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1892 			rte_strerror(-ret), port_id);
1893 		return ret;
1894 	}
1895 
1896 	check_all_ports_link_status(1, RTE_PORT_ALL);
1897 
1898 	return 0;
1899 }
1900 
1901 static void
1902 ut_teardown_inline_ipsec_rx_inj(void)
1903 {
1904 	uint16_t portid;
1905 	void *sec_ctx;
1906 	int ret;
1907 
1908 	/* Stop event devices */
1909 	if (event_mode_enabled)
1910 		rte_event_dev_stop(eventdev_id);
1911 
1912 	/* port tear down */
1913 	RTE_ETH_FOREACH_DEV(portid) {
1914 		ret = rte_eth_dev_stop(portid);
1915 		if (ret != 0)
1916 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1917 			       rte_strerror(-ret), portid);
1918 
1919 		sec_ctx = rte_eth_dev_get_sec_ctx(portid);
1920 		ret = rte_security_rx_inject_configure(sec_ctx, portid, false);
1921 		if (ret)
1922 			printf("Could not disable Rx inject\n");
1923 
1924 	}
1925 }
1926 
1927 static void
1928 ut_teardown_inline_ipsec(void)
1929 {
1930 	uint16_t portid;
1931 	int ret;
1932 
1933 	/* Stop event devices */
1934 	if (event_mode_enabled)
1935 		rte_event_dev_stop(eventdev_id);
1936 
1937 	/* port tear down */
1938 	RTE_ETH_FOREACH_DEV(portid) {
1939 		ret = rte_eth_dev_stop(portid);
1940 		if (ret != 0)
1941 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1942 			       rte_strerror(-ret), portid);
1943 	}
1944 }
1945 
1946 static int
1947 inline_ipsec_testsuite_setup(void)
1948 {
1949 	struct rte_eth_conf local_port_conf;
1950 	struct rte_eth_dev_info dev_info;
1951 	uint16_t nb_rxd;
1952 	uint16_t nb_txd;
1953 	uint16_t nb_ports;
1954 	int ret;
1955 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1956 
1957 	printf("Start inline IPsec test.\n");
1958 
1959 	nb_ports = rte_eth_dev_count_avail();
1960 	if (nb_ports < NB_ETHPORTS_USED) {
1961 		printf("Test requires at least %u port(s), but only %u available\n",
1962 		       NB_ETHPORTS_USED, nb_ports);
1963 		return TEST_SKIPPED;
1964 	}
1965 
1966 	ret = init_mempools(NB_MBUF);
1967 	if (ret)
1968 		return ret;
1969 
1970 	if (tx_pkts_burst == NULL) {
1971 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1972 					  MAX_TRAFFIC_BURST,
1973 					  sizeof(void *),
1974 					  RTE_CACHE_LINE_SIZE);
1975 		if (!tx_pkts_burst)
1976 			return TEST_FAILED;
1977 
1978 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1979 					  MAX_TRAFFIC_BURST,
1980 					  sizeof(void *),
1981 					  RTE_CACHE_LINE_SIZE);
1982 		if (!rx_pkts_burst)
1983 			return TEST_FAILED;
1984 	}
1985 
1986 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1987 
1988 	nb_rxd = RX_DESC_DEFAULT;
1989 	nb_txd = TX_DESC_DEFAULT;
1990 
1991 	/* configuring port 0 for the test is enough */
1992 	port_id = 0;
1993 	if (rte_eth_dev_info_get(port_id, &dev_info)) {
1994 		printf("Failed to get devinfo\n");
1995 		return -1;
1996 	}
1997 
1998 	memcpy(&local_port_conf, &port_conf, sizeof(port_conf));
1999 	/* Add multi-seg offload flags and enlarge the MTU accordingly */
2000 	if (sg_mode) {
2001 		uint16_t max_data_room = RTE_MBUF_DEFAULT_DATAROOM *
2002 			dev_info.rx_desc_lim.nb_seg_max;
2003 
2004 		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2005 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2006 		local_port_conf.rxmode.mtu = RTE_MIN(dev_info.max_mtu, max_data_room - 256);
2007 	}
2008 
2009 	/* port configure */
2010 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
2011 				    nb_tx_queue, &local_port_conf);
2012 	if (ret < 0) {
2013 		printf("Cannot configure device: err=%d, port=%d\n",
2014 			 ret, port_id);
2015 		return ret;
2016 	}
2017 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
2018 	if (ret < 0) {
2019 		printf("Cannot get mac address: err=%d, port=%d\n",
2020 			 ret, port_id);
2021 		return ret;
2022 	}
2023 	printf("Port %u ", port_id);
2024 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
2025 	printf("\n");
2026 
2027 	/* tx queue setup */
2028 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
2029 				     SOCKET_ID_ANY, &tx_conf);
2030 	if (ret < 0) {
2031 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
2032 				ret, port_id);
2033 		return ret;
2034 	}
2035 	/* rx queue setup */
2036 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
2037 				     &rx_conf, mbufpool);
2038 	if (ret < 0) {
2039 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
2040 				ret, port_id);
2041 		return ret;
2042 	}
2043 
2044 	test_sec_alg_list_populate();
2045 
2046 	/* Change the plaintext size for tests without known vectors */
2047 	if (sg_mode) {
2048 		/* Leave 256B of space, as the ESP packet will be bigger and
2049 		 * we expect it to be received back on the same interface.
2050 		 * Without SG mode, the default value is used.
2051 		 */
2052 		plaintext_len = local_port_conf.rxmode.mtu - 256;
2053 	} else {
2054 		plaintext_len = 0;
2055 	}
2056 
2057 	return 0;
2058 }
2059 
2060 static void
2061 inline_ipsec_testsuite_teardown(void)
2062 {
2063 	uint16_t portid;
2064 	int ret;
2065 
2066 	/* port tear down */
2067 	RTE_ETH_FOREACH_DEV(portid) {
2068 		ret = rte_eth_dev_reset(portid);
2069 		if (ret != 0)
2070 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
2071 			       rte_strerror(-ret), portid);
2072 	}
2073 	rte_free(tx_pkts_burst);
2074 	rte_free(rx_pkts_burst);
2075 }
2076 
2077 static int
2078 event_inline_ipsec_testsuite_setup(void)
2079 {
2080 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
2081 	struct rte_event_dev_info evdev_default_conf = {0};
2082 	struct rte_event_dev_config eventdev_conf = {0};
2083 	struct rte_event_queue_conf eventq_conf = {0};
2084 	struct rte_event_port_conf ev_port_conf = {0};
2085 	const uint16_t nb_txd = 1024, nb_rxd = 1024;
2086 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
2087 	uint8_t ev_queue_id = 0, tx_queue_id = 0;
2088 	int nb_eventqueue = 1, nb_eventport = 1;
2089 	const int all_queues = -1;
2090 	uint32_t caps = 0;
2091 	uint16_t nb_ports;
2092 	int ret;
2093 
2094 	printf("Start event inline IPsec test.\n");
2095 
2096 	nb_ports = rte_eth_dev_count_avail();
2097 	if (nb_ports == 0) {
2098 		printf("Test requires: 1 port, available: 0\n");
2099 		return TEST_SKIPPED;
2100 	}
2101 
2102 	ret = init_mempools(NB_MBUF);
	if (ret)
		return ret;
2103 
2104 	if (tx_pkts_burst == NULL) {
2105 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
2106 					  MAX_TRAFFIC_BURST,
2107 					  sizeof(void *),
2108 					  RTE_CACHE_LINE_SIZE);
2109 		if (!tx_pkts_burst)
2110 			return TEST_FAILED;
2111 
2112 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
2113 					  MAX_TRAFFIC_BURST,
2114 					  sizeof(void *),
2115 					  RTE_CACHE_LINE_SIZE);
2116 		if (!rx_pkts_burst)
2117 			return TEST_FAILED;
2119 	}
2120 
2121 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
2122 
2123 	/* configuring port 0 for the test is enough */
2124 	port_id = 0;
2125 	/* port configure */
2126 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
2127 				    nb_tx_queue, &port_conf);
2128 	if (ret < 0) {
2129 		printf("Cannot configure device: err=%d, port=%d\n",
2130 			 ret, port_id);
2131 		return ret;
2132 	}
2133 
2134 	/* Tx queue setup */
2135 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
2136 				     SOCKET_ID_ANY, &tx_conf);
2137 	if (ret < 0) {
2138 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
2139 				ret, port_id);
2140 		return ret;
2141 	}
2142 
2143 	/* rx queue setup */
2144 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
2145 				     &rx_conf, mbufpool);
2146 	if (ret < 0) {
2147 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
2148 				ret, port_id);
2149 		return ret;
2150 	}
2151 
2152 	/* Setup eventdev */
2153 	eventdev_id = 0;
2154 	rx_adapter_id = 0;
2155 	tx_adapter_id = 0;
2156 
2157 	/* Get default conf of eventdev */
2158 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
2159 	if (ret < 0) {
2160 		printf("Error in getting event device info[devID:%d]\n",
2161 				eventdev_id);
2162 		return ret;
2163 	}
2164 
2165 	/* Get Tx adapter capabilities */
2166 	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
2167 	if (ret < 0) {
2168 		printf("Failed to get event device %d eth tx adapter"
2169 				" capabilities for port %d\n",
2170 				eventdev_id, port_id);
2171 		return ret;
2172 	}
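	/*
	 * Without the internal-port capability, the Tx adapter needs a
	 * dedicated event queue; it is set up and linked further below.
	 */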
2173 	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
2174 		tx_queue_id = nb_eventqueue++;
2175 
2176 	eventdev_conf.nb_events_limit =
2177 			evdev_default_conf.max_num_events;
2178 	eventdev_conf.nb_event_queue_flows =
2179 			evdev_default_conf.max_event_queue_flows;
2180 	eventdev_conf.nb_event_port_dequeue_depth =
2181 			evdev_default_conf.max_event_port_dequeue_depth;
2182 	eventdev_conf.nb_event_port_enqueue_depth =
2183 			evdev_default_conf.max_event_port_enqueue_depth;
2184 
2185 	eventdev_conf.nb_event_queues = nb_eventqueue;
2186 	eventdev_conf.nb_event_ports = nb_eventport;
2187 
2188 	/* Configure event device */
2190 	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
2191 	if (ret < 0) {
2192 		printf("Error in configuring event device\n");
2193 		return ret;
2194 	}
2195 
2196 	/* Configure event queue */
2197 	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
2198 	eventq_conf.nb_atomic_flows = 1024;
2199 	eventq_conf.nb_atomic_order_sequences = 1024;
2200 
2201 	/* Setup the queue */
2202 	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
2203 	if (ret < 0) {
2204 		printf("Failed to setup event queue %d\n", ret);
2205 		return ret;
2206 	}
2207 
2208 	/* Configure event port */
2209 	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
2210 	if (ret < 0) {
2211 		printf("Failed to setup event port %d\n", ret);
2212 		return ret;
2213 	}
2214 
2215 	/* Link the event queue to the event port */
2216 	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
2217 	if (ret < 0) {
2218 		printf("Failed to link event port %d\n", ret);
2219 		return ret;
2220 	}
2221 
2222 	/* Setup port conf */
2223 	ev_port_conf.new_event_threshold = 1200;
2224 	ev_port_conf.dequeue_depth =
2225 			evdev_default_conf.max_event_port_dequeue_depth;
2226 	ev_port_conf.enqueue_depth =
2227 			evdev_default_conf.max_event_port_enqueue_depth;
2228 
2229 	/* Create Rx adapter */
2230 	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
2231 			&ev_port_conf);
2232 	if (ret < 0) {
2233 		printf("Failed to create rx adapter %d\n", ret);
2234 		return ret;
2235 	}
2236 
2237 	/* Setup queue conf */
2238 	queue_conf.ev.queue_id = ev_queue_id;
2239 	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
2240 	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
2241 
2242 	/* Add queue to the adapter */
2243 	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
2244 			all_queues, &queue_conf);
2245 	if (ret < 0) {
2246 		printf("Failed to add eth queue to rx adapter %d\n", ret);
2247 		return ret;
2248 	}
2249 
2250 	/* Start rx adapter */
2251 	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
2252 	if (ret < 0) {
2253 		printf("Failed to start rx adapter %d\n", ret);
2254 		return ret;
2255 	}
2256 
2257 	/* Create tx adapter */
2258 	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
2259 			&ev_port_conf);
2260 	if (ret < 0) {
2261 		printf("Failed to create tx adapter %d\n", ret);
2262 		return ret;
2263 	}
2264 
2265 	/* Add queue to the adapter */
2266 	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
2267 			all_queues);
2268 	if (ret < 0) {
2269 		printf("Failed to add eth queue to tx adapter %d\n", ret);
2270 		return ret;
2271 	}
2272 	/* Setup Tx queue & port */
2273 	if (tx_queue_id) {
2274 		/* Setup the queue */
2275 		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
2276 				&eventq_conf);
2277 		if (ret < 0) {
2278 			printf("Failed to setup tx event queue %d\n", ret);
2279 			return ret;
2280 		}
2281 		/* Link Tx event queue to Tx port */
2282 		ret = rte_event_port_link(eventdev_id, port_id,
2283 				&tx_queue_id, NULL, 1);
2284 		if (ret != 1) {
2285 			printf("Failed to link event queue to port\n");
2286 			return ret;
2287 		}
2288 	}
2289 
2290 	/* Start tx adapter */
2291 	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
2292 	if (ret < 0) {
2293 		printf("Failed to start tx adapter %d\n", ret);
2294 		return ret;
2295 	}
2296 
2297 	/* Start eventdev */
2298 	ret = rte_event_dev_start(eventdev_id);
2299 	if (ret < 0) {
2300 		printf("Failed to start event device %d\n", ret);
2301 		return ret;
2302 	}
2303 
2304 	event_mode_enabled = true;
2305 
2306 	test_sec_alg_list_populate();
2307 
2308 	return 0;
2309 }
2310 
2311 static void
2312 event_inline_ipsec_testsuite_teardown(void)
2313 {
2314 	uint16_t portid;
2315 	int ret;
2316 
2317 	event_mode_enabled = false;
2318 
2319 	/* Stop and release rx adapter */
2320 	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
2321 	if (ret < 0)
2322 		printf("Failed to stop rx adapter %d\n", ret);
2323 	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
2324 	if (ret < 0)
2325 		printf("Failed to remove rx adapter queues %d\n", ret);
2326 	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
2327 	if (ret < 0)
2328 		printf("Failed to free rx adapter %d\n", ret);
2329 
2330 	/* Stop and release tx adapter */
2331 	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
2332 	if (ret < 0)
2333 		printf("Failed to stop tx adapter %d\n", ret);
2334 	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
2335 	if (ret < 0)
2336 		printf("Failed to remove tx adapter queues %d\n", ret);
2337 	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
2338 	if (ret < 0)
2339 		printf("Failed to free tx adapter %d\n", ret);
2340 
2341 	/* Stop and release event devices */
2342 	rte_event_dev_stop(eventdev_id);
2343 	ret = rte_event_dev_close(eventdev_id);
2344 	if (ret < 0)
2345 		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
2346 
2347 	/* port tear down */
2348 	RTE_ETH_FOREACH_DEV(portid) {
2349 		ret = rte_eth_dev_reset(portid);
2350 		if (ret != 0)
2351 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
2352 			       rte_strerror(-ret), portid);
2353 	}
2354 
2355 	rte_free(tx_pkts_burst);
2356 	rte_free(rx_pkts_burst);
2357 }
2358 
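/*
 * Build a local copy of the reassembly vector, pad all fragments except the
 * last up to plaintext_len for multi-seg runs, and verify that the fragments
 * are reassembled back into the full packet.
 */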
2359 static int
2360 test_inline_ip_reassembly(const void *testdata)
2361 {
2362 	struct reassembly_vector reassembly_td = {0};
2363 	const struct reassembly_vector *td = testdata;
2364 	struct ip_reassembly_test_packet full_pkt;
2365 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
2366 	uint16_t extra_data, extra_data_sum = 0;
2367 	struct ipsec_test_flags flags = {0};
2368 	int i;
2369 
2370 	reassembly_td.sa_data = td->sa_data;
2371 	reassembly_td.nb_frags = td->nb_frags;
2372 	reassembly_td.burst = td->burst;
2373 
2374 	memcpy(&full_pkt, td->full_pkt,
2375 			sizeof(struct ip_reassembly_test_packet));
2376 	reassembly_td.full_pkt = &full_pkt;
2377 
2378 	for (i = 0; i < reassembly_td.nb_frags; i++) {
2379 		memcpy(&frags[i], td->frags[i],
2380 			sizeof(struct ip_reassembly_test_packet));
2381 		reassembly_td.frags[i] = &frags[i];
2382 
2383 		/* Add extra data for multi-seg test on all fragments except the last one */
2384 		extra_data = 0;
2385 		if (plaintext_len && reassembly_td.frags[i]->len < plaintext_len &&
2386 		    (i != reassembly_td.nb_frags - 1))
2387 			extra_data = ((plaintext_len - reassembly_td.frags[i]->len) & ~0x7ULL);
2388 
2389 		test_vector_payload_populate(reassembly_td.frags[i],
2390 				(i == 0), extra_data, extra_data_sum);
2391 		extra_data_sum += extra_data;
2392 	}
2393 	test_vector_payload_populate(reassembly_td.full_pkt, true, extra_data_sum, 0);
2394 
2395 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
2396 }
2397 
2398 static int
2399 test_ipsec_inline_proto_known_vec(const void *test_data)
2400 {
2401 	struct ipsec_test_data td_outb;
2402 	struct ipsec_test_flags flags;
2403 
2404 	memset(&flags, 0, sizeof(flags));
2405 
2406 	memcpy(&td_outb, test_data, sizeof(td_outb));
2407 
2408 	if (td_outb.aead ||
2409 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
2410 		/* Disable IV gen to be able to test with known vectors */
2411 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
2412 	}
2413 
2414 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
2415 				false, &flags);
2416 }
2417 
2418 static int
2419 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
2420 {
2421 	const struct ipsec_test_data *td = test_data;
2422 	struct ipsec_test_flags flags;
2423 	struct ipsec_test_data td_inb;
2424 
2425 	memset(&flags, 0, sizeof(flags));
2426 
2427 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2428 		test_ipsec_td_in_from_out(td, &td_inb);
2429 	else
2430 		memcpy(&td_inb, td, sizeof(td_inb));
2431 
2432 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
2433 }
2434 
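/*
 * Inbound out-of-place test: with ingress_oop set, the PMD is expected to
 * deliver the decrypted packet in a new mbuf while preserving the original
 * encrypted mbuf.
 */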
2435 static int
2436 test_ipsec_inline_proto_oop_inb(const void *test_data)
2437 {
2438 	const struct ipsec_test_data *td = test_data;
2439 	struct ipsec_test_flags flags;
2440 	struct ipsec_test_data td_inb;
2441 
2442 	memset(&flags, 0, sizeof(flags));
2443 	flags.inb_oop = true;
2444 
2445 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2446 		test_ipsec_td_in_from_out(td, &td_inb);
2447 	else
2448 		memcpy(&td_inb, td, sizeof(td_inb));
2449 
2450 	td_inb.ipsec_xform.options.ingress_oop = true;
2451 
2452 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
2453 }
2454 
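/*
 * Rx inject inbound test: the rx_inject flag makes the helper feed the
 * encrypted packet back through the security context's Rx inject path
 * instead of the regular Rx burst.
 */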
2455 static int
2456 test_ipsec_inline_proto_rx_inj_inb(const void *test_data)
2457 {
2458 	const struct ip_pkt_vector *td = test_data;
2459 	struct ip_reassembly_test_packet full_pkt;
2460 	struct ipsec_test_flags flags = {0};
2461 	struct ip_pkt_vector out_td = {0};
2462 	uint16_t extra_data = 0;
2463 
2464 	flags.rx_inject = true;
2465 
2466 	out_td.sa_data = td->sa_data;
2467 	out_td.burst = td->burst;
2468 
2469 	memcpy(&full_pkt, td->full_pkt,
2470 			sizeof(struct ip_reassembly_test_packet));
2471 	out_td.full_pkt = &full_pkt;
2472 
2473 	/* Pad with extra data (8-byte aligned) for multi-seg test */
2474 	if (plaintext_len && out_td.full_pkt->len < plaintext_len)
2475 		extra_data = ((plaintext_len - out_td.full_pkt->len) & ~0x7ULL);
2476 
2477 	test_vector_payload_populate(out_td.full_pkt, true, extra_data, 0);
2478 
2479 	return test_ipsec_with_rx_inject(&out_td, &flags);
2480 }
2481 
2482 static int
2483 test_ipsec_inline_proto_display_list(void)
2484 {
2485 	struct ipsec_test_flags flags;
2486 
2487 	memset(&flags, 0, sizeof(flags));
2488 
2489 	flags.display_alg = true;
2490 	flags.plaintext_len = plaintext_len;
2491 
2492 	return test_ipsec_inline_proto_all(&flags);
2493 }
2494 
2495 static int
2496 test_ipsec_inline_proto_udp_encap(void)
2497 {
2498 	struct ipsec_test_flags flags;
2499 
2500 	memset(&flags, 0, sizeof(flags));
2501 
2502 	flags.udp_encap = true;
2503 	flags.plaintext_len = plaintext_len;
2504 
2505 	return test_ipsec_inline_proto_all(&flags);
2506 }
2507 
2508 static int
2509 test_ipsec_inline_proto_udp_ports_verify(void)
2510 {
2511 	struct ipsec_test_flags flags;
2512 
2513 	memset(&flags, 0, sizeof(flags));
2514 
2515 	flags.udp_encap = true;
2516 	flags.udp_ports_verify = true;
2517 	flags.plaintext_len = plaintext_len;
2518 
2519 	return test_ipsec_inline_proto_all(&flags);
2520 }
2521 
2522 static int
2523 test_ipsec_inline_proto_err_icv_corrupt(void)
2524 {
2525 	struct ipsec_test_flags flags;
2526 
2527 	memset(&flags, 0, sizeof(flags));
2528 
2529 	flags.icv_corrupt = true;
2530 	flags.plaintext_len = plaintext_len;
2531 
2532 	return test_ipsec_inline_proto_all(&flags);
2533 }
2534 
2535 static int
2536 test_ipsec_inline_proto_tunnel_dst_addr_verify(void)
2537 {
2538 	struct ipsec_test_flags flags;
2539 
2540 	memset(&flags, 0, sizeof(flags));
2541 
2542 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
2543 	flags.plaintext_len = plaintext_len;
2544 
2545 	return test_ipsec_inline_proto_all(&flags);
2546 }
2547 
2548 static int
2549 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(void)
2550 {
2551 	struct ipsec_test_flags flags;
2552 
2553 	memset(&flags, 0, sizeof(flags));
2554 
2555 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
2556 	flags.plaintext_len = plaintext_len;
2557 
2558 	return test_ipsec_inline_proto_all(&flags);
2559 }
2560 
2561 static int
2562 test_ipsec_inline_proto_inner_ip_csum(void)
2563 {
2564 	struct ipsec_test_flags flags;
2565 
2566 	memset(&flags, 0, sizeof(flags));
2567 
2568 	flags.ip_csum = true;
2569 	flags.plaintext_len = plaintext_len;
2570 
2571 	return test_ipsec_inline_proto_all(&flags);
2572 }
2573 
2574 static int
2575 test_ipsec_inline_proto_inner_l4_csum(void)
2576 {
2577 	struct ipsec_test_flags flags;
2578 
2579 	memset(&flags, 0, sizeof(flags));
2580 
2581 	flags.l4_csum = true;
2582 	flags.plaintext_len = plaintext_len;
2583 
2584 	return test_ipsec_inline_proto_all(&flags);
2585 }
2586 
2587 static int
2588 test_ipsec_inline_proto_tunnel_v4_in_v4(void)
2589 {
2590 	struct ipsec_test_flags flags;
2591 
2592 	memset(&flags, 0, sizeof(flags));
2593 
2594 	flags.ipv6 = false;
2595 	flags.tunnel_ipv6 = false;
2596 	flags.plaintext_len = plaintext_len;
2597 
2598 	return test_ipsec_inline_proto_all(&flags);
2599 }
2600 
2601 static int
2602 test_ipsec_inline_proto_tunnel_v6_in_v6(void)
2603 {
2604 	struct ipsec_test_flags flags;
2605 
2606 	memset(&flags, 0, sizeof(flags));
2607 
2608 	flags.ipv6 = true;
2609 	flags.tunnel_ipv6 = true;
2610 	flags.plaintext_len = plaintext_len;
2611 
2612 	return test_ipsec_inline_proto_all(&flags);
2613 }
2614 
2615 static int
2616 test_ipsec_inline_proto_tunnel_v4_in_v6(void)
2617 {
2618 	struct ipsec_test_flags flags;
2619 
2620 	memset(&flags, 0, sizeof(flags));
2621 
2622 	flags.ipv6 = false;
2623 	flags.tunnel_ipv6 = true;
2624 	flags.plaintext_len = plaintext_len;
2625 
2626 	return test_ipsec_inline_proto_all(&flags);
2627 }
2628 
2629 static int
2630 test_ipsec_inline_proto_tunnel_v6_in_v4(void)
2631 {
2632 	struct ipsec_test_flags flags;
2633 
2634 	memset(&flags, 0, sizeof(flags));
2635 
2636 	flags.ipv6 = true;
2637 	flags.tunnel_ipv6 = false;
2638 	flags.plaintext_len = plaintext_len;
2639 
2640 	return test_ipsec_inline_proto_all(&flags);
2641 }
2642 
2643 static int
2644 test_ipsec_inline_proto_transport_v4(void)
2645 {
2646 	struct ipsec_test_flags flags;
2647 
2648 	memset(&flags, 0, sizeof(flags));
2649 
2650 	flags.ipv6 = false;
2651 	flags.transport = true;
2652 	flags.plaintext_len = plaintext_len;
2653 
2654 	return test_ipsec_inline_proto_all(&flags);
2655 }
2656 
2657 static int
2658 test_ipsec_inline_proto_transport_l4_csum(void)
2659 {
2660 	struct ipsec_test_flags flags = {
2661 		.l4_csum = true,
2662 		.transport = true,
2663 		.plaintext_len = plaintext_len,
2664 	};
2665 
2666 	return test_ipsec_inline_proto_all(&flags);
2667 }
2668 
2669 static int
2670 test_ipsec_inline_proto_stats(void)
2671 {
2672 	struct ipsec_test_flags flags;
2673 
2674 	memset(&flags, 0, sizeof(flags));
2675 
2676 	flags.stats_success = true;
2677 	flags.plaintext_len = plaintext_len;
2678 
2679 	return test_ipsec_inline_proto_all(&flags);
2680 }
2681 
2682 static int
2683 test_ipsec_inline_proto_pkt_fragment(void)
2684 {
2685 	struct ipsec_test_flags flags;
2686 
2687 	memset(&flags, 0, sizeof(flags));
2688 
2689 	flags.fragment = true;
2690 	flags.plaintext_len = plaintext_len;
2691 
2692 	return test_ipsec_inline_proto_all(&flags);
2693 
2694 }
2695 
2696 static int
2697 test_ipsec_inline_proto_copy_df_inner_0(void)
2698 {
2699 	struct ipsec_test_flags flags;
2700 
2701 	memset(&flags, 0, sizeof(flags));
2702 
2703 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
2704 	flags.plaintext_len = plaintext_len;
2705 
2706 	return test_ipsec_inline_proto_all(&flags);
2707 }
2708 
2709 static int
2710 test_ipsec_inline_proto_copy_df_inner_1(void)
2711 {
2712 	struct ipsec_test_flags flags;
2713 
2714 	memset(&flags, 0, sizeof(flags));
2715 
2716 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
2717 	flags.plaintext_len = plaintext_len;
2718 
2719 	return test_ipsec_inline_proto_all(&flags);
2720 }
2721 
2722 static int
2723 test_ipsec_inline_proto_set_df_0_inner_1(void)
2724 {
2725 	struct ipsec_test_flags flags;
2726 
2727 	memset(&flags, 0, sizeof(flags));
2728 
2729 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
2730 	flags.plaintext_len = plaintext_len;
2731 
2732 	return test_ipsec_inline_proto_all(&flags);
2733 }
2734 
2735 static int
2736 test_ipsec_inline_proto_set_df_1_inner_0(void)
2737 {
2738 	struct ipsec_test_flags flags;
2739 
2740 	memset(&flags, 0, sizeof(flags));
2741 
2742 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
2743 	flags.plaintext_len = plaintext_len;
2744 
2745 	return test_ipsec_inline_proto_all(&flags);
2746 }
2747 
2748 static int
2749 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(void)
2750 {
2751 	struct ipsec_test_flags flags;
2752 
2753 	memset(&flags, 0, sizeof(flags));
2754 
2755 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2756 	flags.plaintext_len = plaintext_len;
2757 
2758 	return test_ipsec_inline_proto_all(&flags);
2759 }
2760 
2761 static int
2762 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(void)
2763 {
2764 	struct ipsec_test_flags flags;
2765 
2766 	memset(&flags, 0, sizeof(flags));
2767 
2768 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2769 	flags.plaintext_len = plaintext_len;
2770 
2771 	return test_ipsec_inline_proto_all(&flags);
2772 }
2773 
2774 static int
2775 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(void)
2776 {
2777 	struct ipsec_test_flags flags;
2778 
2779 	memset(&flags, 0, sizeof(flags));
2780 
2781 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2782 	flags.plaintext_len = plaintext_len;
2783 
2784 	return test_ipsec_inline_proto_all(&flags);
2785 }
2786 
2787 static int
2788 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(void)
2789 {
2790 	struct ipsec_test_flags flags;
2791 
2792 	memset(&flags, 0, sizeof(flags));
2793 
2794 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2795 	flags.plaintext_len = plaintext_len;
2796 
2797 	return test_ipsec_inline_proto_all(&flags);
2798 }
2799 
2800 static int
2801 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(void)
2802 {
2803 	struct ipsec_test_flags flags;
2804 
2805 	memset(&flags, 0, sizeof(flags));
2806 
2807 	flags.ipv6 = true;
2808 	flags.tunnel_ipv6 = true;
2809 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2810 	flags.plaintext_len = plaintext_len;
2811 
2812 	return test_ipsec_inline_proto_all(&flags);
2813 }
2814 
2815 static int
2816 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(void)
2817 {
2818 	struct ipsec_test_flags flags;
2819 
2820 	memset(&flags, 0, sizeof(flags));
2821 
2822 	flags.ipv6 = true;
2823 	flags.tunnel_ipv6 = true;
2824 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2825 	flags.plaintext_len = plaintext_len;
2826 
2827 	return test_ipsec_inline_proto_all(&flags);
2828 }
2829 
2830 static int
2831 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(void)
2832 {
2833 	struct ipsec_test_flags flags;
2834 
2835 	memset(&flags, 0, sizeof(flags));
2836 
2837 	flags.ipv6 = true;
2838 	flags.tunnel_ipv6 = true;
2839 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2840 	flags.plaintext_len = plaintext_len;
2841 
2842 	return test_ipsec_inline_proto_all(&flags);
2843 }
2844 
2845 static int
2846 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(void)
2847 {
2848 	struct ipsec_test_flags flags;
2849 
2850 	memset(&flags, 0, sizeof(flags));
2851 
2852 	flags.ipv6 = true;
2853 	flags.tunnel_ipv6 = true;
2854 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2855 	flags.plaintext_len = plaintext_len;
2856 
2857 	return test_ipsec_inline_proto_all(&flags);
2858 }
2859 
2860 static int
2861 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(void)
2862 {
2863 	struct ipsec_test_flags flags;
2864 
2865 	memset(&flags, 0, sizeof(flags));
2866 
2867 	flags.ipv6 = true;
2868 	flags.tunnel_ipv6 = true;
2869 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
2870 
2871 	return test_ipsec_inline_proto_all(&flags);
2872 }
2873 
2874 static int
2875 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(void)
2876 {
2877 	struct ipsec_test_flags flags;
2878 
2879 	memset(&flags, 0, sizeof(flags));
2880 
2881 	flags.ipv6 = true;
2882 	flags.tunnel_ipv6 = true;
2883 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
2884 
2885 	return test_ipsec_inline_proto_all(&flags);
2886 }
2887 
2888 static int
2889 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(void)
2890 {
2891 	struct ipsec_test_flags flags;
2892 
2893 	memset(&flags, 0, sizeof(flags));
2894 
2895 	flags.ipv6 = true;
2896 	flags.tunnel_ipv6 = true;
2897 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
2898 
2899 	return test_ipsec_inline_proto_all(&flags);
2900 }
2901 
2902 static int
2903 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(void)
2904 {
2905 	struct ipsec_test_flags flags;
2906 
2907 	memset(&flags, 0, sizeof(flags));
2908 
2909 	flags.ipv6 = true;
2910 	flags.tunnel_ipv6 = true;
2911 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
2912 
2913 	return test_ipsec_inline_proto_all(&flags);
2914 }
2915 
2916 static int
2917 test_ipsec_inline_proto_ipv4_ttl_decrement(void)
2918 {
2919 	struct ipsec_test_flags flags = {
2920 		.dec_ttl_or_hop_limit = true,
2921 		.plaintext_len = plaintext_len,
2922 	};
2923 
2924 	return test_ipsec_inline_proto_all(&flags);
2925 }
2926 
2927 static int
2928 test_ipsec_inline_proto_ipv6_hop_limit_decrement(void)
2929 {
2930 	struct ipsec_test_flags flags = {
2931 		.ipv6 = true,
2932 		.dec_ttl_or_hop_limit = true,
2933 		.plaintext_len = plaintext_len,
2934 	};
2935 
2936 	return test_ipsec_inline_proto_all(&flags);
2937 }
2938 
2939 static int
2940 test_ipsec_inline_proto_iv_gen(void)
2941 {
2942 	struct ipsec_test_flags flags;
2943 
2944 	memset(&flags, 0, sizeof(flags));
2945 
2946 	flags.iv_gen = true;
2947 	flags.plaintext_len = plaintext_len;
2948 
2949 	return test_ipsec_inline_proto_all(&flags);
2950 }
2951 
2952 static int
2953 test_ipsec_inline_proto_sa_pkt_soft_expiry(void)
2954 {
2955 	struct ipsec_test_flags flags = {
2956 		.sa_expiry_pkts_soft = true,
2957 		.plaintext_len = plaintext_len,
2958 	};
2959 	return test_ipsec_inline_proto_all(&flags);
2960 }

2961 static int
2962 test_ipsec_inline_proto_sa_byte_soft_expiry(void)
2963 {
2964 	struct ipsec_test_flags flags = {
2965 		.sa_expiry_bytes_soft = true,
2966 		.plaintext_len = plaintext_len,
2967 	};
2968 	return test_ipsec_inline_proto_all(&flags);
2969 }
2970 
2971 static int
2972 test_ipsec_inline_proto_sa_pkt_hard_expiry(void)
2973 {
2974 	struct ipsec_test_flags flags = {
2975 		.sa_expiry_pkts_hard = true
2976 	};
2977 
2978 	return test_ipsec_inline_proto_all(&flags);
2979 }
2980 
2981 static int
2982 test_ipsec_inline_proto_sa_byte_hard_expiry(void)
2983 {
2984 	struct ipsec_test_flags flags = {
2985 		.sa_expiry_bytes_hard = true
2986 	};
2987 
2988 	return test_ipsec_inline_proto_all(&flags);
2989 }
2990 
2991 static int
2992 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
2993 {
2994 	struct ipsec_test_data td_outb;
2995 	struct ipsec_test_flags flags;
2996 
2997 	memset(&flags, 0, sizeof(flags));
2998 	flags.fragment = true;
2999 	flags.plaintext_len = plaintext_len;
3000 
3001 	memcpy(&td_outb, test_data, sizeof(td_outb));
3002 
3003 	/* Disable IV gen to be able to test with known vectors */
3004 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
3005 
3006 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
3007 						&flags);
3008 }
3009 
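/*
 * Anti-replay helper: send outbound packets with fixed ESN values, turn the
 * captured ciphertext into inbound vectors, mark the ones expected to be
 * dropped as replayed, and run them back through inbound processing.
 */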
3010 static int
3011 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
3012 		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
3013 		      uint64_t winsz)
3014 {
3015 	struct ipsec_test_data td_outb[TEST_SEC_PKTS_MAX];
3016 	struct ipsec_test_data td_inb[TEST_SEC_PKTS_MAX];
3017 	struct ipsec_test_flags flags;
3018 	uint32_t i, ret = 0;
3019 
3020 	memset(&flags, 0, sizeof(flags));
3021 	flags.antireplay = true;
3022 	flags.plaintext_len = plaintext_len;
3023 
3024 	for (i = 0; i < nb_pkts; i++) {
3025 		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
3026 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
3027 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
3028 		td_outb[i].ipsec_xform.options.esn = esn_en;
3029 	}
3030 
3031 	for (i = 0; i < nb_pkts; i++)
3032 		td_outb[i].ipsec_xform.esn.value = esn[i];
3033 
3034 	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
3035 				nb_pkts, true, &flags);
3036 	if (ret != TEST_SUCCESS)
3037 		return ret;
3038 
3039 	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
3040 
3041 	for (i = 0; i < nb_pkts; i++) {
3042 		td_inb[i].ipsec_xform.options.esn = esn_en;
3043 		/* Set antireplay flag for packets to be dropped */
3044 		td_inb[i].ar_packet = replayed_pkt[i];
3045 	}
3046 
3047 	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
3048 				true, &flags);
3049 
3050 	return ret;
3051 }
3052 
3053 static int
3054 test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
3055 {
3057 	uint32_t nb_pkts = 5;
3058 	bool replayed_pkt[5];
3059 	uint64_t esn[5];
3060 
3061 	/* 1. Advance the TOP of the window to WS * 2 */
3062 	esn[0] = winsz * 2;
3063 	/* 2. Test sequence number within the new window(WS + 1) */
3064 	/* 2. Test sequence number within the new window (WS + 1) */
3065 	/* 3. Test sequence number less than the window BOTTOM */
3066 	esn[2] = winsz;
3067 	/* 4. Test sequence number in the middle of the window */
3068 	esn[3] = winsz + (winsz / 2);
3069 	/* 5. Test replay of the packet in the middle of the window */
3070 	esn[4] = winsz + (winsz / 2);
3071 
3072 	replayed_pkt[0] = false;
3073 	replayed_pkt[1] = false;
3074 	replayed_pkt[2] = true;
3075 	replayed_pkt[3] = false;
3076 	replayed_pkt[4] = true;
3077 
3078 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
3079 			nb_pkts, false, winsz);
3080 }
3081 
3082 static int
3083 test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
3084 {
3085 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024);
3086 }
3087 
3088 static int
3089 test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
3090 {
3091 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048);
3092 }
3093 
3094 static int
3095 test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
3096 {
3097 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096);
3098 }
3099 
3100 static int
3101 test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
3102 {
3104 	uint32_t nb_pkts = 7;
3105 	bool replayed_pkt[7];
3106 	uint64_t esn[7];
3107 
3108 	/* Set the initial sequence number */
3109 	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
3110 	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
3111 	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
3112 	/* 2. Test sequence number within new window (1<<32 + WS/2 + 1) */
3113 	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
3114 	/* 3. Test with sequence number within window (1<<32 - 1) */
3115 	esn[3] = (uint64_t)((1ULL << 32) - 1);
3116 	/* 4. Test with sequence number within window (1<<32) */
3117 	esn[4] = (uint64_t)(1ULL << 32);
3118 	/* 5. Test with duplicate sequence number within
3119 	 * new window (1<<32 - 1)
3120 	 */
3121 	esn[5] = (uint64_t)((1ULL << 32) - 1);
3122 	/* 6. Test with duplicate sequence number within new window (1<<32) */
3123 	esn[6] = (uint64_t)(1ULL << 32);
3124 
3125 	replayed_pkt[0] = false;
3126 	replayed_pkt[1] = false;
3127 	replayed_pkt[2] = false;
3128 	replayed_pkt[3] = false;
3129 	replayed_pkt[4] = false;
3130 	replayed_pkt[5] = true;
3131 	replayed_pkt[6] = true;
3132 
3133 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
3134 				     true, winsz);
3135 }
3136 
3137 static int
3138 test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
3139 {
3140 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024);
3141 }
3142 
3143 static int
3144 test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
3145 {
3146 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048);
3147 }
3148 
3149 static int
3150 test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
3151 {
3152 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096);
3153 }
3154 
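/*
 * Suite definition: outbound and inbound known-vector cases, feature-flag
 * combinations, SA expiry, anti-replay, reassembly and Rx inject cases.
 */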
3155 static struct unit_test_suite inline_ipsec_testsuite = {
3156 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
3157 	.unit_test_cases = {
3158 		TEST_CASE_NAMED_WITH_DATA(
3159 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
3160 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3161 			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
3162 		TEST_CASE_NAMED_WITH_DATA(
3163 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
3164 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3165 			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
3166 		TEST_CASE_NAMED_WITH_DATA(
3167 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
3168 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3169 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
3170 		TEST_CASE_NAMED_WITH_DATA(
3171 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
3172 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3173 			test_ipsec_inline_proto_known_vec,
3174 			&pkt_aes_128_cbc_md5),
3175 		TEST_CASE_NAMED_WITH_DATA(
3176 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
3177 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3178 			test_ipsec_inline_proto_known_vec,
3179 			&pkt_aes_128_cbc_hmac_sha256),
3180 		TEST_CASE_NAMED_WITH_DATA(
3181 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
3182 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3183 			test_ipsec_inline_proto_known_vec,
3184 			&pkt_aes_128_cbc_hmac_sha384),
3185 		TEST_CASE_NAMED_WITH_DATA(
3186 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
3187 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3188 			test_ipsec_inline_proto_known_vec,
3189 			&pkt_aes_128_cbc_hmac_sha512),
3190 		TEST_CASE_NAMED_WITH_DATA(
3191 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
3192 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3193 			test_ipsec_inline_proto_known_vec,
3194 			&pkt_3des_cbc_hmac_sha256),
3195 		TEST_CASE_NAMED_WITH_DATA(
3196 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
3197 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3198 			test_ipsec_inline_proto_known_vec,
3199 			&pkt_3des_cbc_hmac_sha384),
3200 		TEST_CASE_NAMED_WITH_DATA(
3201 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
3202 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3203 			test_ipsec_inline_proto_known_vec,
3204 			&pkt_3des_cbc_hmac_sha512),
3205 		TEST_CASE_NAMED_WITH_DATA(
3206 			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
3207 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3208 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
3209 		TEST_CASE_NAMED_WITH_DATA(
3210 			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
3211 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3212 			test_ipsec_inline_proto_known_vec,
3213 			&pkt_aes_128_cbc_hmac_sha256_v6),
3214 		TEST_CASE_NAMED_WITH_DATA(
3215 			"Outbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
3216 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3217 			test_ipsec_inline_proto_known_vec,
3218 			&pkt_3des_cbc_hmac_sha256_v6),
3219 		TEST_CASE_NAMED_WITH_DATA(
3220 			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
3221 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3222 			test_ipsec_inline_proto_known_vec,
3223 			&pkt_null_aes_xcbc),
3224 		TEST_CASE_NAMED_WITH_DATA(
3225 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
3226 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3227 			test_ipsec_inline_proto_known_vec,
3228 			&pkt_des_cbc_hmac_sha256),
3229 		TEST_CASE_NAMED_WITH_DATA(
3230 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
3231 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3232 			test_ipsec_inline_proto_known_vec,
3233 			&pkt_des_cbc_hmac_sha384),
3234 		TEST_CASE_NAMED_WITH_DATA(
3235 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
3236 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3237 			test_ipsec_inline_proto_known_vec,
3238 			&pkt_des_cbc_hmac_sha512),
3239 		TEST_CASE_NAMED_WITH_DATA(
3240 			"Outbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
3241 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3242 			test_ipsec_inline_proto_known_vec,
3243 			&pkt_des_cbc_hmac_sha256_v6),
3244 
3245 		TEST_CASE_NAMED_WITH_DATA(
3246 			"Outbound fragmented packet",
3247 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3248 			test_ipsec_inline_proto_known_vec_fragmented,
3249 			&pkt_aes_128_gcm_frag),
3250 
3251 		TEST_CASE_NAMED_WITH_DATA(
3252 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
3253 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3254 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
3255 		TEST_CASE_NAMED_WITH_DATA(
3256 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
3257 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3258 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
3259 		TEST_CASE_NAMED_WITH_DATA(
3260 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
3261 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3262 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
3263 		TEST_CASE_NAMED_WITH_DATA(
3264 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
3265 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3266 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
3267 		TEST_CASE_NAMED_WITH_DATA(
3268 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
3269 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3270 			test_ipsec_inline_proto_known_vec_inb,
3271 			&pkt_aes_128_cbc_md5),
3272 		TEST_CASE_NAMED_WITH_DATA(
3273 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
3274 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3275 			test_ipsec_inline_proto_known_vec_inb,
3276 			&pkt_aes_128_cbc_hmac_sha256),
3277 		TEST_CASE_NAMED_WITH_DATA(
3278 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
3279 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3280 			test_ipsec_inline_proto_known_vec_inb,
3281 			&pkt_aes_128_cbc_hmac_sha384),
3282 		TEST_CASE_NAMED_WITH_DATA(
3283 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
3284 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3285 			test_ipsec_inline_proto_known_vec_inb,
3286 			&pkt_aes_128_cbc_hmac_sha512),
3287 		TEST_CASE_NAMED_WITH_DATA(
3288 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
3289 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3290 			test_ipsec_inline_proto_known_vec_inb,
3291 			&pkt_3des_cbc_hmac_sha256),
3292 		TEST_CASE_NAMED_WITH_DATA(
3293 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
3294 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3295 			test_ipsec_inline_proto_known_vec_inb,
3296 			&pkt_3des_cbc_hmac_sha384),
3297 		TEST_CASE_NAMED_WITH_DATA(
3298 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
3299 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3300 			test_ipsec_inline_proto_known_vec_inb,
3301 			&pkt_3des_cbc_hmac_sha512),
3302 		TEST_CASE_NAMED_WITH_DATA(
3303 			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
3304 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3305 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
3306 		TEST_CASE_NAMED_WITH_DATA(
3307 			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
3308 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3309 			test_ipsec_inline_proto_known_vec_inb,
3310 			&pkt_aes_128_cbc_hmac_sha256_v6),
3311 		TEST_CASE_NAMED_WITH_DATA(
3312 			"Inbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
3313 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3314 			test_ipsec_inline_proto_known_vec_inb,
3315 			&pkt_3des_cbc_hmac_sha256_v6),
3316 		TEST_CASE_NAMED_WITH_DATA(
3317 			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
3318 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3319 			test_ipsec_inline_proto_known_vec_inb,
3320 			&pkt_null_aes_xcbc),
3321 		TEST_CASE_NAMED_WITH_DATA(
3322 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
3323 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3324 			test_ipsec_inline_proto_known_vec_inb,
3325 			&pkt_des_cbc_hmac_sha256),
3326 		TEST_CASE_NAMED_WITH_DATA(
3327 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
3328 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3329 			test_ipsec_inline_proto_known_vec_inb,
3330 			&pkt_des_cbc_hmac_sha384),
3331 		TEST_CASE_NAMED_WITH_DATA(
3332 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
3333 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3334 			test_ipsec_inline_proto_known_vec_inb,
3335 			&pkt_des_cbc_hmac_sha512),
3336 		TEST_CASE_NAMED_WITH_DATA(
3337 			"Inbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
3338 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3339 			test_ipsec_inline_proto_known_vec_inb,
3340 			&pkt_des_cbc_hmac_sha256_v6),
3341 
3343 		TEST_CASE_NAMED_ST(
3344 			"Combined test alg list",
3345 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3346 			test_ipsec_inline_proto_display_list),
3347 
3348 		TEST_CASE_NAMED_ST(
3349 			"UDP encapsulation",
3350 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3351 			test_ipsec_inline_proto_udp_encap),
3352 		TEST_CASE_NAMED_ST(
3353 			"UDP encapsulation ports verification test",
3354 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3355 			test_ipsec_inline_proto_udp_ports_verify),
3356 		TEST_CASE_NAMED_ST(
3357 			"Negative test: ICV corruption",
3358 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3359 			test_ipsec_inline_proto_err_icv_corrupt),
3360 		TEST_CASE_NAMED_ST(
3361 			"Tunnel dst addr verification",
3362 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3363 			test_ipsec_inline_proto_tunnel_dst_addr_verify),
3364 		TEST_CASE_NAMED_ST(
3365 			"Tunnel src and dst addr verification",
3366 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3367 			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
3368 		TEST_CASE_NAMED_ST(
3369 			"Inner IP checksum",
3370 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3371 			test_ipsec_inline_proto_inner_ip_csum),
3372 		TEST_CASE_NAMED_ST(
3373 			"Inner L4 checksum",
3374 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3375 			test_ipsec_inline_proto_inner_l4_csum),
3376 		TEST_CASE_NAMED_ST(
3377 			"Tunnel IPv4 in IPv4",
3378 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3379 			test_ipsec_inline_proto_tunnel_v4_in_v4),
3380 		TEST_CASE_NAMED_ST(
3381 			"Tunnel IPv6 in IPv6",
3382 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3383 			test_ipsec_inline_proto_tunnel_v6_in_v6),
3384 		TEST_CASE_NAMED_ST(
3385 			"Tunnel IPv4 in IPv6",
3386 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3387 			test_ipsec_inline_proto_tunnel_v4_in_v6),
3388 		TEST_CASE_NAMED_ST(
3389 			"Tunnel IPv6 in IPv4",
3390 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3391 			test_ipsec_inline_proto_tunnel_v6_in_v4),
3392 		TEST_CASE_NAMED_ST(
3393 			"Transport IPv4",
3394 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3395 			test_ipsec_inline_proto_transport_v4),
3396 		TEST_CASE_NAMED_ST(
3397 			"Transport l4 checksum",
3398 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3399 			test_ipsec_inline_proto_transport_l4_csum),
3400 		TEST_CASE_NAMED_ST(
3401 			"Statistics: success",
3402 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3403 			test_ipsec_inline_proto_stats),
3404 		TEST_CASE_NAMED_ST(
3405 			"Fragmented packet",
3406 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3407 			test_ipsec_inline_proto_pkt_fragment),
3408 		TEST_CASE_NAMED_ST(
3409 			"Tunnel header copy DF (inner 0)",
3410 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3411 			test_ipsec_inline_proto_copy_df_inner_0),
3412 		TEST_CASE_NAMED_ST(
3413 			"Tunnel header copy DF (inner 1)",
3414 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3415 			test_ipsec_inline_proto_copy_df_inner_1),
3416 		TEST_CASE_NAMED_ST(
3417 			"Tunnel header set DF 0 (inner 1)",
3418 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3419 			test_ipsec_inline_proto_set_df_0_inner_1),
3420 		TEST_CASE_NAMED_ST(
3421 			"Tunnel header set DF 1 (inner 0)",
3422 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3423 			test_ipsec_inline_proto_set_df_1_inner_0),
3424 		TEST_CASE_NAMED_ST(
3425 			"Tunnel header IPv4 copy DSCP (inner 0)",
3426 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3427 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
3428 		TEST_CASE_NAMED_ST(
3429 			"Tunnel header IPv4 copy DSCP (inner 1)",
3430 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3431 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
3432 		TEST_CASE_NAMED_ST(
3433 			"Tunnel header IPv4 set DSCP 0 (inner 1)",
3434 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3435 			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
3436 		TEST_CASE_NAMED_ST(
3437 			"Tunnel header IPv4 set DSCP 1 (inner 0)",
3438 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3439 			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
3440 		TEST_CASE_NAMED_ST(
3441 			"Tunnel header IPv6 copy DSCP (inner 0)",
3442 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3443 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
3444 		TEST_CASE_NAMED_ST(
3445 			"Tunnel header IPv6 copy DSCP (inner 1)",
3446 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3447 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
3448 		TEST_CASE_NAMED_ST(
3449 			"Tunnel header IPv6 set DSCP 0 (inner 1)",
3450 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3451 			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
3452 		TEST_CASE_NAMED_ST(
3453 			"Tunnel header IPv6 set DSCP 1 (inner 0)",
3454 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3455 			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
3456 		TEST_CASE_NAMED_ST(
3457 			"Tunnel header IPv6 copy FLABEL (inner 0)",
3458 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3459 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
3460 		TEST_CASE_NAMED_ST(
3461 			"Tunnel header IPv6 copy FLABEL (inner 1)",
3462 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3463 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
3464 		TEST_CASE_NAMED_ST(
3465 			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
3466 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3467 			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
3468 		TEST_CASE_NAMED_ST(
3469 			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
3470 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3471 			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
3472 		TEST_CASE_NAMED_ST(
3473 			"Tunnel header IPv4 decrement inner TTL",
3474 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3475 			test_ipsec_inline_proto_ipv4_ttl_decrement),
3476 		TEST_CASE_NAMED_ST(
3477 			"Tunnel header IPv6 decrement inner hop limit",
3478 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3479 			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
3480 		TEST_CASE_NAMED_ST(
3481 			"IV generation",
3482 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3483 			test_ipsec_inline_proto_iv_gen),
3484 		TEST_CASE_NAMED_ST(
3485 			"SA soft expiry with packet limit",
3486 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3487 			test_ipsec_inline_proto_sa_pkt_soft_expiry),
3488 		TEST_CASE_NAMED_ST(
3489 			"SA soft expiry with byte limit",
3490 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3491 			test_ipsec_inline_proto_sa_byte_soft_expiry),
3492 		TEST_CASE_NAMED_ST(
3493 			"SA hard expiry with packet limit",
3494 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3495 			test_ipsec_inline_proto_sa_pkt_hard_expiry),
3496 		TEST_CASE_NAMED_ST(
3497 			"SA hard expiry with byte limit",
3498 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3499 			test_ipsec_inline_proto_sa_byte_hard_expiry),
3500 
3501 		TEST_CASE_NAMED_WITH_DATA(
3502 			"Antireplay with window size 1024",
3503 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3504 			test_ipsec_inline_proto_pkt_antireplay1024,
3505 			&pkt_aes_128_gcm),
3506 		TEST_CASE_NAMED_WITH_DATA(
3507 			"Antireplay with window size 2048",
3508 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3509 			test_ipsec_inline_proto_pkt_antireplay2048,
3510 			&pkt_aes_128_gcm),
3511 		TEST_CASE_NAMED_WITH_DATA(
3512 			"Antireplay with window size 4096",
3513 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3514 			test_ipsec_inline_proto_pkt_antireplay4096,
3515 			&pkt_aes_128_gcm),
3516 		TEST_CASE_NAMED_WITH_DATA(
3517 			"ESN and Antireplay with window size 1024",
3518 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3519 			test_ipsec_inline_proto_pkt_esn_antireplay1024,
3520 			&pkt_aes_128_gcm),
3521 		TEST_CASE_NAMED_WITH_DATA(
3522 			"ESN and Antireplay with window size 2048",
3523 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3524 			test_ipsec_inline_proto_pkt_esn_antireplay2048,
3525 			&pkt_aes_128_gcm),
3526 		TEST_CASE_NAMED_WITH_DATA(
3527 			"ESN and Antireplay with window size 4096",
3528 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3529 			test_ipsec_inline_proto_pkt_esn_antireplay4096,
3530 			&pkt_aes_128_gcm),
3531 
3532 		TEST_CASE_NAMED_WITH_DATA(
3533 			"IPv4 Reassembly with 2 fragments",
3534 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3535 			test_inline_ip_reassembly, &ipv4_2frag_vector),
3536 		TEST_CASE_NAMED_WITH_DATA(
3537 			"IPv6 Reassembly with 2 fragments",
3538 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3539 			test_inline_ip_reassembly, &ipv6_2frag_vector),
3540 		TEST_CASE_NAMED_WITH_DATA(
3541 			"IPv4 Reassembly with 4 fragments",
3542 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3543 			test_inline_ip_reassembly, &ipv4_4frag_vector),
3544 		TEST_CASE_NAMED_WITH_DATA(
3545 			"IPv6 Reassembly with 4 fragments",
3546 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3547 			test_inline_ip_reassembly, &ipv6_4frag_vector),
3548 		TEST_CASE_NAMED_WITH_DATA(
3549 			"IPv4 Reassembly with 5 fragments",
3550 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3551 			test_inline_ip_reassembly, &ipv4_5frag_vector),
3552 		TEST_CASE_NAMED_WITH_DATA(
3553 			"IPv6 Reassembly with 5 fragments",
3554 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3555 			test_inline_ip_reassembly, &ipv6_5frag_vector),
3556 		TEST_CASE_NAMED_WITH_DATA(
3557 			"IPv4 Reassembly with incomplete fragments",
3558 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3559 			test_inline_ip_reassembly, &ipv4_incomplete_vector),
3560 		TEST_CASE_NAMED_WITH_DATA(
3561 			"IPv4 Reassembly with overlapping fragments",
3562 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3563 			test_inline_ip_reassembly, &ipv4_overlap_vector),
3564 		TEST_CASE_NAMED_WITH_DATA(
3565 			"IPv4 Reassembly with out of order fragments",
3566 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3567 			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
3568 		TEST_CASE_NAMED_WITH_DATA(
3569 			"IPv4 Reassembly with burst of 4 fragments",
3570 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3571 			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
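		/*
		 * Out-Of-Place processing keeps the original encrypted
		 * packet available alongside the decrypted one; Rx inject
		 * submits packets back to the device so they take the
		 * inline inbound path as if received on the wire.
		 */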
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound Out-Of-Place processing",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_oop_inb,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound Rx Inject processing",
			ut_setup_inline_ipsec_rx_inj, ut_teardown_inline_ipsec_rx_inj,
			test_ipsec_inline_proto_rx_inj_inb, &ipv4_vector),

		TEST_CASES_END() /**< NULL terminate unit test array */
	},
};
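
/*
 * Illustrative sketch, not called by the suite: probing a port for inline
 * IPsec protocol offload before running tests. The capability-array walk is
 * the standard rte_security pattern (terminated by
 * RTE_SECURITY_ACTION_TYPE_NONE); the helper name and its use here are
 * assumptions for illustration, and the void * context assumes the opaque
 * security-context signature of recent DPDK releases.
 */
static int __rte_unused
port_has_inline_ipsec_sketch(uint16_t port_id)
{
	const struct rte_security_capability *cap;
	void *sec_ctx;

	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	if (sec_ctx == NULL)
		return 0;

	for (cap = rte_security_capabilities_get(sec_ctx);
	     cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
	     cap++) {
		if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
		    cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
			return 1;
	}
	return 0;
}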


/*
 * The same suite object is reused by all three test commands below; each
 * entry point binds its own setup/teardown before invoking the runner.
 */
static int
test_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}


static int
test_inline_ipsec_sg(void)
{
	int rc;

	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;

	/* Run the whole suite with multi-segment (scatter-gather) mbufs */
	sg_mode = true;
	rc = unit_test_suite_runner(&inline_ipsec_testsuite);
	sg_mode = false;

	/* Drop the offloads enabled for the SG run so that subsequent
	 * (non-SG) runs start from the default port configuration.
	 */
	port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_SCATTER;
	port_conf.txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	return rc;
}
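
/*
 * Illustrative sketch, not called by the suite: how a setup path might
 * enable the multi-segment offloads that the SG run clears above, assuming
 * capability checks are done against rte_eth_dev_info_get(). The helper
 * name is hypothetical; the offload flags are the standard ethdev ones.
 */
static int __rte_unused
sg_offloads_enable_sketch(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Rx scatter and Tx multi-seg must both be supported for SG tests */
	if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    !(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
		return TEST_SKIPPED;

	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	return 0;
}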

static int
test_event_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}
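
/*
 * Illustrative sketch, not called by the suite: minimal wiring of an event
 * device to an ethdev Rx queue through the Rx adapter, of the kind the
 * event-mode setup is expected to perform. The adapter id (0), queue ids,
 * depths and the helper name are placeholder assumptions for the sketch.
 */
static int __rte_unused
event_rx_adapter_sketch(uint8_t evdev_id, uint16_t eth_port_id)
{
	struct rte_event_port_conf port_config = {
		.new_event_threshold = 1024,
		.dequeue_depth = 32,
		.enqueue_depth = 32,
	};
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
		},
	};
	int ret;

	/* Create adapter 0 and attach all Rx queues of the port (-1) */
	ret = rte_event_eth_rx_adapter_create(0, evdev_id, &port_config);
	if (ret < 0)
		return ret;
	ret = rte_event_eth_rx_adapter_queue_add(0, eth_port_id, -1,
						 &queue_conf);
	if (ret < 0)
		return ret;
	return rte_event_eth_rx_adapter_start(0);
}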

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
REGISTER_TEST_COMMAND(inline_ipsec_sg_autotest, test_inline_ipsec_sg);
REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
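
/*
 * The registered commands are run from the dpdk-test prompt, e.g.:
 *   RTE>>inline_ipsec_autotest
 *   RTE>>inline_ipsec_sg_autotest
 *   RTE>>event_inline_ipsec_autotest
 */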