1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
18 test_inline_ipsec(void)
19 {
20 	printf("Inline ipsec not supported on Windows, skipping test\n");
21 	return TEST_SKIPPED;
22 }
23 
24 static int
25 test_event_inline_ipsec(void)
26 {
27 	printf("Event inline ipsec not supported on Windows, skipping test\n");
28 	return TEST_SKIPPED;
29 }
30 
31 #else
32 
33 #include <rte_eventdev.h>
34 #include <rte_event_eth_rx_adapter.h>
35 #include <rte_event_eth_tx_adapter.h>
36 
37 #define NB_ETHPORTS_USED		1
38 #define MEMPOOL_CACHE_SIZE		32
39 #define MAX_PKT_BURST			32
40 #define RTE_TEST_RX_DESC_DEFAULT	1024
41 #define RTE_TEST_TX_DESC_DEFAULT	1024
42 #define RTE_PORT_ALL		(~(uint16_t)0x0)
43 
44 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
45 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
46 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
47 
48 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
49 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
50 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
51 
52 #define MAX_TRAFFIC_BURST		2048
53 #define NB_MBUF				10240
54 
55 #define ENCAP_DECAP_BURST_SZ		33
56 #define APP_REASS_TIMEOUT		10
57 
58 extern struct ipsec_test_data pkt_aes_128_gcm;
59 extern struct ipsec_test_data pkt_aes_192_gcm;
60 extern struct ipsec_test_data pkt_aes_256_gcm;
61 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
62 extern struct ipsec_test_data pkt_aes_128_cbc_null;
63 extern struct ipsec_test_data pkt_null_aes_xcbc;
64 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
65 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
66 
67 static struct rte_mempool *mbufpool;
68 static struct rte_mempool *sess_pool;
69 static struct rte_mempool *sess_priv_pool;
70 /* ethernet addresses of ports */
71 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
72 
73 static struct rte_eth_conf port_conf = {
74 	.rxmode = {
75 		.mq_mode = RTE_ETH_MQ_RX_NONE,
76 		.split_hdr_size = 0,
77 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
78 			    RTE_ETH_RX_OFFLOAD_SECURITY,
79 	},
80 	.txmode = {
81 		.mq_mode = RTE_ETH_MQ_TX_NONE,
82 		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
83 			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
84 	},
85 	.lpbk_mode = 1,  /* Enable loopback so Tx packets are received back on the same port */
86 };
87 
88 static struct rte_eth_rxconf rx_conf = {
89 	.rx_thresh = {
90 		.pthresh = RX_PTHRESH,
91 		.hthresh = RX_HTHRESH,
92 		.wthresh = RX_WTHRESH,
93 	},
94 	.rx_free_thresh = 32,
95 };
96 
97 static struct rte_eth_txconf tx_conf = {
98 	.tx_thresh = {
99 		.pthresh = TX_PTHRESH,
100 		.hthresh = TX_HTHRESH,
101 		.wthresh = TX_WTHRESH,
102 	},
103 	.tx_free_thresh = 32, /* Explicit value; 0 would select the PMD default */
104 	.tx_rs_thresh = 32, /* Explicit value; 0 would select the PMD default */
105 };
106 
107 static uint16_t port_id;
108 static uint8_t eventdev_id;
109 static uint8_t rx_adapter_id;
110 static uint8_t tx_adapter_id;
111 
112 static bool event_mode_enabled;
113 
114 static uint64_t link_mbps;
115 
116 static int ip_reassembly_dynfield_offset = -1;
117 
118 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
119 
120 /* Create Inline IPsec session */
121 static int
122 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
123 		struct rte_security_session **sess, struct rte_security_ctx **ctx,
124 		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
125 		struct rte_security_session_conf *sess_conf)
126 {
127 	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
128 				0x0000, 0x001a};
129 	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
130 				0xe82c, 0x4887};
131 	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
132 	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
133 	struct rte_security_capability_idx sec_cap_idx;
134 	const struct rte_security_capability *sec_cap;
135 	enum rte_security_ipsec_sa_direction dir;
136 	struct rte_security_ctx *sec_ctx;
137 	uint32_t verify;
138 
139 	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
140 	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
141 	sess_conf->ipsec = sa->ipsec_xform;
142 
143 	dir = sa->ipsec_xform.direction;
144 	verify = flags->tunnel_hdr_verify;
145 
146 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
147 		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
148 			src_v4 += 1;
149 		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
150 			dst_v4 += 1;
151 	}
152 
153 	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
154 		if (sa->ipsec_xform.tunnel.type ==
155 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
156 			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
157 					sizeof(src_v4));
158 			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
159 					sizeof(dst_v4));
160 
161 			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
162 				sess_conf->ipsec.tunnel.ipv4.df = 0;
163 
164 			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
165 				sess_conf->ipsec.tunnel.ipv4.df = 1;
166 
167 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
168 				sess_conf->ipsec.tunnel.ipv4.dscp = 0;
169 
170 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
171 				sess_conf->ipsec.tunnel.ipv4.dscp =
172 						TEST_IPSEC_DSCP_VAL;
173 		} else {
174 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
175 				sess_conf->ipsec.tunnel.ipv6.dscp = 0;
176 
177 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
178 				sess_conf->ipsec.tunnel.ipv6.dscp =
179 						TEST_IPSEC_DSCP_VAL;
180 
181 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
182 				sess_conf->ipsec.tunnel.ipv6.flabel = 0;
183 
184 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
185 				sess_conf->ipsec.tunnel.ipv6.flabel =
186 						TEST_IPSEC_FLABEL_VAL;
187 
188 			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
189 					sizeof(src_v6));
190 			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
191 					sizeof(dst_v6));
192 		}
193 	}
194 
195 	/* Save SA as userdata for the security session. When
196 	 * the packet is received, this userdata will be
197 	 * retrieved using the metadata from the packet.
198 	 *
199 	 * The PMD is expected to set similar metadata for other
200 	 * operations, like rte_eth_event, which are tied to
201 	 * security session. In such cases, the userdata could
202 	 * be obtained to uniquely identify the security
203 	 * parameters denoted.
204 	 */
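	/*
	 * Illustrative sketch (not exercised by this test): on Rx, the
	 * userdata could be looked up again from the PMD-provided metadata
	 * in the security dynfield, e.g.:
	 *
	 *	if (rte_security_dynfield_is_registered())
	 *		sa = rte_security_get_userdata(sec_ctx,
	 *				*rte_security_dynfield(mbuf));
	 *
	 * Whether the dynfield is populated is PMD-dependent.
	 */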
205 
206 	sess_conf->userdata = (void *) sa;
207 
208 	sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
209 	if (sec_ctx == NULL) {
210 		printf("Ethernet device doesn't support security features.\n");
211 		return TEST_SKIPPED;
212 	}
213 
214 	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
215 	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
216 	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
217 	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
218 	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
219 	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
220 	if (sec_cap == NULL) {
221 		printf("No capabilities registered\n");
222 		return TEST_SKIPPED;
223 	}
224 
225 	if (sa->aead || sa->aes_gmac)
226 		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
227 			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
228 
229 	/* Copy cipher session parameters */
230 	if (sa->aead) {
231 		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
232 				sizeof(struct rte_crypto_sym_xform));
233 		sess_conf->crypto_xform->aead.key.data = sa->key.data;
234 		/* Verify crypto capabilities */
235 		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
236 					sess_conf->crypto_xform) != 0) {
237 			RTE_LOG(INFO, USER1,
238 				"Crypto capabilities not supported\n");
239 			return TEST_SKIPPED;
240 		}
241 	} else {
242 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
243 			rte_memcpy(&sess_conf->crypto_xform->cipher,
244 					&sa->xform.chain.cipher.cipher,
245 					sizeof(struct rte_crypto_cipher_xform));
246 
247 			rte_memcpy(&sess_conf->crypto_xform->next->auth,
248 					&sa->xform.chain.auth.auth,
249 					sizeof(struct rte_crypto_auth_xform));
250 			sess_conf->crypto_xform->cipher.key.data =
251 							sa->key.data;
252 			sess_conf->crypto_xform->next->auth.key.data =
253 							sa->auth_key.data;
254 			/* Verify crypto capabilities */
255 			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
256 					sess_conf->crypto_xform) != 0) {
257 				RTE_LOG(INFO, USER1,
258 					"Cipher crypto capabilities not supported\n");
259 				return TEST_SKIPPED;
260 			}
261 
262 			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
263 					sess_conf->crypto_xform->next) != 0) {
264 				RTE_LOG(INFO, USER1,
265 					"Auth crypto capabilities not supported\n");
266 				return TEST_SKIPPED;
267 			}
268 		} else {
269 			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
270 					&sa->xform.chain.cipher.cipher,
271 					sizeof(struct rte_crypto_cipher_xform));
272 			rte_memcpy(&sess_conf->crypto_xform->auth,
273 					&sa->xform.chain.auth.auth,
274 					sizeof(struct rte_crypto_auth_xform));
275 			sess_conf->crypto_xform->auth.key.data =
276 							sa->auth_key.data;
277 			sess_conf->crypto_xform->next->cipher.key.data =
278 							sa->key.data;
279 
280 			/* Verify crypto capabilities */
281 			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
282 					sess_conf->crypto_xform->next) != 0) {
283 				RTE_LOG(INFO, USER1,
284 					"Cipher crypto capabilities not supported\n");
285 				return TEST_SKIPPED;
286 			}
287 
288 			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
289 					sess_conf->crypto_xform) != 0) {
290 				RTE_LOG(INFO, USER1,
291 					"Auth crypto capabilities not supported\n");
292 				return TEST_SKIPPED;
293 			}
294 		}
295 	}
296 
297 	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
298 		return TEST_SKIPPED;
299 
300 	if ((sa->ipsec_xform.direction ==
301 			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
302 			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
303 		/* Set env variable when IV generation is disabled */
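		/*
		 * The loop below renders the IV as "0x11, 0x22, ..." (with a
		 * trailing ", "). How ETH_SEC_IV_OVR is parsed is PMD-specific;
		 * a driver honouring the override uses this fixed IV instead of
		 * generating a random one, so output can match known vectors.
		 */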
304 		char arr[128];
305 		int len = 0, j = 0;
306 		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
307 
308 		for (; j < iv_len; j++)
309 			len += snprintf(arr+len, sizeof(arr) - len,
310 					"0x%x, ", sa->iv.data[j]);
311 		setenv("ETH_SEC_IV_OVR", arr, 1);
312 	}
313 
314 	*sess = rte_security_session_create(sec_ctx,
315 				sess_conf, sess_pool, sess_priv_pool);
316 	if (*sess == NULL) {
317 		printf("SEC Session init failed.\n");
318 		return TEST_FAILED;
319 	}
320 
321 	*ol_flags = sec_cap->ol_flags;
322 	*ctx = sec_ctx;
323 
324 	return 0;
325 }
326 
327 /* Check the link status of all ports for up to 3s and print the final status */
328 static void
329 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
330 {
331 #define CHECK_INTERVAL 100 /* 100ms */
332 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
333 	uint16_t portid;
334 	uint8_t count, all_ports_up, print_flag = 0;
335 	struct rte_eth_link link;
336 	int ret;
337 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
338 
339 	printf("Checking link statuses...\n");
340 	fflush(stdout);
341 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
342 		all_ports_up = 1;
343 		for (portid = 0; portid < port_num; portid++) {
344 			if ((port_mask & (1 << portid)) == 0)
345 				continue;
346 			memset(&link, 0, sizeof(link));
347 			ret = rte_eth_link_get_nowait(portid, &link);
348 			if (ret < 0) {
349 				all_ports_up = 0;
350 				if (print_flag == 1)
351 					printf("Port %u link get failed: %s\n",
352 						portid, rte_strerror(-ret));
353 				continue;
354 			}
355 
356 			/* print link status if flag set */
357 			if (print_flag == 1) {
358 				if (link.link_status && link_mbps == 0)
359 					link_mbps = link.link_speed;
360 
361 				rte_eth_link_to_str(link_status,
362 					sizeof(link_status), &link);
363 				printf("Port %d %s\n", portid, link_status);
364 				continue;
365 			}
366 			/* clear all_ports_up flag if any link down */
367 			if (link.link_status == RTE_ETH_LINK_DOWN) {
368 				all_ports_up = 0;
369 				break;
370 			}
371 		}
372 		/* after finally printing all link status, get out */
373 		if (print_flag == 1)
374 			break;
375 
376 		if (all_ports_up == 0) {
377 			fflush(stdout);
378 			rte_delay_ms(CHECK_INTERVAL);
379 		}
380 
381 		/* set the print_flag if all ports up or timeout */
382 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
383 			print_flag = 1;
384 	}
385 }
386 
387 static void
388 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
389 {
390 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
391 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
392 	printf("%s%s", name, buf);
393 }
394 
395 static void
396 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
397 		     struct rte_mbuf *pkt, unsigned int offset)
398 {
399 	unsigned int copied = 0;
400 	unsigned int copy_len;
401 	struct rte_mbuf *seg;
402 	void *seg_buf;
403 
404 	seg = pkt;
405 	while (offset >= seg->data_len) {
406 		offset -= seg->data_len;
407 		seg = seg->next;
408 	}
409 	copy_len = seg->data_len - offset;
410 	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
411 	while (len > copy_len) {
412 		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
413 		len -= copy_len;
414 		copied += copy_len;
415 		seg = seg->next;
416 		seg_buf = rte_pktmbuf_mtod(seg, void *);
417 	}
418 	rte_memcpy(seg_buf, buf + copied, (size_t) len);
419 }
420 
421 static bool
422 is_outer_ipv4(struct ipsec_test_data *td)
423 {
424 	bool outer_ipv4;
425 
426 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ||
427 	    td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
428 		outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION);
429 	else
430 		outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4);
431 	return outer_ipv4;
432 }
433 
434 static inline struct rte_mbuf *
435 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4)
436 {
437 	struct rte_mbuf *pkt;
438 
439 	pkt = rte_pktmbuf_alloc(mp);
440 	if (pkt == NULL)
441 		return NULL;
442 
443 	if (outer_ipv4) {
444 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
445 				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
446 		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
447 	} else {
448 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
449 				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
450 		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
451 	}
452 	pkt->l2_len = RTE_ETHER_HDR_LEN;
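	/* l2_len/l3_len are consumed by PMDs when applying Tx offloads */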
453 
454 	if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
455 		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
456 	else
457 		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
458 	return pkt;
459 }
460 
461 static int
462 init_mempools(unsigned int nb_mbuf)
463 {
464 	struct rte_security_ctx *sec_ctx;
465 	uint16_t nb_sess = 512;
466 	uint32_t sess_sz;
467 	char s[64];
468 
469 	if (mbufpool == NULL) {
470 		snprintf(s, sizeof(s), "mbuf_pool");
471 		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
472 				MEMPOOL_CACHE_SIZE, 0,
473 				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
474 		if (mbufpool == NULL) {
475 			printf("Cannot init mbuf pool\n");
476 			return TEST_FAILED;
477 		}
478 		printf("Allocated mbuf pool\n");
479 	}
480 
481 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
482 	if (sec_ctx == NULL) {
483 		printf("Device does not support Security ctx\n");
484 		return TEST_SKIPPED;
485 	}
486 	sess_sz = rte_security_session_get_size(sec_ctx);
487 	if (sess_pool == NULL) {
488 		snprintf(s, sizeof(s), "sess_pool");
489 		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
490 				MEMPOOL_CACHE_SIZE, 0,
491 				NULL, NULL, NULL, NULL,
492 				SOCKET_ID_ANY, 0);
493 		if (sess_pool == NULL) {
494 			printf("Cannot init sess pool\n");
495 			return TEST_FAILED;
496 		}
497 		printf("Allocated sess pool\n");
498 	}
499 	if (sess_priv_pool == NULL) {
500 		snprintf(s, sizeof(s), "sess_priv_pool");
501 		sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
502 				MEMPOOL_CACHE_SIZE, 0,
503 				NULL, NULL, NULL, NULL,
504 				SOCKET_ID_ANY, 0);
505 		if (sess_priv_pool == NULL) {
506 			printf("Cannot init sess_priv pool\n");
507 			return TEST_FAILED;
508 		}
509 		printf("Allocated sess_priv pool\n");
510 	}
511 
512 	return 0;
513 }
514 
515 static int
516 create_default_flow(uint16_t portid)
517 {
518 	struct rte_flow_action action[2];
519 	struct rte_flow_item pattern[2];
520 	struct rte_flow_attr attr = {0};
521 	struct rte_flow_error err;
522 	struct rte_flow *flow;
523 	int ret;
524 
525 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
526 
527 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
528 	pattern[0].spec = NULL;
529 	pattern[0].mask = NULL;
530 	pattern[0].last = NULL;
531 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
532 
533 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
534 	action[0].conf = NULL;
535 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
536 	action[1].conf = NULL;
537 
538 	attr.ingress = 1;
539 
540 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
541 	if (ret) {
542 		printf("\nValidate flow failed, ret = %d\n", ret);
543 		return -1;
544 	}
545 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
546 	if (flow == NULL) {
547 		printf("\nDefault flow rule create failed\n");
548 		return -1;
549 	}
550 
551 	default_flow[portid] = flow;
552 
553 	return 0;
554 }
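
/*
 * For reference only: the rule installed above corresponds to this testpmd
 * command (a sketch, assuming port 0):
 *
 *	flow create 0 ingress pattern esp / end actions security / end
 */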
555 
556 static void
557 destroy_default_flow(uint16_t portid)
558 {
559 	struct rte_flow_error err;
560 	int ret;
561 
562 	if (!default_flow[portid])
563 		return;
564 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
565 	if (ret) {
566 		printf("\nDefault flow rule destroy failed\n");
567 		return;
568 	}
569 	default_flow[portid] = NULL;
570 }
571 
572 struct rte_mbuf **tx_pkts_burst;
573 struct rte_mbuf **rx_pkts_burst;
574 
575 static int
576 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
577 {
578 	unsigned int len;
579 	unsigned int nb_segs = m->nb_segs;
580 	unsigned int matched = 0;
581 	struct rte_mbuf *save = m;
582 
583 	while (m) {
584 		len = tot_len;
585 		if (len > m->data_len)
586 			len = m->data_len;
587 		if (len != 0) {
588 			if (memcmp(rte_pktmbuf_mtod(m, char *),
589 					ref + matched, len)) {
590 				printf("\n====Reassembly case failed: Data Mismatch");
591 				rte_hexdump(stdout, "Reassembled",
592 					rte_pktmbuf_mtod(m, char *),
593 					len);
594 				rte_hexdump(stdout, "Reference",
595 					ref + matched,
596 					len);
597 				return TEST_FAILED;
598 			}
599 		}
600 		tot_len -= len;
601 		matched += len;
602 		m = m->next;
603 	}
604 
605 	if (tot_len) {
606 		printf("\n====Reassembly case failed: Data Missing %u",
607 		       tot_len);
608 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
609 		rte_pktmbuf_dump(stderr, save, -1);
610 		return TEST_FAILED;
611 	}
612 	return TEST_SUCCESS;
613 }
614 
615 static inline bool
616 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
617 {
618 	static uint64_t ip_reassembly_dynflag;
619 	int ip_reassembly_dynflag_offset;
620 
621 	if (ip_reassembly_dynflag == 0) {
622 		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
623 			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
624 		if (ip_reassembly_dynflag_offset < 0)
625 			return false;
626 		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
627 	}
628 
629 	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
630 }
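
/*
 * Note: the incomplete-reassembly dynflag tested above pairs with the
 * dynfield registered under RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME;
 * free_mbuf() and get_and_verify_incomplete_frags() below walk the
 * fragment chain through rte_eth_ip_reassembly_dynfield_t::next_frag.
 */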
631 
632 static void
633 free_mbuf(struct rte_mbuf *mbuf)
634 {
635 	rte_eth_ip_reassembly_dynfield_t dynfield;
636 
637 	if (!mbuf)
638 		return;
639 
640 	if (!is_ip_reassembly_incomplete(mbuf)) {
641 		rte_pktmbuf_free(mbuf);
642 	} else {
643 		if (ip_reassembly_dynfield_offset < 0)
644 			return;
645 
646 		while (mbuf) {
647 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
648 					ip_reassembly_dynfield_offset,
649 					rte_eth_ip_reassembly_dynfield_t *);
650 			rte_pktmbuf_free(mbuf);
651 			mbuf = dynfield.next_frag;
652 		}
653 	}
654 }
655 
656 
657 static int
658 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
659 				struct reassembly_vector *vector)
660 {
661 	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
662 	int j = 0, ret;
663 	/**
664 	 * IP reassembly offload is incomplete, and fragments are listed in
665 	 * dynfield which can be reassembled in SW.
666 	 */
667 	printf("\nHW IP reassembly is incomplete; matching the fragments"
668 		" listed in the dynfield against the original frags.");
669 
670 	if (ip_reassembly_dynfield_offset < 0)
671 		return -1;
672 
673 	printf("\ncomparing frag: %d", j);
674 	/* Skip Ethernet header comparison */
675 	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
676 	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
677 				vector->frags[j]->len);
678 	if (ret)
679 		return ret;
680 	j++;
681 	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
682 					rte_eth_ip_reassembly_dynfield_t *);
683 	printf("\ncomparing frag: %d", j);
684 	/* Skip Ethernet header comparison */
685 	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
686 	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
687 			vector->frags[j]->len);
688 	if (ret)
689 		return ret;
690 
691 	while ((dynfield[j]->nb_frags > 1) &&
692 			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
693 		j++;
694 		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
695 					ip_reassembly_dynfield_offset,
696 					rte_eth_ip_reassembly_dynfield_t *);
697 		printf("\ncomparing frag: %d", j);
698 		/* Skip Ethernet header comparison */
699 		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
700 		ret = compare_pkt_data(dynfield[j]->next_frag,
701 				vector->frags[j]->data, vector->frags[j]->len);
702 		if (ret)
703 			return ret;
704 	}
705 	return ret;
706 }
707 
708 static int
709 test_ipsec_with_reassembly(struct reassembly_vector *vector,
710 		const struct ipsec_test_flags *flags)
711 {
712 	struct rte_security_session *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
713 	struct rte_security_session *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
714 	struct rte_eth_ip_reassembly_params reass_capa = {0};
715 	struct rte_security_session_conf sess_conf_out = {0};
716 	struct rte_security_session_conf sess_conf_in = {0};
717 	unsigned int nb_tx, burst_sz, nb_sent = 0;
718 	struct rte_crypto_sym_xform cipher_out = {0};
719 	struct rte_crypto_sym_xform auth_out = {0};
720 	struct rte_crypto_sym_xform aead_out = {0};
721 	struct rte_crypto_sym_xform cipher_in = {0};
722 	struct rte_crypto_sym_xform auth_in = {0};
723 	struct rte_crypto_sym_xform aead_in = {0};
724 	struct ipsec_test_data sa_data;
725 	struct rte_security_ctx *ctx;
726 	unsigned int i, nb_rx = 0, j;
727 	uint32_t ol_flags;
728 	bool outer_ipv4;
729 	int ret = 0;
730 
731 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
732 	nb_tx = vector->nb_frags * burst_sz;
733 
734 	ret = rte_eth_dev_stop(port_id);
735 	if (ret != 0) {
736 		printf("rte_eth_dev_stop: err=%s, port=%u\n",
737 			       rte_strerror(-ret), port_id);
738 		return ret;
739 	}
740 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
741 	if (reass_capa.max_frags < vector->nb_frags)
742 		return TEST_SKIPPED;
743 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
744 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
745 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
746 	}
747 
748 	ret = rte_eth_dev_start(port_id);
749 	if (ret < 0) {
750 		printf("rte_eth_dev_start: err=%d, port=%d\n",
751 			ret, port_id);
752 		return ret;
753 	}
754 
755 	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
756 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
757 
758 	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
759 	sa_data.ipsec_xform.direction =	RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
760 	outer_ipv4 = is_outer_ipv4(&sa_data);
761 
762 	for (i = 0; i < nb_tx; i += vector->nb_frags) {
763 		for (j = 0; j < vector->nb_frags; j++) {
764 			tx_pkts_burst[i+j] = init_packet(mbufpool,
765 						vector->frags[j]->data,
766 						vector->frags[j]->len, outer_ipv4);
767 			if (tx_pkts_burst[i+j] == NULL) {
768 				ret = -1;
769 				printf("\npacket init failed\n");
770 				goto out;
771 			}
772 		}
773 	}
774 
775 	for (i = 0; i < burst_sz; i++) {
776 		memcpy(&sa_data, vector->sa_data,
777 				sizeof(struct ipsec_test_data));
778 		/* Update SPI for every new SA */
779 		sa_data.ipsec_xform.spi += i;
780 		sa_data.ipsec_xform.direction =
781 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
782 		if (sa_data.aead) {
783 			sess_conf_out.crypto_xform = &aead_out;
784 		} else {
785 			sess_conf_out.crypto_xform = &cipher_out;
786 			sess_conf_out.crypto_xform->next = &auth_out;
787 		}
788 
789 		/* Create Inline IPsec outbound session. */
790 		ret = create_inline_ipsec_session(&sa_data, port_id,
791 				&out_ses[i], &ctx, &ol_flags, flags,
792 				&sess_conf_out);
793 		if (ret) {
794 			printf("\nInline outbound session create failed\n");
795 			goto out;
796 		}
797 	}
798 
799 	j = 0;
800 	for (i = 0; i < nb_tx; i++) {
801 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
802 			rte_security_set_pkt_metadata(ctx,
803 				out_ses[j], tx_pkts_burst[i], NULL);
804 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
805 
806 		/* Move to next SA after nb_frags */
807 		if ((i + 1) % vector->nb_frags == 0)
808 			j++;
809 	}
810 
811 	for (i = 0; i < burst_sz; i++) {
812 		memcpy(&sa_data, vector->sa_data,
813 				sizeof(struct ipsec_test_data));
814 		/* Update SPI for every new SA */
815 		sa_data.ipsec_xform.spi += i;
816 		sa_data.ipsec_xform.direction =
817 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
818 
819 		if (sa_data.aead) {
820 			sess_conf_in.crypto_xform = &aead_in;
821 		} else {
822 			sess_conf_in.crypto_xform = &auth_in;
823 			sess_conf_in.crypto_xform->next = &cipher_in;
824 		}
825 		/* Create Inline IPsec inbound session. */
826 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
827 				&ctx, &ol_flags, flags, &sess_conf_in);
828 		if (ret) {
829 			printf("\nInline inbound session create failed\n");
830 			goto out;
831 		}
832 	}
833 
834 	/* Retrieve reassembly dynfield offset if available */
835 	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
836 		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
837 				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
838 
839 
840 	ret = create_default_flow(port_id);
841 	if (ret)
842 		goto out;
843 
844 	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
845 	if (nb_sent != nb_tx) {
846 		ret = -1;
847 		printf("\nFailed to tx %u pkts", nb_tx);
848 		goto out;
849 	}
850 
851 	rte_delay_ms(1);
852 
853 	/* Retry a few times before giving up */
854 	nb_rx = 0;
855 	j = 0;
856 	do {
857 		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
858 					  nb_tx - nb_rx);
859 		j++;
860 		if (nb_rx >= nb_tx)
861 			break;
862 		rte_delay_ms(1);
863 	} while (j < 5 || !nb_rx);
864 
865 	/* Check for minimum number of Rx packets expected */
866 	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
867 	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
868 		printf("\nreceived fewer Rx pkts (%u) than expected\n", nb_rx);
869 		ret = TEST_FAILED;
870 		goto out;
871 	}
872 
873 	for (i = 0; i < nb_rx; i++) {
874 		if (vector->nb_frags > 1 &&
875 		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
876 			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
877 							      vector);
878 			if (ret != TEST_SUCCESS)
879 				break;
880 			continue;
881 		}
882 
883 		if (rx_pkts_burst[i]->ol_flags &
884 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
885 		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
886 			printf("\nsecurity offload failed\n");
887 			ret = TEST_FAILED;
888 			break;
889 		}
890 
891 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
892 				rx_pkts_burst[i]->pkt_len) {
893 			printf("\nreassembled/decrypted packet length mismatch\n");
894 			ret = TEST_FAILED;
895 			break;
896 		}
897 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
898 		ret = compare_pkt_data(rx_pkts_burst[i],
899 				       vector->full_pkt->data,
900 				       vector->full_pkt->len);
901 		if (ret != TEST_SUCCESS)
902 			break;
903 	}
904 
905 out:
906 	destroy_default_flow(port_id);
907 
908 	/* Clear session data. */
909 	for (i = 0; i < burst_sz; i++) {
910 		if (out_ses[i])
911 			rte_security_session_destroy(ctx, out_ses[i]);
912 		if (in_ses[i])
913 			rte_security_session_destroy(ctx, in_ses[i]);
914 	}
915 
916 	for (i = nb_sent; i < nb_tx; i++)
917 		free_mbuf(tx_pkts_burst[i]);
918 	for (i = 0; i < nb_rx; i++)
919 		free_mbuf(rx_pkts_burst[i]);
920 	return ret;
921 }
922 
923 static int
924 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
925 {
926 	struct rte_event ev;
927 	int i, nb_sent = 0;
928 
929 	/* Convert packets to events */
930 	memset(&ev, 0, sizeof(ev));
931 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
932 	for (i = 0; i < nb_pkts; i++) {
933 		ev.mbuf = tx_pkts[i];
934 		nb_sent += rte_event_eth_tx_adapter_enqueue(
935 				eventdev_id, port_id, &ev, 1, 0);
936 	}
937 
938 	return nb_sent;
939 }
940 
941 static int
942 event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
943 {
944 	int nb_ev, nb_rx = 0, j = 0;
945 	const int ms_per_pkt = 3;
946 	struct rte_event ev;
947 
948 	do {
949 		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
950 				&ev, 1, 0);
951 
952 		if (nb_ev == 0) {
953 			rte_delay_ms(1);
954 			continue;
955 		}
956 
957 		/* Get packet from event */
958 		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
959 			printf("Unsupported event type: %i\n",
960 				ev.event_type);
961 			continue;
962 		}
963 		rx_pkts[nb_rx++] = ev.mbuf;
964 	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
965 
966 	return nb_rx;
967 }
968 
969 static int
970 test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
971 		enum rte_eth_event_type type, void *param, void *ret_param)
972 {
973 	struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param;
974 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
975 
976 	RTE_SET_USED(port_id);
977 
978 	if (type != RTE_ETH_EVENT_IPSEC)
979 		return -1;
980 
981 	event_desc = ret_param;
982 	if (event_desc == NULL) {
983 		printf("Event descriptor not set\n");
984 		return -1;
985 	}
986 	vector->notify_event = true;
987 	if (event_desc->metadata != (uint64_t)vector->sa_data) {
988 		printf("Mismatch in event specific metadata\n");
989 		return -1;
990 	}
991 	switch (event_desc->subtype) {
992 	case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY:
993 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
994 		break;
995 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY:
996 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
997 		break;
998 	case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY:
999 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1000 		break;
1001 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY:
1002 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1003 		break;
1004 	default:
1005 		printf("Invalid IPsec event reported\n");
1006 		return -1;
1007 	}
1008 
1009 	return 0;
1010 }
1011 
1012 static enum rte_eth_event_ipsec_subtype
1013 test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector,
1014 		const struct ipsec_test_flags *flags,
1015 		struct ipsec_test_data *tdata)
1016 {
1017 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1018 
1019 	vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1020 	vector->notify_event = false;
1021 	vector->sa_data = (void *)tdata;
1022 	if (flags->sa_expiry_pkts_soft)
1023 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1024 	else if (flags->sa_expiry_bytes_soft)
1025 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1026 	else if (flags->sa_expiry_pkts_hard)
1027 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1028 	else
1029 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1030 	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
1031 		       test_ipsec_inline_sa_exp_event_callback, vector);
1032 
1033 	return event;
1034 }
1035 
1036 static int
1037 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
1038 		struct ipsec_test_data *res_d,
1039 		int nb_pkts,
1040 		bool silent,
1041 		const struct ipsec_test_flags *flags)
1042 {
1043 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1044 	struct rte_security_session_conf sess_conf = {0};
1045 	struct rte_crypto_sym_xform cipher = {0};
1046 	struct rte_crypto_sym_xform auth = {0};
1047 	struct rte_crypto_sym_xform aead = {0};
1048 	struct sa_expiry_vector vector = {0};
1049 	struct rte_security_session *ses;
1050 	struct rte_security_ctx *ctx;
1051 	int nb_rx = 0, nb_sent;
1052 	uint32_t ol_flags;
1053 	int i, j = 0, ret;
1054 	bool outer_ipv4;
1055 
1056 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
1057 
1058 	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
1059 		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
1060 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1061 			return TEST_SUCCESS;
1062 		event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td);
1063 	}
1064 
1065 	if (td->aead) {
1066 		sess_conf.crypto_xform = &aead;
1067 	} else {
1068 		if (td->ipsec_xform.direction ==
1069 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1070 			sess_conf.crypto_xform = &cipher;
1071 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1072 			sess_conf.crypto_xform->next = &auth;
1073 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1074 		} else {
1075 			sess_conf.crypto_xform = &auth;
1076 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1077 			sess_conf.crypto_xform->next = &cipher;
1078 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1079 		}
1080 	}
1081 
1082 	/* Create Inline IPsec session. */
1083 	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
1084 					  &ol_flags, flags, &sess_conf);
1085 	if (ret)
1086 		return ret;
1087 
1088 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1089 		ret = create_default_flow(port_id);
1090 		if (ret)
1091 			goto out;
1092 	}
1093 	outer_ipv4 = is_outer_ipv4(td);
1094 
1095 	for (i = 0; i < nb_pkts; i++) {
1096 		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
1097 						td->input_text.len, outer_ipv4);
1098 		if (tx_pkts_burst[i] == NULL) {
1099 			while (i--)
1100 				rte_pktmbuf_free(tx_pkts_burst[i]);
1101 			ret = TEST_FAILED;
1102 			goto out;
1103 		}
1104 
1105 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
1106 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1107 			while (i--)
1108 				rte_pktmbuf_free(tx_pkts_burst[i]);
1109 			ret = TEST_FAILED;
1110 			goto out;
1111 		}
1112 
1113 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1114 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1115 				rte_security_set_pkt_metadata(ctx, ses,
1116 						tx_pkts_burst[i], NULL);
1117 			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1118 		}
1119 	}
1120 	/* Send packet to ethdev for inline IPsec processing. */
1121 	if (event_mode_enabled)
1122 		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
1123 	else
1124 		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
1125 
1126 	if (nb_sent != nb_pkts) {
1127 		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
1128 		for ( ; nb_sent < nb_pkts; nb_sent++)
1129 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
1130 		ret = TEST_FAILED;
1131 		goto out;
1132 	}
1133 
1134 	rte_pause();
1135 
1136 	/* Receive back packet on loopback interface. */
1137 	if (event_mode_enabled)
1138 		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
1139 	else
1140 		do {
1141 			rte_delay_ms(1);
1142 			nb_rx += rte_eth_rx_burst(port_id, 0,
1143 					&rx_pkts_burst[nb_rx],
1144 					nb_sent - nb_rx);
1145 			if (nb_rx >= nb_sent)
1146 				break;
1147 		} while (j++ < 5 || nb_rx == 0);
1148 
1149 	if (!flags->sa_expiry_pkts_hard &&
1150 			!flags->sa_expiry_bytes_hard &&
1151 			(nb_rx != nb_sent)) {
1152 		printf("\nUnable to RX all %d packets, received (%i)",
1153 				nb_sent, nb_rx);
1154 		while (--nb_rx >= 0)
1155 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
1156 		ret = TEST_FAILED;
1157 		goto out;
1158 	}
1159 
1160 	for (i = 0; i < nb_rx; i++) {
1161 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
1162 
1163 		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
1164 					      res_d, silent, flags);
1165 		if (ret != TEST_SUCCESS) {
1166 			for ( ; i < nb_rx; i++)
1167 				rte_pktmbuf_free(rx_pkts_burst[i]);
1168 			goto out;
1169 		}
1170 
1171 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1172 					td->ipsec_xform.direction);
1173 		if (ret != TEST_SUCCESS) {
1174 			for ( ; i < nb_rx; i++)
1175 				rte_pktmbuf_free(rx_pkts_burst[i]);
1176 			goto out;
1177 		}
1178 
1179 		rte_pktmbuf_free(rx_pkts_burst[i]);
1180 		rx_pkts_burst[i] = NULL;
1181 	}
1182 
1183 out:
1184 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1185 		destroy_default_flow(port_id);
1186 	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
1187 		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
1188 		if (vector.notify_event && (vector.event == event))
1189 			ret = TEST_SUCCESS;
1190 		else
1191 			ret = TEST_FAILED;
1192 
1193 		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
1194 			test_ipsec_inline_sa_exp_event_callback, &vector);
1195 	}
1196 
1197 	/* Destroy session so that other cases can create the session again */
1198 	rte_security_session_destroy(ctx, ses);
1199 	ses = NULL;
1200 
1201 	return ret;
1202 }
1203 
1204 static int
1205 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1206 {
1207 	struct ipsec_test_data td_outb;
1208 	struct ipsec_test_data td_inb;
1209 	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
1210 	int ret;
1211 
1212 	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1213 			flags->sa_expiry_bytes_soft ||
1214 			flags->sa_expiry_bytes_hard ||
1215 			flags->sa_expiry_pkts_hard)
1216 		nb_pkts = IPSEC_TEST_PACKETS_MAX;
1217 
1218 	for (i = 0; i < RTE_DIM(alg_list); i++) {
1219 		test_ipsec_td_prepare(alg_list[i].param1,
1220 				      alg_list[i].param2,
1221 				      flags, &td_outb, 1);
1222 
1223 		if (!td_outb.aead) {
1224 			enum rte_crypto_cipher_algorithm cipher_alg;
1225 			enum rte_crypto_auth_algorithm auth_alg;
1226 
1227 			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1228 			auth_alg = td_outb.xform.chain.auth.auth.algo;
1229 
1230 			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1231 				continue;
1232 
1233 			/* ICV is not applicable for NULL auth */
1234 			if (flags->icv_corrupt &&
1235 			    auth_alg == RTE_CRYPTO_AUTH_NULL)
1236 				continue;
1237 
1238 			/* IV is not applicable for NULL cipher */
1239 			if (flags->iv_gen &&
1240 			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1241 				continue;
1242 		}
1243 
1244 		if (flags->udp_encap)
1245 			td_outb.ipsec_xform.options.udp_encap = 1;
1246 
1247 		if (flags->sa_expiry_bytes_soft)
1248 			td_outb.ipsec_xform.life.bytes_soft_limit =
1249 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1250 				  * nb_pkts) >> 3) - 1;
1251 		if (flags->sa_expiry_pkts_hard)
1252 			td_outb.ipsec_xform.life.packets_hard_limit =
1253 					IPSEC_TEST_PACKETS_MAX - 1;
1254 		if (flags->sa_expiry_bytes_hard)
1255 			td_outb.ipsec_xform.life.bytes_hard_limit =
1256 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1257 				  * nb_pkts) >> 3) - 1;
1258 
1259 		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1260 						false, flags);
1261 		if (ret == TEST_SKIPPED)
1262 			continue;
1263 
1264 		if (ret == TEST_FAILED) {
1265 			printf("\n TEST FAILED");
1266 			test_ipsec_display_alg(alg_list[i].param1,
1267 					       alg_list[i].param2);
1268 			fail_cnt++;
1269 			continue;
1270 		}
1271 
1272 		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1273 
1274 		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1275 						false, flags);
1276 		if (ret == TEST_SKIPPED)
1277 			continue;
1278 
1279 		if (ret == TEST_FAILED) {
1280 			printf("\n TEST FAILED");
1281 			test_ipsec_display_alg(alg_list[i].param1,
1282 					       alg_list[i].param2);
1283 			fail_cnt++;
1284 			continue;
1285 		}
1286 
1287 		if (flags->display_alg)
1288 			test_ipsec_display_alg(alg_list[i].param1,
1289 					       alg_list[i].param2);
1290 
1291 		pass_cnt++;
1292 	}
1293 
1294 	printf("Tests passed: %d, failed: %d\n", pass_cnt, fail_cnt);
1295 	if (fail_cnt > 0)
1296 		return TEST_FAILED;
1297 	if (pass_cnt > 0)
1298 		return TEST_SUCCESS;
1299 	else
1300 		return TEST_SKIPPED;
1301 }
1302 
1303 static int
1304 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
1305 		struct ipsec_test_data res_d[],
1306 		int nb_pkts,
1307 		bool silent,
1308 		const struct ipsec_test_flags *flags)
1309 {
1310 	struct rte_security_session_conf sess_conf = {0};
1311 	struct ipsec_test_data *res_d_tmp = NULL;
1312 	struct rte_crypto_sym_xform cipher = {0};
1313 	struct rte_crypto_sym_xform auth = {0};
1314 	struct rte_crypto_sym_xform aead = {0};
1315 	struct rte_mbuf *rx_pkt = NULL;
1316 	struct rte_mbuf *tx_pkt = NULL;
1317 	int nb_rx, nb_sent;
1318 	struct rte_security_session *ses;
1319 	struct rte_security_ctx *ctx;
1320 	uint32_t ol_flags;
1321 	bool outer_ipv4;
1322 	int i, ret;
1323 
1324 	if (td[0].aead) {
1325 		sess_conf.crypto_xform = &aead;
1326 	} else {
1327 		if (td[0].ipsec_xform.direction ==
1328 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1329 			sess_conf.crypto_xform = &cipher;
1330 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1331 			sess_conf.crypto_xform->next = &auth;
1332 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1333 		} else {
1334 			sess_conf.crypto_xform = &auth;
1335 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1336 			sess_conf.crypto_xform->next = &cipher;
1337 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1338 		}
1339 	}
1340 
1341 	/* Create Inline IPsec session. */
1342 	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
1343 					  &ol_flags, flags, &sess_conf);
1344 	if (ret)
1345 		return ret;
1346 
1347 	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1348 		ret = create_default_flow(port_id);
1349 		if (ret)
1350 			goto out;
1351 	}
1352 	outer_ipv4 = is_outer_ipv4(td);
1353 
1354 	for (i = 0; i < nb_pkts; i++) {
1355 		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
1356 					td[i].input_text.len, outer_ipv4);
1357 		if (tx_pkt == NULL) {
1358 			ret = TEST_FAILED;
1359 			goto out;
1360 		}
1361 
1362 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
1363 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1364 			ret = TEST_FAILED;
1365 			goto out;
1366 		}
1367 
1368 		if (td[i].ipsec_xform.direction ==
1369 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1370 			if (flags->antireplay) {
1371 				sess_conf.ipsec.esn.value =
1372 						td[i].ipsec_xform.esn.value;
1373 				ret = rte_security_session_update(ctx, ses,
1374 						&sess_conf);
1375 				if (ret) {
1376 					printf("Could not update ESN in session\n");
1377 					rte_pktmbuf_free(tx_pkt);
1378 					ret = TEST_SKIPPED;
1379 					goto out;
1380 				}
1381 			}
1382 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1383 				rte_security_set_pkt_metadata(ctx, ses,
1384 						tx_pkt, NULL);
1385 			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1386 		}
1387 		/* Send packet to ethdev for inline IPsec processing. */
1388 		nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
1389 		if (nb_sent != 1) {
1390 			printf("\nUnable to TX packets");
1391 			rte_pktmbuf_free(tx_pkt);
1392 			ret = TEST_FAILED;
1393 			goto out;
1394 		}
1395 
1396 		rte_pause();
1397 
1398 		/* Receive back packet on loopback interface. */
1399 		do {
1400 			rte_delay_ms(1);
1401 			nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
1402 		} while (nb_rx == 0);
1403 
1404 		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);
1405 
1406 		if (res_d != NULL)
1407 			res_d_tmp = &res_d[i];
1408 
1409 		ret = test_ipsec_post_process(rx_pkt, &td[i],
1410 					      res_d_tmp, silent, flags);
1411 		if (ret != TEST_SUCCESS) {
1412 			rte_pktmbuf_free(rx_pkt);
1413 			goto out;
1414 		}
1415 
1416 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1417 					td->ipsec_xform.direction);
1418 		if (ret != TEST_SUCCESS) {
1419 			rte_pktmbuf_free(rx_pkt);
1420 			goto out;
1421 		}
1422 
1423 		rte_pktmbuf_free(rx_pkt);
1424 		rx_pkt = NULL;
1425 		tx_pkt = NULL;
1426 	}
1427 
1428 out:
1429 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1430 		destroy_default_flow(port_id);
1431 
1432 	/* Destroy session so that other cases can create the session again */
1433 	rte_security_session_destroy(ctx, ses);
1434 	ses = NULL;
1435 
1436 	return ret;
1437 }
1438 
1439 static int
1440 ut_setup_inline_ipsec(void)
1441 {
1442 	int ret;
1443 
1444 	/* Start device */
1445 	ret = rte_eth_dev_start(port_id);
1446 	if (ret < 0) {
1447 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1448 			ret, port_id);
1449 		return ret;
1450 	}
1451 	/* Always enable promiscuous mode */
1452 	ret = rte_eth_promiscuous_enable(port_id);
1453 	if (ret != 0) {
1454 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1455 			rte_strerror(-ret), port_id);
1456 		return ret;
1457 	}
1458 
1459 	check_all_ports_link_status(1, RTE_PORT_ALL);
1460 
1461 	return 0;
1462 }
1463 
1464 static void
1465 ut_teardown_inline_ipsec(void)
1466 {
1467 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1468 	uint16_t portid;
1469 	int ret;
1470 
1471 	/* port tear down */
1472 	RTE_ETH_FOREACH_DEV(portid) {
1473 		ret = rte_eth_dev_stop(portid);
1474 		if (ret != 0)
1475 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1476 			       rte_strerror(-ret), portid);
1477 
1478 		/* Clear reassembly configuration */
1479 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1480 	}
1481 }
1482 
1483 static int
1484 inline_ipsec_testsuite_setup(void)
1485 {
1486 	uint16_t nb_rxd;
1487 	uint16_t nb_txd;
1488 	uint16_t nb_ports;
1489 	int ret;
1490 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1491 
1492 	printf("Start inline IPsec test.\n");
1493 
1494 	nb_ports = rte_eth_dev_count_avail();
1495 	if (nb_ports < NB_ETHPORTS_USED) {
1496 		printf("At least %u port(s) needed for test\n",
1497 		       NB_ETHPORTS_USED);
1498 		return TEST_SKIPPED;
1499 	}
1500 
1501 	ret = init_mempools(NB_MBUF);
1502 	if (ret)
1503 		return ret;
1504 
1505 	if (tx_pkts_burst == NULL) {
1506 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1507 					  MAX_TRAFFIC_BURST,
1508 					  sizeof(void *),
1509 					  RTE_CACHE_LINE_SIZE);
1510 		if (!tx_pkts_burst)
1511 			return TEST_FAILED;
1512 
1513 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1514 					  MAX_TRAFFIC_BURST,
1515 					  sizeof(void *),
1516 					  RTE_CACHE_LINE_SIZE);
1517 		if (!rx_pkts_burst)
1518 			return TEST_FAILED;
1519 	}
1520 
1521 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1522 
1523 	nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
1524 	nb_txd = RTE_TEST_TX_DESC_DEFAULT;
1525 
1526 	/* configuring port 0 for the test is enough */
1527 	port_id = 0;
1528 	/* port configure */
1529 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1530 				    nb_tx_queue, &port_conf);
1531 	if (ret < 0) {
1532 		printf("Cannot configure device: err=%d, port=%d\n",
1533 			 ret, port_id);
1534 		return ret;
1535 	}
1536 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1537 	if (ret < 0) {
1538 		printf("Cannot get mac address: err=%d, port=%d\n",
1539 			 ret, port_id);
1540 		return ret;
1541 	}
1542 	printf("Port %u ", port_id);
1543 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
1544 	printf("\n");
1545 
1546 	/* tx queue setup */
1547 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1548 				     SOCKET_ID_ANY, &tx_conf);
1549 	if (ret < 0) {
1550 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1551 				ret, port_id);
1552 		return ret;
1553 	}
1554 	/* rx queue setup */
1555 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1556 				     &rx_conf, mbufpool);
1557 	if (ret < 0) {
1558 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1559 				ret, port_id);
1560 		return ret;
1561 	}
1562 	test_ipsec_alg_list_populate();
1563 
1564 	return 0;
1565 }
1566 
1567 static void
1568 inline_ipsec_testsuite_teardown(void)
1569 {
1570 	uint16_t portid;
1571 	int ret;
1572 
1573 	/* port tear down */
1574 	RTE_ETH_FOREACH_DEV(portid) {
1575 		ret = rte_eth_dev_reset(portid);
1576 		if (ret != 0)
1577 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1578 			       rte_strerror(-ret), portid);
1579 	}
1580 	rte_free(tx_pkts_burst);
1581 	rte_free(rx_pkts_burst);
1582 }
1583 
1584 static int
1585 event_inline_ipsec_testsuite_setup(void)
1586 {
1587 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
1588 	struct rte_event_dev_info evdev_default_conf = {0};
1589 	struct rte_event_dev_config eventdev_conf = {0};
1590 	struct rte_event_queue_conf eventq_conf = {0};
1591 	struct rte_event_port_conf ev_port_conf = {0};
1592 	const uint16_t nb_txd = 1024, nb_rxd = 1024;
1593 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1594 	uint8_t ev_queue_id = 0, tx_queue_id = 0;
1595 	int nb_eventqueue = 1, nb_eventport = 1;
1596 	const int all_queues = -1;
1597 	uint32_t caps = 0;
1598 	uint16_t nb_ports;
1599 	int ret;
1600 
1601 	printf("Start event inline IPsec test.\n");
1602 
1603 	nb_ports = rte_eth_dev_count_avail();
1604 	if (nb_ports == 0) {
1605 		printf("Test requires: 1 port, available: 0\n");
1606 		return TEST_SKIPPED;
1607 	}
1608 
1609 	ret = init_mempools(NB_MBUF);
	if (ret)
		return ret;
1610 
1611 	if (tx_pkts_burst == NULL) {
1612 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1613 					  MAX_TRAFFIC_BURST,
1614 					  sizeof(void *),
1615 					  RTE_CACHE_LINE_SIZE);
1616 		if (!tx_pkts_burst)
1617 			return -1;
1618 
1619 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1620 					  MAX_TRAFFIC_BURST,
1621 					  sizeof(void *),
1622 					  RTE_CACHE_LINE_SIZE);
1623 		if (!rx_pkts_burst)
1624 			return -1;
1625 
1626 	}
1627 
1628 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1629 
1630 	/* configuring port 0 for the test is enough */
1631 	port_id = 0;
1632 	/* port configure */
1633 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1634 				    nb_tx_queue, &port_conf);
1635 	if (ret < 0) {
1636 		printf("Cannot configure device: err=%d, port=%d\n",
1637 			 ret, port_id);
1638 		return ret;
1639 	}
1640 
1641 	/* Tx queue setup */
1642 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1643 				     SOCKET_ID_ANY, &tx_conf);
1644 	if (ret < 0) {
1645 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1646 				ret, port_id);
1647 		return ret;
1648 	}
1649 
1650 	/* rx queue setup */
1651 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1652 				     &rx_conf, mbufpool);
1653 	if (ret < 0) {
1654 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1655 				ret, port_id);
1656 		return ret;
1657 	}
1658 
1659 	/* Setup eventdev */
1660 	eventdev_id = 0;
1661 	rx_adapter_id = 0;
1662 	tx_adapter_id = 0;
1663 
1664 	/* Get default conf of eventdev */
1665 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1666 	if (ret < 0) {
1667 		printf("Error in getting event device info[devID:%d]\n",
1668 				eventdev_id);
1669 		return ret;
1670 	}
1671 
1672 	/* Get Tx adapter capabilities */
1673 	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, port_id, &caps);
1674 	if (ret < 0) {
1675 		printf("Failed to get event device %d eth tx adapter"
1676 				" capabilities for port %d\n",
1677 				eventdev_id, port_id);
1678 		return ret;
1679 	}
1680 	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
1681 		tx_queue_id = nb_eventqueue++;
1682 
1683 	eventdev_conf.nb_events_limit =
1684 			evdev_default_conf.max_num_events;
1685 	eventdev_conf.nb_event_queue_flows =
1686 			evdev_default_conf.max_event_queue_flows;
1687 	eventdev_conf.nb_event_port_dequeue_depth =
1688 			evdev_default_conf.max_event_port_dequeue_depth;
1689 	eventdev_conf.nb_event_port_enqueue_depth =
1690 			evdev_default_conf.max_event_port_enqueue_depth;
1691 
1692 	eventdev_conf.nb_event_queues = nb_eventqueue;
1693 	eventdev_conf.nb_event_ports = nb_eventport;
1694 
1695 	/* Configure event device */
1696 
1697 	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
1698 	if (ret < 0) {
1699 		printf("Error in configuring event device\n");
1700 		return ret;
1701 	}
1702 
1703 	/* Configure event queue */
1704 	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1705 	eventq_conf.nb_atomic_flows = 1024;
1706 	eventq_conf.nb_atomic_order_sequences = 1024;
1707 
1708 	/* Setup the queue */
1709 	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
1710 	if (ret < 0) {
1711 		printf("Failed to setup event queue %d\n", ret);
1712 		return ret;
1713 	}
1714 
1715 	/* Configure event port */
1716 	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
1717 	if (ret < 0) {
1718 		printf("Failed to setup event port %d\n", ret);
1719 		return ret;
1720 	}
1721 
1722 	/* Make event queue - event port link */
1723 	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
1724 	if (ret < 0) {
1725 		printf("Failed to link event port %d\n", ret);
1726 		return ret;
1727 	}
1728 
1729 	/* Setup port conf */
1730 	ev_port_conf.new_event_threshold = 1200;
1731 	ev_port_conf.dequeue_depth =
1732 			evdev_default_conf.max_event_port_dequeue_depth;
1733 	ev_port_conf.enqueue_depth =
1734 			evdev_default_conf.max_event_port_enqueue_depth;
1735 
1736 	/* Create Rx adapter */
1737 	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
1738 			&ev_port_conf);
1739 	if (ret < 0) {
1740 		printf("Failed to create rx adapter %d\n", ret);
1741 		return ret;
1742 	}
1743 
1744 	/* Setup queue conf */
1745 	queue_conf.ev.queue_id = ev_queue_id;
1746 	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1747 	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
1748 
1749 	/* Add queue to the adapter */
1750 	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
1751 			all_queues, &queue_conf);
1752 	if (ret < 0) {
1753 		printf("Failed to add eth queue to rx adapter %d\n", ret);
1754 		return ret;
1755 	}
1756 
1757 	/* Start rx adapter */
1758 	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
1759 	if (ret < 0) {
1760 		printf("Failed to start rx adapter %d\n", ret);
1761 		return ret;
1762 	}
1763 
1764 	/* Create tx adapter */
1765 	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
1766 			&ev_port_conf);
1767 	if (ret < 0) {
1768 		printf("Failed to create tx adapter %d\n", ret);
1769 		return ret;
1770 	}
1771 
1772 	/* Add queue to the adapter */
1773 	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
1774 			all_queues);
1775 	if (ret < 0) {
1776 		printf("Failed to add eth queue to tx adapter %d\n", ret);
1777 		return ret;
1778 	}
1779 	/* Setup Tx queue & port */
1780 	if (tx_queue_id) {
1781 		/* Setup the queue */
1782 		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
1783 				&eventq_conf);
1784 		if (ret < 0) {
1785 			printf("Failed to setup tx event queue %d\n", ret);
1786 			return ret;
1787 		}
1788 		/* Link Tx event queue to Tx port */
1789 		ret = rte_event_port_link(eventdev_id, port_id,
1790 				&tx_queue_id, NULL, 1);
1791 		if (ret != 1) {
1792 			printf("Failed to link event queue to port\n");
1793 			return ret;
1794 		}
1795 	}
1796 
1797 	/* Start tx adapter */
1798 	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
1799 	if (ret < 0) {
1800 		printf("Failed to start tx adapter %d\n", ret);
1801 		return ret;
1802 	}
1803 
1804 	/* Start eventdev */
1805 	ret = rte_event_dev_start(eventdev_id);
1806 	if (ret < 0) {
1807 		printf("Failed to start event device %d\n", ret);
1808 		return ret;
1809 	}
1810 
1811 	event_mode_enabled = true;
1812 	test_ipsec_alg_list_populate();
1813 
1814 	return 0;
1815 }
1816 
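/*
 * With the above in place, the event-mode I/O path exercised by the test
 * cases is roughly the following (simplified sketch; the real burst
 * helpers in this file add batching, timeouts and validation):
 *
 *	struct rte_event ev;
 *
 *	if (rte_event_dequeue_burst(eventdev_id, port_id, &ev, 1, 0)) {
 *		// ev.mbuf has already been processed by inline IPsec
 *		rte_event_eth_tx_adapter_enqueue(eventdev_id, port_id,
 *						 &ev, 1, 0);
 *	}
 */
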
1817 static void
1818 event_inline_ipsec_testsuite_teardown(void)
1819 {
1820 	uint16_t portid;
1821 	int ret;
1822 
1823 	event_mode_enabled = false;
1824 
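	/*
	 * Teardown mirrors setup in reverse: the adapters are stopped and
	 * their queues removed before the event device itself is stopped
	 * and closed, and the ethdev ports are reset last.
	 */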
1825 	/* Stop and release rx adapter */
1826 	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
1827 	if (ret < 0)
1828 		printf("Failed to stop rx adapter %d\n", ret);
1829 	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
1830 	if (ret < 0)
1831 		printf("Failed to remove rx adapter queues %d\n", ret);
1832 	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
1833 	if (ret < 0)
1834 		printf("Failed to free rx adapter %d\n", ret);
1835 
1836 	/* Stop and release tx adapter */
1837 	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
1838 	if (ret < 0)
1839 		printf("Failed to stop tx adapter %d\n", ret);
1840 	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
1841 	if (ret < 0)
1842 		printf("Failed to remove tx adapter queues %d\n", ret);
1843 	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
1844 	if (ret < 0)
1845 		printf("Failed to free tx adapter %d\n", ret);
1846 
1847 	/* Stop and release event devices */
1848 	rte_event_dev_stop(eventdev_id);
1849 	ret = rte_event_dev_close(eventdev_id);
1850 	if (ret < 0)
1851 		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
1852 
1853 	/* port tear down */
1854 	RTE_ETH_FOREACH_DEV(portid) {
1855 		ret = rte_eth_dev_reset(portid);
1856 		if (ret != 0)
1857 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1858 			       rte_strerror(-ret), portid);
1859 	}
1860 
1861 	rte_free(tx_pkts_burst);
1862 	rte_free(rx_pkts_burst);
1863 }
1864 
1865 static int
1866 test_inline_ip_reassembly(const void *testdata)
1867 {
1868 	struct reassembly_vector reassembly_td = {0};
1869 	const struct reassembly_vector *td = testdata;
1870 	struct ip_reassembly_test_packet full_pkt;
1871 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
1872 	struct ipsec_test_flags flags = {0};
1873 	int i;
1874 
1875 	reassembly_td.sa_data = td->sa_data;
1876 	reassembly_td.nb_frags = td->nb_frags;
1877 	reassembly_td.burst = td->burst;
1878 
1879 	memcpy(&full_pkt, td->full_pkt,
1880 			sizeof(struct ip_reassembly_test_packet));
1881 	reassembly_td.full_pkt = &full_pkt;
1882 
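	/*
	 * The vectors carry headers only; test_vector_payload_populate()
	 * fills in the payload. Judging by its use here, the boolean marks
	 * the first buffer of a sequence (true for the full packet and for
	 * fragment 0), so later fragments continue the pattern at the right
	 * offset.
	 */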
1883 	test_vector_payload_populate(reassembly_td.full_pkt, true);
1884 	for (i = 0; i < reassembly_td.nb_frags; i++) {
1885 		memcpy(&frags[i], td->frags[i],
1886 			sizeof(struct ip_reassembly_test_packet));
1887 		reassembly_td.frags[i] = &frags[i];
1888 		test_vector_payload_populate(reassembly_td.frags[i],
1889 				(i == 0));
1890 	}
1891 
1892 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
1893 }
1894 
1895 static int
1896 test_ipsec_inline_proto_known_vec(const void *test_data)
1897 {
1898 	struct ipsec_test_data td_outb;
1899 	struct ipsec_test_flags flags;
1900 
1901 	memset(&flags, 0, sizeof(flags));
1902 
1903 	memcpy(&td_outb, test_data, sizeof(td_outb));
1904 
1905 	if (td_outb.aead ||
1906 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
1907 		/* Disable IV gen to be able to test with known vectors */
1908 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
1909 	}
1910 
1911 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
1912 				false, &flags);
1913 }
1914 
1915 static int
1916 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1917 {
1918 	const struct ipsec_test_data *td = test_data;
1919 	struct ipsec_test_flags flags;
1920 	struct ipsec_test_data td_inb;
1921 
1922 	memset(&flags, 0, sizeof(flags));
1923 
1924 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1925 		test_ipsec_td_in_from_out(td, &td_inb);
1926 	else
1927 		memcpy(&td_inb, td, sizeof(td_inb));
1928 
1929 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
1930 }
1931 
1932 static int
1933 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1934 {
1935 	struct ipsec_test_flags flags;
1936 
1937 	memset(&flags, 0, sizeof(flags));
1938 
1939 	flags.display_alg = true;
1940 
1941 	return test_ipsec_inline_proto_all(&flags);
1942 }
1943 
1944 static int
1945 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
1946 {
1947 	struct ipsec_test_flags flags;
1948 
1949 	memset(&flags, 0, sizeof(flags));
1950 
1951 	flags.udp_encap = true;
1952 
1953 	return test_ipsec_inline_proto_all(&flags);
1954 }
1955 
1956 static int
1957 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
1958 {
1959 	struct ipsec_test_flags flags;
1960 
1961 	memset(&flags, 0, sizeof(flags));
1962 
1963 	flags.udp_encap = true;
1964 	flags.udp_ports_verify = true;
1965 
1966 	return test_ipsec_inline_proto_all(&flags);
1967 }
1968 
1969 static int
1970 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
1971 {
1972 	struct ipsec_test_flags flags;
1973 
1974 	memset(&flags, 0, sizeof(flags));
1975 
1976 	flags.icv_corrupt = true;
1977 
1978 	return test_ipsec_inline_proto_all(&flags);
1979 }
1980 
1981 static int
1982 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
1983 {
1984 	struct ipsec_test_flags flags;
1985 
1986 	memset(&flags, 0, sizeof(flags));
1987 
1988 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
1989 
1990 	return test_ipsec_inline_proto_all(&flags);
1991 }
1992 
1993 static int
1994 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
1995 {
1996 	struct ipsec_test_flags flags;
1997 
1998 	memset(&flags, 0, sizeof(flags));
1999 
2000 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
2001 
2002 	return test_ipsec_inline_proto_all(&flags);
2003 }
2004 
2005 static int
2006 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
2007 {
2008 	struct ipsec_test_flags flags;
2009 
2010 	memset(&flags, 0, sizeof(flags));
2011 
2012 	flags.ip_csum = true;
2013 
2014 	return test_ipsec_inline_proto_all(&flags);
2015 }
2016 
2017 static int
2018 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
2019 {
2020 	struct ipsec_test_flags flags;
2021 
2022 	memset(&flags, 0, sizeof(flags));
2023 
2024 	flags.l4_csum = true;
2025 
2026 	return test_ipsec_inline_proto_all(&flags);
2027 }
2028 
2029 static int
2030 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
2031 {
2032 	struct ipsec_test_flags flags;
2033 
2034 	memset(&flags, 0, sizeof(flags));
2035 
2036 	flags.ipv6 = false;
2037 	flags.tunnel_ipv6 = false;
2038 
2039 	return test_ipsec_inline_proto_all(&flags);
2040 }
2041 
2042 static int
2043 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
2044 {
2045 	struct ipsec_test_flags flags;
2046 
2047 	memset(&flags, 0, sizeof(flags));
2048 
2049 	flags.ipv6 = true;
2050 	flags.tunnel_ipv6 = true;
2051 
2052 	return test_ipsec_inline_proto_all(&flags);
2053 }
2054 
2055 static int
2056 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
2057 {
2058 	struct ipsec_test_flags flags;
2059 
2060 	memset(&flags, 0, sizeof(flags));
2061 
2062 	flags.ipv6 = false;
2063 	flags.tunnel_ipv6 = true;
2064 
2065 	return test_ipsec_inline_proto_all(&flags);
2066 }
2067 
2068 static int
2069 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
2070 {
2071 	struct ipsec_test_flags flags;
2072 
2073 	memset(&flags, 0, sizeof(flags));
2074 
2075 	flags.ipv6 = true;
2076 	flags.tunnel_ipv6 = false;
2077 
2078 	return test_ipsec_inline_proto_all(&flags);
2079 }
2080 
2081 static int
2082 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
2083 {
2084 	struct ipsec_test_flags flags;
2085 
2086 	memset(&flags, 0, sizeof(flags));
2087 
2088 	flags.ipv6 = false;
2089 	flags.transport = true;
2090 
2091 	return test_ipsec_inline_proto_all(&flags);
2092 }
2093 
2094 static int
2095 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
2096 {
2097 	struct ipsec_test_flags flags = {
2098 		.l4_csum = true,
2099 		.transport = true,
2100 	};
2101 
2102 	return test_ipsec_inline_proto_all(&flags);
2103 }
2104 
2105 static int
2106 test_ipsec_inline_proto_stats(const void *data __rte_unused)
2107 {
2108 	struct ipsec_test_flags flags;
2109 
2110 	memset(&flags, 0, sizeof(flags));
2111 
2112 	flags.stats_success = true;
2113 
2114 	return test_ipsec_inline_proto_all(&flags);
2115 }
2116 
2117 static int
2118 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
2119 {
2120 	struct ipsec_test_flags flags;
2121 
2122 	memset(&flags, 0, sizeof(flags));
2123 
2124 	flags.fragment = true;
2125 
2126 	return test_ipsec_inline_proto_all(&flags);
2128 }
2129 
2130 static int
2131 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
2132 {
2133 	struct ipsec_test_flags flags;
2134 
2135 	memset(&flags, 0, sizeof(flags));
2136 
2137 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
2138 
2139 	return test_ipsec_inline_proto_all(&flags);
2140 }
2141 
2142 static int
2143 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
2144 {
2145 	struct ipsec_test_flags flags;
2146 
2147 	memset(&flags, 0, sizeof(flags));
2148 
2149 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
2150 
2151 	return test_ipsec_inline_proto_all(&flags);
2152 }
2153 
2154 static int
2155 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
2156 {
2157 	struct ipsec_test_flags flags;
2158 
2159 	memset(&flags, 0, sizeof(flags));
2160 
2161 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
2162 
2163 	return test_ipsec_inline_proto_all(&flags);
2164 }
2165 
2166 static int
2167 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
2168 {
2169 	struct ipsec_test_flags flags;
2170 
2171 	memset(&flags, 0, sizeof(flags));
2172 
2173 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
2174 
2175 	return test_ipsec_inline_proto_all(&flags);
2176 }
2177 
2178 static int
2179 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
2180 {
2181 	struct ipsec_test_flags flags;
2182 
2183 	memset(&flags, 0, sizeof(flags));
2184 
2185 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2186 
2187 	return test_ipsec_inline_proto_all(&flags);
2188 }
2189 
2190 static int
2191 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
2192 {
2193 	struct ipsec_test_flags flags;
2194 
2195 	memset(&flags, 0, sizeof(flags));
2196 
2197 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2198 
2199 	return test_ipsec_inline_proto_all(&flags);
2200 }
2201 
2202 static int
2203 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
2204 {
2205 	struct ipsec_test_flags flags;
2206 
2207 	memset(&flags, 0, sizeof(flags));
2208 
2209 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2210 
2211 	return test_ipsec_inline_proto_all(&flags);
2212 }
2213 
2214 static int
2215 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
2216 {
2217 	struct ipsec_test_flags flags;
2218 
2219 	memset(&flags, 0, sizeof(flags));
2220 
2221 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2222 
2223 	return test_ipsec_inline_proto_all(&flags);
2224 }
2225 
2226 static int
2227 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
2228 {
2229 	struct ipsec_test_flags flags;
2230 
2231 	memset(&flags, 0, sizeof(flags));
2232 
2233 	flags.ipv6 = true;
2234 	flags.tunnel_ipv6 = true;
2235 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2236 
2237 	return test_ipsec_inline_proto_all(&flags);
2238 }
2239 
2240 static int
2241 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
2242 {
2243 	struct ipsec_test_flags flags;
2244 
2245 	memset(&flags, 0, sizeof(flags));
2246 
2247 	flags.ipv6 = true;
2248 	flags.tunnel_ipv6 = true;
2249 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2250 
2251 	return test_ipsec_inline_proto_all(&flags);
2252 }
2253 
2254 static int
2255 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
2256 {
2257 	struct ipsec_test_flags flags;
2258 
2259 	memset(&flags, 0, sizeof(flags));
2260 
2261 	flags.ipv6 = true;
2262 	flags.tunnel_ipv6 = true;
2263 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2264 
2265 	return test_ipsec_inline_proto_all(&flags);
2266 }
2267 
2268 static int
2269 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
2270 {
2271 	struct ipsec_test_flags flags;
2272 
2273 	memset(&flags, 0, sizeof(flags));
2274 
2275 	flags.ipv6 = true;
2276 	flags.tunnel_ipv6 = true;
2277 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2278 
2279 	return test_ipsec_inline_proto_all(&flags);
2280 }
2281 
2282 static int
2283 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
2284 {
2285 	struct ipsec_test_flags flags;
2286 
2287 	memset(&flags, 0, sizeof(flags));
2288 
2289 	flags.ipv6 = true;
2290 	flags.tunnel_ipv6 = true;
2291 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
2292 
2293 	return test_ipsec_inline_proto_all(&flags);
2294 }
2295 
2296 static int
2297 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
2298 {
2299 	struct ipsec_test_flags flags;
2300 
2301 	memset(&flags, 0, sizeof(flags));
2302 
2303 	flags.ipv6 = true;
2304 	flags.tunnel_ipv6 = true;
2305 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
2306 
2307 	return test_ipsec_inline_proto_all(&flags);
2308 }
2309 
2310 static int
2311 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
2312 {
2313 	struct ipsec_test_flags flags;
2314 
2315 	memset(&flags, 0, sizeof(flags));
2316 
2317 	flags.ipv6 = true;
2318 	flags.tunnel_ipv6 = true;
2319 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
2320 
2321 	return test_ipsec_inline_proto_all(&flags);
2322 }
2323 
2324 static int
2325 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
2326 {
2327 	struct ipsec_test_flags flags;
2328 
2329 	memset(&flags, 0, sizeof(flags));
2330 
2331 	flags.ipv6 = true;
2332 	flags.tunnel_ipv6 = true;
2333 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
2334 
2335 	return test_ipsec_inline_proto_all(&flags);
2336 }
2337 
2338 static int
2339 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
2340 {
2341 	struct ipsec_test_flags flags = {
2342 		.dec_ttl_or_hop_limit = true
2343 	};
2344 
2345 	return test_ipsec_inline_proto_all(&flags);
2346 }
2347 
2348 static int
2349 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
2350 {
2351 	struct ipsec_test_flags flags = {
2352 		.ipv6 = true,
2353 		.dec_ttl_or_hop_limit = true
2354 	};
2355 
2356 	return test_ipsec_inline_proto_all(&flags);
2357 }
2358 
2359 static int
2360 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
2361 {
2362 	struct ipsec_test_flags flags;
2363 
2364 	memset(&flags, 0, sizeof(flags));
2365 
2366 	flags.iv_gen = true;
2367 
2368 	return test_ipsec_inline_proto_all(&flags);
2369 }
2370 
2371 static int
2372 test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused)
2373 {
2374 	struct ipsec_test_flags flags = {
2375 		.sa_expiry_pkts_soft = true
2376 	};

2377 	return test_ipsec_inline_proto_all(&flags);
2378 }

2379 static int
2380 test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused)
2381 {
2382 	struct ipsec_test_flags flags = {
2383 		.sa_expiry_bytes_soft = true
2384 	};

2385 	return test_ipsec_inline_proto_all(&flags);
2386 }
2387 
2388 static int
2389 test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused)
2390 {
2391 	struct ipsec_test_flags flags = {
2392 		.sa_expiry_pkts_hard = true
2393 	};
2394 
2395 	return test_ipsec_inline_proto_all(&flags);
2396 }
2397 
2398 static int
2399 test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused)
2400 {
2401 	struct ipsec_test_flags flags = {
2402 		.sa_expiry_bytes_hard = true
2403 	};
2404 
2405 	return test_ipsec_inline_proto_all(&flags);
2406 }
2407 
2408 static int
2409 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
2410 {
2411 	struct ipsec_test_data td_outb;
2412 	struct ipsec_test_flags flags;
2413 
2414 	memset(&flags, 0, sizeof(flags));
2415 	flags.fragment = true;
2416 
2417 	memcpy(&td_outb, test_data, sizeof(td_outb));
2418 
2419 	/* Disable IV gen to be able to test with known vectors */
2420 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
2421 
2422 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
2423 						&flags);
2424 }
2425 
2426 static int
2427 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
2428 		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
2429 		      uint64_t winsz)
2430 {
2431 	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
2432 	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
2433 	struct ipsec_test_flags flags;
2434 	uint32_t i;
2435 	int ret = 0;

2436 	memset(&flags, 0, sizeof(flags));
2437 	flags.antireplay = true;
2438 
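	/*
	 * Two-phase flow: first encrypt nb_pkts outbound with fixed sequence
	 * numbers (IV generation disabled so the output stays deterministic),
	 * then feed the results back inbound with ar_packet marking the
	 * packets the anti-replay window is expected to drop.
	 */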
2439 	for (i = 0; i < nb_pkts; i++) {
2440 		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
2441 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
2442 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
2443 		td_outb[i].ipsec_xform.options.esn = esn_en;
2444 	}
2445 
2446 	for (i = 0; i < nb_pkts; i++)
2447 		td_outb[i].ipsec_xform.esn.value = esn[i];
2448 
2449 	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
2450 				nb_pkts, true, &flags);
2451 	if (ret != TEST_SUCCESS)
2452 		return ret;
2453 
2454 	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
2455 
2456 	for (i = 0; i < nb_pkts; i++) {
2457 		td_inb[i].ipsec_xform.options.esn = esn_en;
2458 		/* Set antireplay flag for packets to be dropped */
2459 		td_inb[i].ar_packet = replayed_pkt[i];
2460 	}
2461 
2462 	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
2463 				true, &flags);
2464 
2465 	return ret;
2466 }
2467 
2468 static int
2469 test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
2470 {
2472 	uint32_t nb_pkts = 5;
2473 	bool replayed_pkt[5];
2474 	uint64_t esn[5];
2475 
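	/*
	 * Window arithmetic exercised below: with the window TOP at T and
	 * window size WS, sequence numbers in (T - WS, T] are accepted once
	 * and anything at or below T - WS is dropped as replayed. After
	 * step 1 the TOP is WS * 2, so the BOTTOM is WS + 1 and esn[2] == WS
	 * falls outside the window.
	 */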
2476 	/* 1. Advance the TOP of the window to WS * 2 */
2477 	esn[0] = winsz * 2;
2478 	/* 2. Test sequence number within the new window(WS + 1) */
2479 	/* 2. Test sequence number within the new window (WS + 1) */
2480 	/* 3. Test sequence number less than the window BOTTOM */
2481 	esn[2] = winsz;
2482 	/* 4. Test sequence number in the middle of the window */
2483 	esn[3] = winsz + (winsz / 2);
2484 	/* 5. Test replay of the packet in the middle of the window */
2485 	esn[4] = winsz + (winsz / 2);
2486 
2487 	replayed_pkt[0] = false;
2488 	replayed_pkt[1] = false;
2489 	replayed_pkt[2] = true;
2490 	replayed_pkt[3] = false;
2491 	replayed_pkt[4] = true;
2492 
2493 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
2494 			nb_pkts, false, winsz);
2495 }
2496 
2497 static int
2498 test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
2499 {
2500 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024);
2501 }
2502 
2503 static int
2504 test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
2505 {
2506 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048);
2507 }
2508 
2509 static int
2510 test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
2511 {
2512 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096);
2513 }
2514 
2515 static int
2516 test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
2517 {
2519 	uint32_t nb_pkts = 7;
2520 	bool replayed_pkt[7];
2521 	uint64_t esn[7];
2522 
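	/*
	 * With ESN enabled only the low 32 bits of the sequence number are
	 * carried in the ESP header; the receiver infers the high half from
	 * its anti-replay window state. These vectors deliberately straddle
	 * the 2^32 boundary to exercise that inference.
	 */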
2523 	/* Set the initial sequence number */
2524 	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
2525 	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
2526 	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
2527 	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
2528 	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
2529 	/* 3. Test with sequence number within window (1<<32 - 1) */
2530 	esn[3] = (uint64_t)((1ULL << 32) - 1);
2531 	/* 4. Test with sequence number within window (1<<32) */
2532 	esn[4] = (uint64_t)(1ULL << 32);
2533 	/* 5. Test with duplicate sequence number within
2534 	 * new window (1<<32 - 1)
2535 	 */
2536 	esn[5] = (uint64_t)((1ULL << 32) - 1);
2537 	/* 6. Test with duplicate sequence number within new window (1<<32) */
2538 	esn[6] = (uint64_t)(1ULL << 32);
2539 
2540 	replayed_pkt[0] = false;
2541 	replayed_pkt[1] = false;
2542 	replayed_pkt[2] = false;
2543 	replayed_pkt[3] = false;
2544 	replayed_pkt[4] = false;
2545 	replayed_pkt[5] = true;
2546 	replayed_pkt[6] = true;
2547 
2548 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
2549 				     true, winsz);
2550 }
2551 
2552 static int
2553 test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
2554 {
2555 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024);
2556 }
2557 
2558 static int
2559 test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
2560 {
2561 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048);
2562 }
2563 
2564 static int
2565 test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
2566 {
2567 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096);
2568 }
2569 
2572 static struct unit_test_suite inline_ipsec_testsuite = {
2573 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
2574 	.unit_test_cases = {
2575 		TEST_CASE_NAMED_WITH_DATA(
2576 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2577 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2578 			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
2579 		TEST_CASE_NAMED_WITH_DATA(
2580 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2581 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2582 			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
2583 		TEST_CASE_NAMED_WITH_DATA(
2584 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2585 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2586 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
2587 		TEST_CASE_NAMED_WITH_DATA(
2588 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2589 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2590 			test_ipsec_inline_proto_known_vec,
2591 			&pkt_aes_128_cbc_hmac_sha256),
2592 		TEST_CASE_NAMED_WITH_DATA(
2593 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2594 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2595 			test_ipsec_inline_proto_known_vec,
2596 			&pkt_aes_128_cbc_hmac_sha384),
2597 		TEST_CASE_NAMED_WITH_DATA(
2598 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2599 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2600 			test_ipsec_inline_proto_known_vec,
2601 			&pkt_aes_128_cbc_hmac_sha512),
2602 		TEST_CASE_NAMED_WITH_DATA(
2603 			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
2604 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2605 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
2606 		TEST_CASE_NAMED_WITH_DATA(
2607 			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2608 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2609 			test_ipsec_inline_proto_known_vec,
2610 			&pkt_aes_128_cbc_hmac_sha256_v6),
2611 		TEST_CASE_NAMED_WITH_DATA(
2612 			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2613 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2614 			test_ipsec_inline_proto_known_vec,
2615 			&pkt_null_aes_xcbc),
2616 
2617 		TEST_CASE_NAMED_WITH_DATA(
2618 			"Outbound fragmented packet",
2619 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2620 			test_ipsec_inline_proto_known_vec_fragmented,
2621 			&pkt_aes_128_gcm_frag),
2622 
2623 		TEST_CASE_NAMED_WITH_DATA(
2624 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2625 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2626 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
2627 		TEST_CASE_NAMED_WITH_DATA(
2628 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2629 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2630 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
2631 		TEST_CASE_NAMED_WITH_DATA(
2632 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2633 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2634 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
2635 		TEST_CASE_NAMED_WITH_DATA(
2636 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
2637 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2638 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
2639 		TEST_CASE_NAMED_WITH_DATA(
2640 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2641 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2642 			test_ipsec_inline_proto_known_vec_inb,
2643 			&pkt_aes_128_cbc_hmac_sha256),
2644 		TEST_CASE_NAMED_WITH_DATA(
2645 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2646 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2647 			test_ipsec_inline_proto_known_vec_inb,
2648 			&pkt_aes_128_cbc_hmac_sha384),
2649 		TEST_CASE_NAMED_WITH_DATA(
2650 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2651 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2652 			test_ipsec_inline_proto_known_vec_inb,
2653 			&pkt_aes_128_cbc_hmac_sha512),
2654 		TEST_CASE_NAMED_WITH_DATA(
2655 			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
2656 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2657 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
2658 		TEST_CASE_NAMED_WITH_DATA(
2659 			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2660 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2661 			test_ipsec_inline_proto_known_vec_inb,
2662 			&pkt_aes_128_cbc_hmac_sha256_v6),
2663 		TEST_CASE_NAMED_WITH_DATA(
2664 			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2665 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2666 			test_ipsec_inline_proto_known_vec_inb,
2667 			&pkt_null_aes_xcbc),
2668 
2669 		TEST_CASE_NAMED_ST(
2670 			"Combined test alg list",
2671 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2672 			test_ipsec_inline_proto_display_list),
2673 
2674 		TEST_CASE_NAMED_ST(
2675 			"UDP encapsulation",
2676 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2677 			test_ipsec_inline_proto_udp_encap),
2678 		TEST_CASE_NAMED_ST(
2679 			"UDP encapsulation ports verification test",
2680 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2681 			test_ipsec_inline_proto_udp_ports_verify),
2682 		TEST_CASE_NAMED_ST(
2683 			"Negative test: ICV corruption",
2684 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2685 			test_ipsec_inline_proto_err_icv_corrupt),
2686 		TEST_CASE_NAMED_ST(
2687 			"Tunnel dst addr verification",
2688 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2689 			test_ipsec_inline_proto_tunnel_dst_addr_verify),
2690 		TEST_CASE_NAMED_ST(
2691 			"Tunnel src and dst addr verification",
2692 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2693 			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
2694 		TEST_CASE_NAMED_ST(
2695 			"Inner IP checksum",
2696 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2697 			test_ipsec_inline_proto_inner_ip_csum),
2698 		TEST_CASE_NAMED_ST(
2699 			"Inner L4 checksum",
2700 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2701 			test_ipsec_inline_proto_inner_l4_csum),
2702 		TEST_CASE_NAMED_ST(
2703 			"Tunnel IPv4 in IPv4",
2704 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2705 			test_ipsec_inline_proto_tunnel_v4_in_v4),
2706 		TEST_CASE_NAMED_ST(
2707 			"Tunnel IPv6 in IPv6",
2708 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2709 			test_ipsec_inline_proto_tunnel_v6_in_v6),
2710 		TEST_CASE_NAMED_ST(
2711 			"Tunnel IPv4 in IPv6",
2712 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2713 			test_ipsec_inline_proto_tunnel_v4_in_v6),
2714 		TEST_CASE_NAMED_ST(
2715 			"Tunnel IPv6 in IPv4",
2716 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2717 			test_ipsec_inline_proto_tunnel_v6_in_v4),
2718 		TEST_CASE_NAMED_ST(
2719 			"Transport IPv4",
2720 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2721 			test_ipsec_inline_proto_transport_v4),
2722 		TEST_CASE_NAMED_ST(
2723 			"Transport L4 checksum",
2724 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2725 			test_ipsec_inline_proto_transport_l4_csum),
2726 		TEST_CASE_NAMED_ST(
2727 			"Statistics: success",
2728 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2729 			test_ipsec_inline_proto_stats),
2730 		TEST_CASE_NAMED_ST(
2731 			"Fragmented packet",
2732 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2733 			test_ipsec_inline_proto_pkt_fragment),
2734 		TEST_CASE_NAMED_ST(
2735 			"Tunnel header copy DF (inner 0)",
2736 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2737 			test_ipsec_inline_proto_copy_df_inner_0),
2738 		TEST_CASE_NAMED_ST(
2739 			"Tunnel header copy DF (inner 1)",
2740 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2741 			test_ipsec_inline_proto_copy_df_inner_1),
2742 		TEST_CASE_NAMED_ST(
2743 			"Tunnel header set DF 0 (inner 1)",
2744 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2745 			test_ipsec_inline_proto_set_df_0_inner_1),
2746 		TEST_CASE_NAMED_ST(
2747 			"Tunnel header set DF 1 (inner 0)",
2748 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2749 			test_ipsec_inline_proto_set_df_1_inner_0),
2750 		TEST_CASE_NAMED_ST(
2751 			"Tunnel header IPv4 copy DSCP (inner 0)",
2752 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2753 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
2754 		TEST_CASE_NAMED_ST(
2755 			"Tunnel header IPv4 copy DSCP (inner 1)",
2756 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2757 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
2758 		TEST_CASE_NAMED_ST(
2759 			"Tunnel header IPv4 set DSCP 0 (inner 1)",
2760 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2761 			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
2762 		TEST_CASE_NAMED_ST(
2763 			"Tunnel header IPv4 set DSCP 1 (inner 0)",
2764 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2765 			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
2766 		TEST_CASE_NAMED_ST(
2767 			"Tunnel header IPv6 copy DSCP (inner 0)",
2768 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2769 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
2770 		TEST_CASE_NAMED_ST(
2771 			"Tunnel header IPv6 copy DSCP (inner 1)",
2772 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2773 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
2774 		TEST_CASE_NAMED_ST(
2775 			"Tunnel header IPv6 set DSCP 0 (inner 1)",
2776 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2777 			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
2778 		TEST_CASE_NAMED_ST(
2779 			"Tunnel header IPv6 set DSCP 1 (inner 0)",
2780 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2781 			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
2782 		TEST_CASE_NAMED_ST(
2783 			"Tunnel header IPv6 copy FLABEL (inner 0)",
2784 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2785 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
2786 		TEST_CASE_NAMED_ST(
2787 			"Tunnel header IPv6 copy FLABEL (inner 1)",
2788 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2789 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
2790 		TEST_CASE_NAMED_ST(
2791 			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
2792 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2793 			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
2794 		TEST_CASE_NAMED_ST(
2795 			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
2796 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2797 			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
2798 		TEST_CASE_NAMED_ST(
2799 			"Tunnel header IPv4 decrement inner TTL",
2800 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2801 			test_ipsec_inline_proto_ipv4_ttl_decrement),
2802 		TEST_CASE_NAMED_ST(
2803 			"Tunnel header IPv6 decrement inner hop limit",
2804 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2805 			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
2806 		TEST_CASE_NAMED_ST(
2807 			"IV generation",
2808 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2809 			test_ipsec_inline_proto_iv_gen),
2810 		TEST_CASE_NAMED_ST(
2811 			"SA soft expiry with packet limit",
2812 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2813 			test_ipsec_inline_proto_sa_pkt_soft_expiry),
2814 		TEST_CASE_NAMED_ST(
2815 			"SA soft expiry with byte limit",
2816 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2817 			test_ipsec_inline_proto_sa_byte_soft_expiry),
2818 		TEST_CASE_NAMED_ST(
2819 			"SA hard expiry with packet limit",
2820 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2821 			test_ipsec_inline_proto_sa_pkt_hard_expiry),
2822 		TEST_CASE_NAMED_ST(
2823 			"SA hard expiry with byte limit",
2824 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2825 			test_ipsec_inline_proto_sa_byte_hard_expiry),
2826 
2827 		TEST_CASE_NAMED_WITH_DATA(
2828 			"Antireplay with window size 1024",
2829 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2830 			test_ipsec_inline_proto_pkt_antireplay1024,
2831 			&pkt_aes_128_gcm),
2832 		TEST_CASE_NAMED_WITH_DATA(
2833 			"Antireplay with window size 2048",
2834 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2835 			test_ipsec_inline_proto_pkt_antireplay2048,
2836 			&pkt_aes_128_gcm),
2837 		TEST_CASE_NAMED_WITH_DATA(
2838 			"Antireplay with window size 4096",
2839 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2840 			test_ipsec_inline_proto_pkt_antireplay4096,
2841 			&pkt_aes_128_gcm),
2842 		TEST_CASE_NAMED_WITH_DATA(
2843 			"ESN and Antireplay with window size 1024",
2844 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2845 			test_ipsec_inline_proto_pkt_esn_antireplay1024,
2846 			&pkt_aes_128_gcm),
2847 		TEST_CASE_NAMED_WITH_DATA(
2848 			"ESN and Antireplay with window size 2048",
2849 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2850 			test_ipsec_inline_proto_pkt_esn_antireplay2048,
2851 			&pkt_aes_128_gcm),
2852 		TEST_CASE_NAMED_WITH_DATA(
2853 			"ESN and Antireplay with window size 4096",
2854 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2855 			test_ipsec_inline_proto_pkt_esn_antireplay4096,
2856 			&pkt_aes_128_gcm),
2857 
2858 		TEST_CASE_NAMED_WITH_DATA(
2859 			"IPv4 Reassembly with 2 fragments",
2860 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2861 			test_inline_ip_reassembly, &ipv4_2frag_vector),
2862 		TEST_CASE_NAMED_WITH_DATA(
2863 			"IPv6 Reassembly with 2 fragments",
2864 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2865 			test_inline_ip_reassembly, &ipv6_2frag_vector),
2866 		TEST_CASE_NAMED_WITH_DATA(
2867 			"IPv4 Reassembly with 4 fragments",
2868 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2869 			test_inline_ip_reassembly, &ipv4_4frag_vector),
2870 		TEST_CASE_NAMED_WITH_DATA(
2871 			"IPv6 Reassembly with 4 fragments",
2872 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2873 			test_inline_ip_reassembly, &ipv6_4frag_vector),
2874 		TEST_CASE_NAMED_WITH_DATA(
2875 			"IPv4 Reassembly with 5 fragments",
2876 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2877 			test_inline_ip_reassembly, &ipv4_5frag_vector),
2878 		TEST_CASE_NAMED_WITH_DATA(
2879 			"IPv6 Reassembly with 5 fragments",
2880 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2881 			test_inline_ip_reassembly, &ipv6_5frag_vector),
2882 		TEST_CASE_NAMED_WITH_DATA(
2883 			"IPv4 Reassembly with incomplete fragments",
2884 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2885 			test_inline_ip_reassembly, &ipv4_incomplete_vector),
2886 		TEST_CASE_NAMED_WITH_DATA(
2887 			"IPv4 Reassembly with overlapping fragments",
2888 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2889 			test_inline_ip_reassembly, &ipv4_overlap_vector),
2890 		TEST_CASE_NAMED_WITH_DATA(
2891 			"IPv4 Reassembly with out of order fragments",
2892 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2893 			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
2894 		TEST_CASE_NAMED_WITH_DATA(
2895 			"IPv4 Reassembly with burst of 4 fragments",
2896 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2897 			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
2898 
2899 		TEST_CASES_END() /**< NULL terminate unit test array */
2900 	},
2901 };
2902 
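/*
 * The same suite runs in two modes: poll mode (inline_ipsec_autotest) and
 * event mode (event_inline_ipsec_autotest); only the suite-level setup and
 * teardown differ.
 */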
2904 static int
2905 test_inline_ipsec(void)
2906 {
2907 	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
2908 	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
2909 	return unit_test_suite_runner(&inline_ipsec_testsuite);
2910 }
2911 
2912 static int
2913 test_event_inline_ipsec(void)
2914 {
2915 	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
2916 	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
2917 	return unit_test_suite_runner(&inline_ipsec_testsuite);
2918 }
2919 
2920 #endif /* !RTE_EXEC_ENV_WINDOWS */
2921 
2922 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
2923 REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
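
/*
 * Example run (assumed invocation; requires a NIC with inline IPsec
 * support, i.e. RTE_ETH_RX_OFFLOAD_SECURITY / RTE_ETH_TX_OFFLOAD_SECURITY):
 *
 *	$ dpdk-test -c 0x3 -a <security-capable device>
 *	RTE>> inline_ipsec_autotest
 *	RTE>> event_inline_ipsec_autotest
 */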
2924