xref: /dpdk/app/test/test_security_inline_proto.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 
16 #ifdef RTE_EXEC_ENV_WINDOWS
/* Stub: inline IPsec depends on PMD features not available on Windows. */
static int
test_inline_ipsec(void)
{
	printf("Inline ipsec not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
23 
/* Stub: event-mode inline IPsec is likewise unsupported on Windows. */
static int
test_event_inline_ipsec(void)
{
	printf("Event inline ipsec not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
30 
/* Stub: scatter-gather inline IPsec is unsupported on Windows. */
static int
test_inline_ipsec_sg(void)
{
	printf("Inline ipsec SG not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
37 
38 #else
39 
40 #include <rte_eventdev.h>
41 #include <rte_event_eth_rx_adapter.h>
42 #include <rte_event_eth_tx_adapter.h>
43 
44 #define NB_ETHPORTS_USED		1
45 #define MEMPOOL_CACHE_SIZE		32
46 #define MAX_PKT_BURST			32
47 #define RX_DESC_DEFAULT	1024
48 #define TX_DESC_DEFAULT	1024
49 #define RTE_PORT_ALL		(~(uint16_t)0x0)
50 
51 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
52 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
53 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
54 
55 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
56 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
57 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
58 
59 #define MAX_TRAFFIC_BURST		2048
60 #define NB_MBUF				10240
61 
62 #define ENCAP_DECAP_BURST_SZ		33
63 #define APP_REASS_TIMEOUT		10
64 
65 extern struct ipsec_test_data pkt_aes_128_gcm;
66 extern struct ipsec_test_data pkt_aes_192_gcm;
67 extern struct ipsec_test_data pkt_aes_256_gcm;
68 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
69 extern struct ipsec_test_data pkt_aes_128_cbc_null;
70 extern struct ipsec_test_data pkt_null_aes_xcbc;
71 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
72 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
73 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256;
74 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha384;
75 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha512;
76 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256_v6;
77 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256;
78 extern struct ipsec_test_data pkt_des_cbc_hmac_sha384;
79 extern struct ipsec_test_data pkt_des_cbc_hmac_sha512;
80 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256_v6;
81 extern struct ipsec_test_data pkt_aes_128_cbc_md5;
82 
83 static struct rte_mempool *mbufpool;
84 static struct rte_mempool *sess_pool;
85 /* ethernet addresses of ports */
86 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
87 
88 static struct rte_eth_conf port_conf = {
89 	.rxmode = {
90 		.mq_mode = RTE_ETH_MQ_RX_NONE,
91 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
92 			    RTE_ETH_RX_OFFLOAD_SECURITY,
93 	},
94 	.txmode = {
95 		.mq_mode = RTE_ETH_MQ_TX_NONE,
96 		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
97 			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
98 	},
99 	.lpbk_mode = 1,  /* enable loopback */
100 };
101 
102 static struct rte_eth_rxconf rx_conf = {
103 	.rx_thresh = {
104 		.pthresh = RX_PTHRESH,
105 		.hthresh = RX_HTHRESH,
106 		.wthresh = RX_WTHRESH,
107 	},
108 	.rx_free_thresh = 32,
109 };
110 
111 static struct rte_eth_txconf tx_conf = {
112 	.tx_thresh = {
113 		.pthresh = TX_PTHRESH,
114 		.hthresh = TX_HTHRESH,
115 		.wthresh = TX_WTHRESH,
116 	},
117 	.tx_free_thresh = 32, /* Use PMD default values */
118 	.tx_rs_thresh = 32, /* Use PMD default values */
119 };
120 
121 static uint16_t port_id;
122 static uint8_t eventdev_id;
123 static uint8_t rx_adapter_id;
124 static uint8_t tx_adapter_id;
125 static uint16_t plaintext_len;
126 static bool sg_mode;
127 
128 static bool event_mode_enabled;
129 
130 static uint64_t link_mbps;
131 
132 static int ip_reassembly_dynfield_offset = -1;
133 
134 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
135 
/*
 * Create an Inline IPsec (RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
 * security session on @portid for the SA described by @sa.
 *
 * @sess_conf must arrive with crypto_xform (and, for non-AEAD chains,
 * crypto_xform->next) pointing at caller-owned xform storage; this
 * function fills it from the SA: action/protocol, the ipsec xform,
 * tunnel endpoint addresses, salt and cipher/auth or AEAD parameters.
 * On success the new session is returned via @sess, the port's security
 * context via @ctx and the capability ol_flags via @ol_flags.
 *
 * Returns 0 on success, TEST_SKIPPED when the port or its advertised
 * capabilities cannot support the SA, TEST_FAILED when session creation
 * itself fails.
 */
static int
create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
		void **sess, struct rte_security_ctx **ctx,
		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
		struct rte_security_session_conf *sess_conf)
{
	/* Fixed tunnel endpoints used by every test SA */
	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
				0x0000, 0x001a};
	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
				0xe82c, 0x4887};
	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;
	enum rte_security_ipsec_sa_direction dir;
	struct rte_security_ctx *sec_ctx;
	uint32_t verify;

	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sess_conf->ipsec = sa->ipsec_xform;

	dir = sa->ipsec_xform.direction;
	verify = flags->tunnel_hdr_verify;

	/*
	 * Tunnel-header-verification tests: perturb the expected address so
	 * the PMD's verification is exercised (and expected to reject).
	 */
	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
			src_v4 += 1;
		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
			dst_v4 += 1;
	}

	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (sa->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
					sizeof(src_v4));
			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
					sizeof(dst_v4));

			/* DF/DSCP copy-from-inner vs. fixed-value tests */
			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.df = 0;

			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.df = 1;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.dscp =
						TEST_IPSEC_DSCP_VAL;
		} else {
			/* IPv6 tunnel: DSCP and flow-label override tests */
			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.dscp =
						TEST_IPSEC_DSCP_VAL;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.flabel = 0;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.flabel =
						TEST_IPSEC_FLABEL_VAL;

			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
					sizeof(src_v6));
			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
					sizeof(dst_v6));
		}
	}

	/* Save SA as userdata for the security session. When
	 * the packet is received, this userdata will be
	 * retrieved using the metadata from the packet.
	 *
	 * The PMD is expected to set similar metadata for other
	 * operations, like rte_eth_event, which are tied to
	 * security session. In such cases, the userdata could
	 * be obtained to uniquely identify the security
	 * parameters denoted.
	 */

	sess_conf->userdata = (void *) sa;

	sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
	if (sec_ctx == NULL) {
		printf("Ethernet device doesn't support security features.\n");
		return TEST_SKIPPED;
	}

	/* Look up the port capability matching this SA's proto/mode/dir */
	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
	if (sec_cap == NULL) {
		printf("No capabilities registered\n");
		return TEST_SKIPPED;
	}

	if (sa->aead || sa->aes_gmac)
		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));

	/* Copy cipher session parameters */
	if (sa->aead) {
		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
				sizeof(struct rte_crypto_sym_xform));
		sess_conf->crypto_xform->aead.key.data = sa->key.data;
		/* Verify crypto capabilities */
		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
			RTE_LOG(INFO, USER1,
				"Crypto capabilities not supported\n");
			return TEST_SKIPPED;
		}
	} else {
		/* Chained mode: egress = cipher then auth, ingress = auth then cipher */
		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			rte_memcpy(&sess_conf->crypto_xform->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));

			rte_memcpy(&sess_conf->crypto_xform->next->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->cipher.key.data =
							sa->key.data;
			sess_conf->crypto_xform->next->auth.key.data =
							sa->auth_key.data;
			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		} else {
			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));
			rte_memcpy(&sess_conf->crypto_xform->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->auth.key.data =
							sa->auth_key.data;
			sess_conf->crypto_xform->next->cipher.key.data =
							sa->key.data;

			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		}
	}

	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
		return TEST_SKIPPED;

	/*
	 * NOTE(review): IV override is communicated to the PMD out-of-band
	 * through an environment variable; only meaningful for drivers that
	 * honour ETH_SEC_IV_OVR. arr (128 bytes) is sized for the worst case
	 * of 16 IV bytes formatted as "0xNN, ".
	 */
	if ((sa->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
		/* Set env variable when IV generation is disabled */
		char arr[128];
		int len = 0, j = 0;
		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;

		for (; j < iv_len; j++)
			len += snprintf(arr+len, sizeof(arr) - len,
					"0x%x, ", sa->iv.data[j]);
		setenv("ETH_SEC_IV_OVR", arr, 1);
	}

	*sess = rte_security_session_create(sec_ctx, sess_conf, sess_pool);
	if (*sess == NULL) {
		printf("SEC Session init failed.\n");
		return TEST_FAILED;
	}

	*ol_flags = sec_cap->ol_flags;
	*ctx = sec_ctx;

	return 0;
}
341 
/*
 * Poll the link status of every port selected by @port_mask, for up to
 * 3 seconds total (30 polls of 100 ms), then print the final status of
 * each port. The first non-zero link speed observed is latched into the
 * global link_mbps.
 */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}

			/* print link status if flag set */
			if (print_flag == 1) {
				/* Remember the first reported link speed */
				if (link.link_status && link_mbps == 0)
					link_mbps = link.link_speed;

				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
401 
402 static void
403 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
404 {
405 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
406 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
407 	printf("%s%s", name, buf);
408 }
409 
/*
 * Scatter @len bytes of @buf into the segment chain of @pkt, starting
 * @offset bytes past the data start of the chain; data_len of each
 * segment touched and the total pkt_len are updated.
 *
 * NOTE(review): assumes the chain already has enough aggregate tailroom
 * for offset + len bytes — seg->next is followed without NULL checks, so
 * a short chain would crash. Also note the final segment's data_len is
 * assigned (not incremented); callers must ensure the copy spills past
 * the first segment so previously appended data (the Ethernet header) is
 * not discounted — TODO confirm all call sites satisfy this.
 */
static void
copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
		     struct rte_mbuf *pkt, unsigned int offset)
{
	unsigned int copied = 0;
	unsigned int copy_len;
	struct rte_mbuf *seg;
	void *seg_buf;

	/* Skip whole segments until @offset lands inside one */
	seg = pkt;
	while (offset >= rte_pktmbuf_tailroom(seg)) {
		offset -= rte_pktmbuf_tailroom(seg);
		seg = seg->next;
	}
	copy_len = seg->buf_len - seg->data_off - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	/* Fill each segment to capacity, then move to the next */
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
		len -= copy_len;
		copied += copy_len;
		seg->data_len += copy_len;

		seg = seg->next;
		copy_len = seg->buf_len - seg->data_off;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
	}
	/* Remainder fits in the current segment */
	rte_memcpy(seg_buf, buf + copied, (size_t) len);
	seg->data_len = len;

	pkt->pkt_len += copied + len;
}
441 
442 static bool
443 is_outer_ipv4(struct ipsec_test_data *td)
444 {
445 	bool outer_ipv4;
446 
447 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ||
448 	    td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
449 		outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION);
450 	else
451 		outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4);
452 	return outer_ipv4;
453 }
454 
455 static inline struct rte_mbuf *
456 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4)
457 {
458 	struct rte_mbuf *pkt, *tail;
459 	uint16_t space;
460 
461 	pkt = rte_pktmbuf_alloc(mp);
462 	if (pkt == NULL)
463 		return NULL;
464 
465 	if (outer_ipv4) {
466 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
467 				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
468 		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
469 	} else {
470 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
471 				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
472 		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
473 	}
474 	pkt->l2_len = RTE_ETHER_HDR_LEN;
475 
476 	space = rte_pktmbuf_tailroom(pkt);
477 	tail = pkt;
478 	/* Error if SG mode is not enabled */
479 	if (!sg_mode && space < len) {
480 		rte_pktmbuf_free(pkt);
481 		return NULL;
482 	}
483 	/* Extra room for expansion */
484 	while (space < len) {
485 		tail->next = rte_pktmbuf_alloc(mp);
486 		if (!tail->next)
487 			goto error;
488 		tail = tail->next;
489 		space += rte_pktmbuf_tailroom(tail);
490 		pkt->nb_segs++;
491 	}
492 
493 	if (pkt->buf_len > len + RTE_ETHER_HDR_LEN)
494 		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
495 	else
496 		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
497 	return pkt;
498 error:
499 	rte_pktmbuf_free(pkt);
500 	return NULL;
501 }
502 
503 static int
504 init_mempools(unsigned int nb_mbuf)
505 {
506 	struct rte_security_ctx *sec_ctx;
507 	uint16_t nb_sess = 512;
508 	uint32_t sess_sz;
509 	char s[64];
510 
511 	if (mbufpool == NULL) {
512 		snprintf(s, sizeof(s), "mbuf_pool");
513 		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
514 				MEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,
515 				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
516 		if (mbufpool == NULL) {
517 			printf("Cannot init mbuf pool\n");
518 			return TEST_FAILED;
519 		}
520 		printf("Allocated mbuf pool\n");
521 	}
522 
523 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
524 	if (sec_ctx == NULL) {
525 		printf("Device does not support Security ctx\n");
526 		return TEST_SKIPPED;
527 	}
528 	sess_sz = rte_security_session_get_size(sec_ctx);
529 	if (sess_pool == NULL) {
530 		snprintf(s, sizeof(s), "sess_pool");
531 		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
532 				MEMPOOL_CACHE_SIZE, 0,
533 				NULL, NULL, NULL, NULL,
534 				SOCKET_ID_ANY, 0);
535 		if (sess_pool == NULL) {
536 			printf("Cannot init sess pool\n");
537 			return TEST_FAILED;
538 		}
539 		printf("Allocated sess pool\n");
540 	}
541 
542 	return 0;
543 }
544 
545 static int
546 create_default_flow(uint16_t portid)
547 {
548 	struct rte_flow_action action[2];
549 	struct rte_flow_item pattern[2];
550 	struct rte_flow_attr attr = {0};
551 	struct rte_flow_error err;
552 	struct rte_flow *flow;
553 	int ret;
554 
555 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
556 
557 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
558 	pattern[0].spec = NULL;
559 	pattern[0].mask = NULL;
560 	pattern[0].last = NULL;
561 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
562 
563 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
564 	action[0].conf = NULL;
565 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
566 	action[1].conf = NULL;
567 
568 	attr.ingress = 1;
569 
570 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
571 	if (ret) {
572 		printf("\nValidate flow failed, ret = %d\n", ret);
573 		return -1;
574 	}
575 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
576 	if (flow == NULL) {
577 		printf("\nDefault flow rule create failed\n");
578 		return -1;
579 	}
580 
581 	default_flow[portid] = flow;
582 
583 	return 0;
584 }
585 
586 static void
587 destroy_default_flow(uint16_t portid)
588 {
589 	struct rte_flow_error err;
590 	int ret;
591 
592 	if (!default_flow[portid])
593 		return;
594 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
595 	if (ret) {
596 		printf("\nDefault flow rule destroy failed\n");
597 		return;
598 	}
599 	default_flow[portid] = NULL;
600 }
601 
602 struct rte_mbuf **tx_pkts_burst;
603 struct rte_mbuf **rx_pkts_burst;
604 
605 static int
606 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
607 {
608 	unsigned int len;
609 	unsigned int nb_segs = m->nb_segs;
610 	unsigned int matched = 0;
611 	struct rte_mbuf *save = m;
612 
613 	while (m) {
614 		len = tot_len;
615 		if (len > m->data_len)
616 			len = m->data_len;
617 		if (len != 0) {
618 			if (memcmp(rte_pktmbuf_mtod(m, char *),
619 					ref + matched, len)) {
620 				printf("\n====Reassembly case failed: Data Mismatch");
621 				rte_hexdump(stdout, "Reassembled",
622 					rte_pktmbuf_mtod(m, char *),
623 					len);
624 				rte_hexdump(stdout, "reference",
625 					ref + matched,
626 					len);
627 				return TEST_FAILED;
628 			}
629 		}
630 		tot_len -= len;
631 		matched += len;
632 		m = m->next;
633 	}
634 
635 	if (tot_len) {
636 		printf("\n====Reassembly case failed: Data Missing %u",
637 		       tot_len);
638 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
639 		rte_pktmbuf_dump(stderr, save, -1);
640 		return TEST_FAILED;
641 	}
642 	return TEST_SUCCESS;
643 }
644 
/*
 * Return true when @mbuf carries the "IP reassembly incomplete" dynamic
 * flag, i.e. the PMD could not finish HW reassembly and left the
 * remaining fragments chained through the reassembly dynfield.
 *
 * The dynflag bit is looked up once and cached in a function-local
 * static; if the flag was never registered, false is returned.
 */
static inline bool
is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
{
	static uint64_t ip_reassembly_dynflag;
	int ip_reassembly_dynflag_offset;

	if (ip_reassembly_dynflag == 0) {
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset < 0)
			return false;
		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}

	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
}
661 
662 static void
663 free_mbuf(struct rte_mbuf *mbuf)
664 {
665 	rte_eth_ip_reassembly_dynfield_t dynfield;
666 
667 	if (!mbuf)
668 		return;
669 
670 	if (!is_ip_reassembly_incomplete(mbuf)) {
671 		rte_pktmbuf_free(mbuf);
672 	} else {
673 		if (ip_reassembly_dynfield_offset < 0)
674 			return;
675 
676 		while (mbuf) {
677 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
678 					ip_reassembly_dynfield_offset,
679 					rte_eth_ip_reassembly_dynfield_t *);
680 			rte_pktmbuf_free(mbuf);
681 			mbuf = dynfield.next_frag;
682 		}
683 	}
684 }
685 
686 
/*
 * When HW IP reassembly did not complete, the PMD leaves the remaining
 * fragments chained through the reassembly dynfield. Walk that chain and
 * compare each fragment (minus its Ethernet header) against the original
 * fragments in @vector.
 *
 * Returns TEST_SUCCESS when all fragments match, TEST_FAILED on a data
 * mismatch, -1 when the dynfield was never registered.
 *
 * NOTE(review): assumes at least two fragments are present and that the
 * chain length stays within MAX_PKT_BURST and vector->nb_frags — the
 * dynfield[] array and vector->frags[] are indexed without bounds checks;
 * TODO confirm callers guarantee this.
 */
static int
get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
				struct reassembly_vector *vector)
{
	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
	int j = 0, ret;
	/**
	 * IP reassembly offload is incomplete, and fragments are listed in
	 * dynfield which can be reassembled in SW.
	 */
	printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
		"\nMatching with original frags.");

	if (ip_reassembly_dynfield_offset < 0)
		return -1;

	/* First fragment is the head mbuf itself */
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
				vector->frags[j]->len);
	if (ret)
		return ret;
	j++;
	/* Second fragment hangs off the head's dynfield */
	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
			vector->frags[j]->len);
	if (ret)
		return ret;

	/* Follow the chain while more fragments are flagged incomplete */
	while ((dynfield[j]->nb_frags > 1) &&
			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
		j++;
		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
		printf("\ncomparing frag: %d", j);
		/* Skip Ethernet header comparison */
		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(dynfield[j]->next_frag,
				vector->frags[j]->data, vector->frags[j]->len);
		if (ret)
			return ret;
	}
	return ret;
}
737 
738 static int
739 test_ipsec_with_reassembly(struct reassembly_vector *vector,
740 		const struct ipsec_test_flags *flags)
741 {
742 	void *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
743 	void *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
744 	struct rte_eth_ip_reassembly_params reass_capa = {0};
745 	struct rte_security_session_conf sess_conf_out = {0};
746 	struct rte_security_session_conf sess_conf_in = {0};
747 	unsigned int nb_tx, burst_sz, nb_sent = 0;
748 	struct rte_crypto_sym_xform cipher_out = {0};
749 	struct rte_crypto_sym_xform auth_out = {0};
750 	struct rte_crypto_sym_xform aead_out = {0};
751 	struct rte_crypto_sym_xform cipher_in = {0};
752 	struct rte_crypto_sym_xform auth_in = {0};
753 	struct rte_crypto_sym_xform aead_in = {0};
754 	struct ipsec_test_data sa_data;
755 	struct rte_security_ctx *ctx;
756 	unsigned int i, nb_rx = 0, j;
757 	uint32_t ol_flags;
758 	bool outer_ipv4;
759 	int ret = 0;
760 
761 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
762 	nb_tx = vector->nb_frags * burst_sz;
763 
764 	rte_eth_dev_stop(port_id);
765 	if (ret != 0) {
766 		printf("rte_eth_dev_stop: err=%s, port=%u\n",
767 			       rte_strerror(-ret), port_id);
768 		return ret;
769 	}
770 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
771 	if (reass_capa.max_frags < vector->nb_frags)
772 		return TEST_SKIPPED;
773 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
774 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
775 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
776 	}
777 
778 	ret = rte_eth_dev_start(port_id);
779 	if (ret < 0) {
780 		printf("rte_eth_dev_start: err=%d, port=%d\n",
781 			ret, port_id);
782 		return ret;
783 	}
784 
785 	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
786 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
787 
788 	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
789 	sa_data.ipsec_xform.direction =	RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
790 	outer_ipv4 = is_outer_ipv4(&sa_data);
791 
792 	for (i = 0; i < nb_tx; i += vector->nb_frags) {
793 		for (j = 0; j < vector->nb_frags; j++) {
794 			tx_pkts_burst[i+j] = init_packet(mbufpool,
795 						vector->frags[j]->data,
796 						vector->frags[j]->len, outer_ipv4);
797 			if (tx_pkts_burst[i+j] == NULL) {
798 				ret = -1;
799 				printf("\n packed init failed\n");
800 				goto out;
801 			}
802 		}
803 	}
804 
805 	for (i = 0; i < burst_sz; i++) {
806 		memcpy(&sa_data, vector->sa_data,
807 				sizeof(struct ipsec_test_data));
808 		/* Update SPI for every new SA */
809 		sa_data.ipsec_xform.spi += i;
810 		sa_data.ipsec_xform.direction =
811 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
812 		if (sa_data.aead) {
813 			sess_conf_out.crypto_xform = &aead_out;
814 		} else {
815 			sess_conf_out.crypto_xform = &cipher_out;
816 			sess_conf_out.crypto_xform->next = &auth_out;
817 		}
818 
819 		/* Create Inline IPsec outbound session. */
820 		ret = create_inline_ipsec_session(&sa_data, port_id,
821 				&out_ses[i], &ctx, &ol_flags, flags,
822 				&sess_conf_out);
823 		if (ret) {
824 			printf("\nInline outbound session create failed\n");
825 			goto out;
826 		}
827 	}
828 
829 	j = 0;
830 	for (i = 0; i < nb_tx; i++) {
831 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
832 			rte_security_set_pkt_metadata(ctx,
833 				out_ses[j], tx_pkts_burst[i], NULL);
834 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
835 
836 		/* Move to next SA after nb_frags */
837 		if ((i + 1) % vector->nb_frags == 0)
838 			j++;
839 	}
840 
841 	for (i = 0; i < burst_sz; i++) {
842 		memcpy(&sa_data, vector->sa_data,
843 				sizeof(struct ipsec_test_data));
844 		/* Update SPI for every new SA */
845 		sa_data.ipsec_xform.spi += i;
846 		sa_data.ipsec_xform.direction =
847 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
848 
849 		if (sa_data.aead) {
850 			sess_conf_in.crypto_xform = &aead_in;
851 		} else {
852 			sess_conf_in.crypto_xform = &auth_in;
853 			sess_conf_in.crypto_xform->next = &cipher_in;
854 		}
855 		/* Create Inline IPsec inbound session. */
856 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
857 				&ctx, &ol_flags, flags, &sess_conf_in);
858 		if (ret) {
859 			printf("\nInline inbound session create failed\n");
860 			goto out;
861 		}
862 	}
863 
864 	/* Retrieve reassembly dynfield offset if available */
865 	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
866 		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
867 				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
868 
869 
870 	ret = create_default_flow(port_id);
871 	if (ret)
872 		goto out;
873 
874 	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
875 	if (nb_sent != nb_tx) {
876 		ret = -1;
877 		printf("\nFailed to tx %u pkts", nb_tx);
878 		goto out;
879 	}
880 
881 	rte_delay_ms(1);
882 
883 	/* Retry few times before giving up */
884 	nb_rx = 0;
885 	j = 0;
886 	do {
887 		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
888 					  nb_tx - nb_rx);
889 		j++;
890 		if (nb_rx >= nb_tx)
891 			break;
892 		rte_delay_ms(1);
893 	} while (j < 5 || !nb_rx);
894 
895 	/* Check for minimum number of Rx packets expected */
896 	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
897 	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
898 		printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
899 		ret = TEST_FAILED;
900 		goto out;
901 	}
902 
903 	for (i = 0; i < nb_rx; i++) {
904 		if (vector->nb_frags > 1 &&
905 		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
906 			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
907 							      vector);
908 			if (ret != TEST_SUCCESS)
909 				break;
910 			continue;
911 		}
912 
913 		if (rx_pkts_burst[i]->ol_flags &
914 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
915 		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
916 			printf("\nsecurity offload failed\n");
917 			ret = TEST_FAILED;
918 			break;
919 		}
920 
921 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
922 				rx_pkts_burst[i]->pkt_len) {
923 			printf("\nreassembled/decrypted packet length mismatch\n");
924 			ret = TEST_FAILED;
925 			break;
926 		}
927 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
928 		ret = compare_pkt_data(rx_pkts_burst[i],
929 				       vector->full_pkt->data,
930 				       vector->full_pkt->len);
931 		if (ret != TEST_SUCCESS)
932 			break;
933 	}
934 
935 out:
936 	destroy_default_flow(port_id);
937 
938 	/* Clear session data. */
939 	for (i = 0; i < burst_sz; i++) {
940 		if (out_ses[i])
941 			rte_security_session_destroy(ctx, out_ses[i]);
942 		if (in_ses[i])
943 			rte_security_session_destroy(ctx, in_ses[i]);
944 	}
945 
946 	for (i = nb_sent; i < nb_tx; i++)
947 		free_mbuf(tx_pkts_burst[i]);
948 	for (i = 0; i < nb_rx; i++)
949 		free_mbuf(rx_pkts_burst[i]);
950 	return ret;
951 }
952 
953 static int
954 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
955 {
956 	struct rte_event ev;
957 	int i, nb_sent = 0;
958 
959 	/* Convert packets to events */
960 	memset(&ev, 0, sizeof(ev));
961 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
962 	for (i = 0; i < nb_pkts; i++) {
963 		ev.mbuf = tx_pkts[i];
964 		nb_sent += rte_event_eth_tx_adapter_enqueue(
965 				eventdev_id, port_id, &ev, 1, 0);
966 	}
967 
968 	return nb_sent;
969 }
970 
971 static int
972 event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
973 {
974 	int nb_ev, nb_rx = 0, j = 0;
975 	const int ms_per_pkt = 3;
976 	struct rte_event ev;
977 
978 	do {
979 		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
980 				&ev, 1, 0);
981 
982 		if (nb_ev == 0) {
983 			rte_delay_ms(1);
984 			continue;
985 		}
986 
987 		/* Get packet from event */
988 		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
989 			printf("Unsupported event type: %i\n",
990 				ev.event_type);
991 			continue;
992 		}
993 		rx_pkts[nb_rx++] = ev.mbuf;
994 	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
995 
996 	return nb_rx;
997 }
998 
999 static int
1000 test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
1001 		enum rte_eth_event_type type, void *param, void *ret_param)
1002 {
1003 	struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param;
1004 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
1005 
1006 	RTE_SET_USED(port_id);
1007 
1008 	if (type != RTE_ETH_EVENT_IPSEC)
1009 		return -1;
1010 
1011 	event_desc = ret_param;
1012 	if (event_desc == NULL) {
1013 		printf("Event descriptor not set\n");
1014 		return -1;
1015 	}
1016 	vector->notify_event = true;
1017 	if (event_desc->metadata != (uint64_t)vector->sa_data) {
1018 		printf("Mismatch in event specific metadata\n");
1019 		return -1;
1020 	}
1021 	switch (event_desc->subtype) {
1022 	case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY:
1023 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1024 		break;
1025 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY:
1026 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1027 		break;
1028 	case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY:
1029 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1030 		break;
1031 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY:
1032 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1033 		break;
1034 	default:
1035 		printf("Invalid IPsec event reported\n");
1036 		return -1;
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 static enum rte_eth_event_ipsec_subtype
1043 test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector,
1044 		const struct ipsec_test_flags *flags,
1045 		struct ipsec_test_data *tdata)
1046 {
1047 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1048 
1049 	vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1050 	vector->notify_event = false;
1051 	vector->sa_data = (void *)tdata;
1052 	if (flags->sa_expiry_pkts_soft)
1053 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1054 	else if (flags->sa_expiry_bytes_soft)
1055 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1056 	else if (flags->sa_expiry_pkts_hard)
1057 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1058 	else
1059 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1060 	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
1061 		       test_ipsec_inline_sa_exp_event_callback, vector);
1062 
1063 	return event;
1064 }
1065 
/*
 * Run one inline IPsec test: build nb_pkts packets from td->input_text,
 * push them through the inline-crypto port (Tx offload for egress SAs,
 * Rx flow rule for ingress), receive them back on the loopback interface
 * and post-process each result. When any SA expiry flag is set, the
 * verdict is instead whether the expected RTE_ETH_EVENT_IPSEC subtype
 * was delivered via the registered callback.
 */
static int
test_ipsec_inline_proto_process(struct ipsec_test_data *td,
		struct ipsec_test_data *res_d,
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
	struct rte_security_session_conf sess_conf = {0};
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct sa_expiry_vector vector = {0};
	struct rte_security_ctx *ctx;
	int nb_rx = 0, nb_sent;
	uint32_t ol_flags;
	int i, j = 0, ret;
	bool outer_ipv4;
	void *ses;

	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);

	/* SA expiry is exercised on egress only; for ingress SAs there is
	 * nothing to validate, so report success immediately.
	 */
	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
			return TEST_SUCCESS;
		event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td);
	}

	/* Single AEAD xform, or a cipher+auth chain whose order depends on
	 * direction (cipher first on egress, auth first on ingress).
	 */
	if (td->aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td->ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	/* Ingress needs a flow rule to steer traffic into the SA. */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	/* Build the Tx burst from the vector's input text; on failure free
	 * whatever was already allocated.
	 */
	for (i = 0; i < nb_pkts; i++) {
		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
						td->input_text.len, outer_ipv4);
		if (tx_pkts_burst[i] == NULL) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkts_burst[i], NULL);
			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}
	}
	/* Send packet to ethdev for inline IPsec processing. */
	if (event_mode_enabled)
		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
	else
		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);

	if (nb_sent != nb_pkts) {
		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
		for ( ; nb_sent < nb_pkts; nb_sent++)
			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
		ret = TEST_FAILED;
		goto out;
	}

	rte_pause();

	/* Receive back packet on loopback interface. */
	if (event_mode_enabled)
		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
	else
		/* NOTE(review): the nb_rx == 0 clause keeps this polling
		 * indefinitely until at least one packet arrives.
		 */
		do {
			rte_delay_ms(1);
			nb_rx += rte_eth_rx_burst(port_id, 0,
					&rx_pkts_burst[nb_rx],
					nb_sent - nb_rx);
			if (nb_rx >= nb_sent)
				break;
		} while (j++ < 5 || nb_rx == 0);

	/* Hard-expiry tests legitimately drop packets, so only require a
	 * full echo when no hard limit is armed.
	 */
	if (!flags->sa_expiry_pkts_hard &&
			!flags->sa_expiry_bytes_hard &&
			(nb_rx != nb_sent)) {
		printf("\nUnable to RX all %d packets, received(%i)",
				nb_sent, nb_rx);
		while (--nb_rx >= 0)
			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
		ret = TEST_FAILED;
		goto out;
	}

	/* Validate each received packet, then its session statistics. */
	for (i = 0; i < nb_rx; i++) {
		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);

		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
					      res_d, silent, flags);
		if (ret != TEST_SUCCESS) {
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		ret = test_ipsec_stats_verify(ctx, ses, flags,
					td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		rte_pktmbuf_free(rx_pkts_burst[i]);
		rx_pkts_burst[i] = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);
	/* For expiry tests the result is decided by the event callback
	 * having fired with the expected subtype.
	 */
	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (vector.notify_event && (vector.event == event))
			ret = TEST_SUCCESS;
		else
			ret = TEST_FAILED;

		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
			test_ipsec_inline_sa_exp_event_callback, &vector);
	}

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}
1233 
1234 static int
1235 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1236 {
1237 	struct ipsec_test_data td_outb;
1238 	struct ipsec_test_data td_inb;
1239 	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
1240 	int ret;
1241 
1242 	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1243 			flags->sa_expiry_bytes_soft ||
1244 			flags->sa_expiry_bytes_hard ||
1245 			flags->sa_expiry_pkts_hard)
1246 		nb_pkts = IPSEC_TEST_PACKETS_MAX;
1247 
1248 	for (i = 0; i < RTE_DIM(alg_list); i++) {
1249 		test_ipsec_td_prepare(alg_list[i].param1,
1250 				      alg_list[i].param2,
1251 				      flags, &td_outb, 1);
1252 
1253 		if (!td_outb.aead) {
1254 			enum rte_crypto_cipher_algorithm cipher_alg;
1255 			enum rte_crypto_auth_algorithm auth_alg;
1256 
1257 			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1258 			auth_alg = td_outb.xform.chain.auth.auth.algo;
1259 
1260 			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1261 				continue;
1262 
1263 			/* ICV is not applicable for NULL auth */
1264 			if (flags->icv_corrupt &&
1265 			    auth_alg == RTE_CRYPTO_AUTH_NULL)
1266 				continue;
1267 
1268 			/* IV is not applicable for NULL cipher */
1269 			if (flags->iv_gen &&
1270 			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1271 				continue;
1272 		}
1273 
1274 		if (flags->udp_encap)
1275 			td_outb.ipsec_xform.options.udp_encap = 1;
1276 
1277 		if (flags->sa_expiry_bytes_soft)
1278 			td_outb.ipsec_xform.life.bytes_soft_limit =
1279 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1280 				  * nb_pkts) >> 3) - 1;
1281 		if (flags->sa_expiry_pkts_hard)
1282 			td_outb.ipsec_xform.life.packets_hard_limit =
1283 					IPSEC_TEST_PACKETS_MAX - 1;
1284 		if (flags->sa_expiry_bytes_hard)
1285 			td_outb.ipsec_xform.life.bytes_hard_limit =
1286 				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
1287 				  * nb_pkts) >> 3) - 1;
1288 
1289 		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1290 						false, flags);
1291 		if (ret == TEST_SKIPPED)
1292 			continue;
1293 
1294 		if (ret == TEST_FAILED) {
1295 			printf("\n TEST FAILED");
1296 			test_ipsec_display_alg(alg_list[i].param1,
1297 					       alg_list[i].param2);
1298 			fail_cnt++;
1299 			continue;
1300 		}
1301 
1302 		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1303 
1304 		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1305 						false, flags);
1306 		if (ret == TEST_SKIPPED)
1307 			continue;
1308 
1309 		if (ret == TEST_FAILED) {
1310 			printf("\n TEST FAILED");
1311 			test_ipsec_display_alg(alg_list[i].param1,
1312 					       alg_list[i].param2);
1313 			fail_cnt++;
1314 			continue;
1315 		}
1316 
1317 		if (flags->display_alg)
1318 			test_ipsec_display_alg(alg_list[i].param1,
1319 					       alg_list[i].param2);
1320 
1321 		pass_cnt++;
1322 	}
1323 
1324 	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1325 	if (fail_cnt > 0)
1326 		return TEST_FAILED;
1327 	if (pass_cnt > 0)
1328 		return TEST_SUCCESS;
1329 	else
1330 		return TEST_SKIPPED;
1331 }
1332 
/*
 * Send the vectors in td[] through inline IPsec one packet at a time.
 * The security session is built from td[0]; when anti-replay is under
 * test, the session's ESN value is updated from each vector before the
 * corresponding egress packet is sent. Per-packet results are written to
 * res_d[] when it is non-NULL.
 */
static int
test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
		struct ipsec_test_data res_d[],
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	struct rte_security_session_conf sess_conf = {0};
	struct ipsec_test_data *res_d_tmp = NULL;
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct rte_mbuf *rx_pkt = NULL;
	struct rte_mbuf *tx_pkt = NULL;
	int nb_rx, nb_sent;
	void *ses;
	struct rte_security_ctx *ctx;
	uint32_t ol_flags;
	bool outer_ipv4;
	int i, ret;

	/* Single AEAD xform, or a cipher+auth chain whose order depends on
	 * direction (cipher first on egress, auth first on ingress).
	 */
	if (td[0].aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td[0].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	/* Ingress needs a flow rule to steer traffic into the SA. */
	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	for (i = 0; i < nb_pkts; i++) {
		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
					td[i].input_text.len, outer_ipv4);
		if (tx_pkt == NULL) {
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			ret = TEST_FAILED;
			goto out;
		}

		if (td[i].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			/* Push this vector's ESN into the live session so
			 * the anti-replay sequence is what the test expects.
			 */
			if (flags->antireplay) {
				sess_conf.ipsec.esn.value =
						td[i].ipsec_xform.esn.value;
				ret = rte_security_session_update(ctx, ses,
						&sess_conf);
				if (ret) {
					printf("Could not update ESN in session\n");
					rte_pktmbuf_free(tx_pkt);
					ret = TEST_SKIPPED;
					goto out;
				}
			}
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkt, NULL);
			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}

		/* Send packet to ethdev for inline IPsec processing. */
		if (event_mode_enabled)
			nb_sent = event_tx_burst(&tx_pkt, 1);
		else
			nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);

		if (nb_sent != 1) {
			printf("\nUnable to TX packets");
			rte_pktmbuf_free(tx_pkt);
			ret = TEST_FAILED;
			goto out;
		}

		rte_pause();

		/* Receive back packet on loopback interface. */
		if (event_mode_enabled)
			nb_rx = event_rx_burst(&rx_pkt, nb_sent);
		else {
			/* NOTE(review): polls until a packet arrives; a
			 * dropped packet would spin here forever.
			 */
			do {
				rte_delay_ms(1);
				nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
			} while (nb_rx == 0);
		}
		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);

		if (res_d != NULL)
			res_d_tmp = &res_d[i];

		ret = test_ipsec_post_process(rx_pkt, &td[i],
					      res_d_tmp, silent, flags);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		ret = test_ipsec_stats_verify(ctx, ses, flags,
					td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		rte_pktmbuf_free(rx_pkt);
		rx_pkt = NULL;
		tx_pkt = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}
1476 
1477 static int
1478 ut_setup_inline_ipsec(void)
1479 {
1480 	int ret;
1481 
1482 	/* Start device */
1483 	ret = rte_eth_dev_start(port_id);
1484 	if (ret < 0) {
1485 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1486 			ret, port_id);
1487 		return ret;
1488 	}
1489 	/* always enable promiscuous */
1490 	ret = rte_eth_promiscuous_enable(port_id);
1491 	if (ret != 0) {
1492 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1493 			rte_strerror(-ret), port_id);
1494 		return ret;
1495 	}
1496 
1497 	check_all_ports_link_status(1, RTE_PORT_ALL);
1498 
1499 	return 0;
1500 }
1501 
1502 static void
1503 ut_teardown_inline_ipsec(void)
1504 {
1505 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1506 	uint16_t portid;
1507 	int ret;
1508 
1509 	/* port tear down */
1510 	RTE_ETH_FOREACH_DEV(portid) {
1511 		ret = rte_eth_dev_stop(portid);
1512 		if (ret != 0)
1513 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1514 			       rte_strerror(-ret), portid);
1515 
1516 		/* Clear reassembly configuration */
1517 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1518 	}
1519 }
1520 
1521 static int
1522 inline_ipsec_testsuite_setup(void)
1523 {
1524 	struct rte_eth_conf local_port_conf;
1525 	struct rte_eth_dev_info dev_info;
1526 	uint16_t nb_rxd;
1527 	uint16_t nb_txd;
1528 	uint16_t nb_ports;
1529 	int ret;
1530 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1531 
1532 	printf("Start inline IPsec test.\n");
1533 
1534 	nb_ports = rte_eth_dev_count_avail();
1535 	if (nb_ports < NB_ETHPORTS_USED) {
1536 		printf("At least %u port(s) used for test\n",
1537 		       NB_ETHPORTS_USED);
1538 		return TEST_SKIPPED;
1539 	}
1540 
1541 	ret = init_mempools(NB_MBUF);
1542 	if (ret)
1543 		return ret;
1544 
1545 	if (tx_pkts_burst == NULL) {
1546 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1547 					  MAX_TRAFFIC_BURST,
1548 					  sizeof(void *),
1549 					  RTE_CACHE_LINE_SIZE);
1550 		if (!tx_pkts_burst)
1551 			return TEST_FAILED;
1552 
1553 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1554 					  MAX_TRAFFIC_BURST,
1555 					  sizeof(void *),
1556 					  RTE_CACHE_LINE_SIZE);
1557 		if (!rx_pkts_burst)
1558 			return TEST_FAILED;
1559 	}
1560 
1561 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1562 
1563 	nb_rxd = RX_DESC_DEFAULT;
1564 	nb_txd = TX_DESC_DEFAULT;
1565 
1566 	/* configuring port 0 for the test is enough */
1567 	port_id = 0;
1568 	if (rte_eth_dev_info_get(0, &dev_info)) {
1569 		printf("Failed to get devinfo");
1570 		return -1;
1571 	}
1572 
1573 	memcpy(&local_port_conf, &port_conf, sizeof(port_conf));
1574 	/* Add Multi seg flags */
1575 	if (sg_mode) {
1576 		uint16_t max_data_room = RTE_MBUF_DEFAULT_DATAROOM *
1577 			dev_info.rx_desc_lim.nb_seg_max;
1578 
1579 		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1580 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1581 		local_port_conf.rxmode.mtu = RTE_MIN(dev_info.max_mtu, max_data_room - 256);
1582 	}
1583 
1584 	/* port configure */
1585 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1586 				    nb_tx_queue, &local_port_conf);
1587 	if (ret < 0) {
1588 		printf("Cannot configure device: err=%d, port=%d\n",
1589 			 ret, port_id);
1590 		return ret;
1591 	}
1592 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1593 	if (ret < 0) {
1594 		printf("Cannot get mac address: err=%d, port=%d\n",
1595 			 ret, port_id);
1596 		return ret;
1597 	}
1598 	printf("Port %u ", port_id);
1599 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
1600 	printf("\n");
1601 
1602 	/* tx queue setup */
1603 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1604 				     SOCKET_ID_ANY, &tx_conf);
1605 	if (ret < 0) {
1606 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1607 				ret, port_id);
1608 		return ret;
1609 	}
1610 	/* rx queue steup */
1611 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1612 				     &rx_conf, mbufpool);
1613 	if (ret < 0) {
1614 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1615 				ret, port_id);
1616 		return ret;
1617 	}
1618 	test_ipsec_alg_list_populate();
1619 
1620 	/* Change the plaintext size for tests without Known vectors */
1621 	if (sg_mode) {
1622 		/* Leave space of 256B as ESP packet would be bigger and we
1623 		 * expect packets to be received back on same interface.
1624 		 * Without SG mode, default value is picked.
1625 		 */
1626 		plaintext_len = local_port_conf.rxmode.mtu - 256;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 static void
1633 inline_ipsec_testsuite_teardown(void)
1634 {
1635 	uint16_t portid;
1636 	int ret;
1637 
1638 	/* port tear down */
1639 	RTE_ETH_FOREACH_DEV(portid) {
1640 		ret = rte_eth_dev_reset(portid);
1641 		if (ret != 0)
1642 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1643 			       rte_strerror(-ret), port_id);
1644 	}
1645 	rte_free(tx_pkts_burst);
1646 	rte_free(rx_pkts_burst);
1647 }
1648 
/*
 * One-time setup for the event-mode inline IPsec suite: configure ethdev
 * port 0 with one Rx/Tx queue, bring up event device 0 with Rx and Tx
 * eth adapters, link queues to ports, start everything and switch the
 * test data paths into event mode.
 */
static int
event_inline_ipsec_testsuite_setup(void)
{
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_dev_config eventdev_conf = {0};
	struct rte_event_queue_conf eventq_conf = {0};
	struct rte_event_port_conf ev_port_conf = {0};
	const uint16_t nb_txd = 1024, nb_rxd = 1024;
	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
	uint8_t ev_queue_id = 0, tx_queue_id = 0;
	int nb_eventqueue = 1, nb_eventport = 1;
	const int all_queues = -1;
	uint32_t caps = 0;
	uint16_t nb_ports;
	int ret;

	printf("Start event inline IPsec test.\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0) {
		printf("Test require: 1 port, available: 0\n");
		return TEST_SKIPPED;
	}

	init_mempools(NB_MBUF);

	/* Burst arrays persist across test cases; allocate them once. */
	if (tx_pkts_burst == NULL) {
		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
					  MAX_TRAFFIC_BURST,
					  sizeof(void *),
					  RTE_CACHE_LINE_SIZE);
		if (!tx_pkts_burst)
			return -1;

		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
					  MAX_TRAFFIC_BURST,
					  sizeof(void *),
					  RTE_CACHE_LINE_SIZE);
		if (!rx_pkts_burst)
			return -1;

	}

	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);

	/* configuring port 0 for the test is enough */
	port_id = 0;
	/* port configure */
	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
				    nb_tx_queue, &port_conf);
	if (ret < 0) {
		printf("Cannot configure device: err=%d, port=%d\n",
			 ret, port_id);
		return ret;
	}

	/* Tx queue setup */
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				     SOCKET_ID_ANY, &tx_conf);
	if (ret < 0) {
		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
				ret, port_id);
		return ret;
	}

	/* rx queue steup */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
				     &rx_conf, mbufpool);
	if (ret < 0) {
		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
				ret, port_id);
		return ret;
	}

	/* Setup eventdev */
	eventdev_id = 0;
	rx_adapter_id = 0;
	tx_adapter_id = 0;

	/* Get default conf of eventdev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		printf("Error in getting event device info[devID:%d]\n",
				eventdev_id);
		return ret;
	}

	/* Get Tx adapter capabilities */
	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
	if (ret < 0) {
		printf("Failed to get event device %d eth tx adapter"
				" capabilities for port %d\n",
				eventdev_id, port_id);
		return ret;
	}
	/* Without an internal Tx port, a dedicated event queue is needed
	 * to feed the Tx adapter.
	 */
	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
		tx_queue_id = nb_eventqueue++;

	/* Size the event device at the advertised maximums. */
	eventdev_conf.nb_events_limit =
			evdev_default_conf.max_num_events;
	eventdev_conf.nb_event_queue_flows =
			evdev_default_conf.max_event_queue_flows;
	eventdev_conf.nb_event_port_dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	eventdev_conf.nb_event_port_enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	eventdev_conf.nb_event_queues = nb_eventqueue;
	eventdev_conf.nb_event_ports = nb_eventport;

	/* Configure event device */

	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
	if (ret < 0) {
		printf("Error in configuring event device\n");
		return ret;
	}

	/* Configure event queue */
	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
	eventq_conf.nb_atomic_flows = 1024;
	eventq_conf.nb_atomic_order_sequences = 1024;

	/* Setup the queue */
	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
	if (ret < 0) {
		printf("Failed to setup event queue %d\n", ret);
		return ret;
	}

	/* Configure event port */
	/* NOTE(review): port_id is the ethdev port id (0) reused here as
	 * the event port id; this only works because both are 0.
	 */
	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
	if (ret < 0) {
		printf("Failed to setup event port %d\n", ret);
		return ret;
	}

	/* Make event queue - event port link */
	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
	if (ret < 0) {
		printf("Failed to link event port %d\n", ret);
		return ret;
	}

	/* Setup port conf */
	ev_port_conf.new_event_threshold = 1200;
	ev_port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	ev_port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create Rx adapter */
	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
			&ev_port_conf);
	if (ret < 0) {
		printf("Failed to create rx adapter %d\n", ret);
		return ret;
	}

	/* Setup queue conf */
	queue_conf.ev.queue_id = ev_queue_id;
	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;

	/* Add queue to the adapter */
	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
			all_queues, &queue_conf);
	if (ret < 0) {
		printf("Failed to add eth queue to rx adapter %d\n", ret);
		return ret;
	}

	/* Start rx adapter */
	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
	if (ret < 0) {
		printf("Failed to start rx adapter %d\n", ret);
		return ret;
	}

	/* Create tx adapter */
	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
			&ev_port_conf);
	if (ret < 0) {
		printf("Failed to create tx adapter %d\n", ret);
		return ret;
	}

	/* Add queue to the adapter */
	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
			all_queues);
	if (ret < 0) {
		printf("Failed to add eth queue to tx adapter %d\n", ret);
		return ret;
	}
	/* Setup Tx queue & port */
	if (tx_queue_id) {
		/* Setup the queue */
		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
				&eventq_conf);
		if (ret < 0) {
			printf("Failed to setup tx event queue %d\n", ret);
			return ret;
		}
		/* Link Tx event queue to Tx port */
		ret = rte_event_port_link(eventdev_id, port_id,
				&tx_queue_id, NULL, 1);
		if (ret != 1) {
			printf("Failed to link event queue to port\n");
			return ret;
		}
	}

	/* Start tx adapter */
	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
	if (ret < 0) {
		printf("Failed to start tx adapter %d\n", ret);
		return ret;
	}

	/* Start eventdev */
	ret = rte_event_dev_start(eventdev_id);
	if (ret < 0) {
		printf("Failed to start event device %d\n", ret);
		return ret;
	}

	event_mode_enabled = true;
	test_ipsec_alg_list_populate();

	return 0;
}
1881 
1882 static void
1883 event_inline_ipsec_testsuite_teardown(void)
1884 {
1885 	uint16_t portid;
1886 	int ret;
1887 
1888 	event_mode_enabled = false;
1889 
1890 	/* Stop and release rx adapter */
1891 	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
1892 	if (ret < 0)
1893 		printf("Failed to stop rx adapter %d\n", ret);
1894 	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
1895 	if (ret < 0)
1896 		printf("Failed to remove rx adapter queues %d\n", ret);
1897 	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
1898 	if (ret < 0)
1899 		printf("Failed to free rx adapter %d\n", ret);
1900 
1901 	/* Stop and release tx adapter */
1902 	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
1903 	if (ret < 0)
1904 		printf("Failed to stop tx adapter %d\n", ret);
1905 	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
1906 	if (ret < 0)
1907 		printf("Failed to remove tx adapter queues %d\n", ret);
1908 	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
1909 	if (ret < 0)
1910 		printf("Failed to free tx adapter %d\n", ret);
1911 
1912 	/* Stop and release event devices */
1913 	rte_event_dev_stop(eventdev_id);
1914 	ret = rte_event_dev_close(eventdev_id);
1915 	if (ret < 0)
1916 		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
1917 
1918 	/* port tear down */
1919 	RTE_ETH_FOREACH_DEV(portid) {
1920 		ret = rte_eth_dev_reset(portid);
1921 		if (ret != 0)
1922 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1923 			       rte_strerror(-ret), port_id);
1924 	}
1925 
1926 	rte_free(tx_pkts_burst);
1927 	rte_free(rx_pkts_burst);
1928 }
1929 
1930 static int
1931 test_inline_ip_reassembly(const void *testdata)
1932 {
1933 	struct reassembly_vector reassembly_td = {0};
1934 	const struct reassembly_vector *td = testdata;
1935 	struct ip_reassembly_test_packet full_pkt;
1936 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
1937 	struct ipsec_test_flags flags = {0};
1938 	int i = 0;
1939 
1940 	reassembly_td.sa_data = td->sa_data;
1941 	reassembly_td.nb_frags = td->nb_frags;
1942 	reassembly_td.burst = td->burst;
1943 
1944 	memcpy(&full_pkt, td->full_pkt,
1945 			sizeof(struct ip_reassembly_test_packet));
1946 	reassembly_td.full_pkt = &full_pkt;
1947 
1948 	test_vector_payload_populate(reassembly_td.full_pkt, true);
1949 	for (; i < reassembly_td.nb_frags; i++) {
1950 		memcpy(&frags[i], td->frags[i],
1951 			sizeof(struct ip_reassembly_test_packet));
1952 		reassembly_td.frags[i] = &frags[i];
1953 		test_vector_payload_populate(reassembly_td.frags[i],
1954 				(i == 0) ? true : false);
1955 	}
1956 
1957 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
1958 }
1959 
1960 static int
1961 test_ipsec_inline_proto_known_vec(const void *test_data)
1962 {
1963 	struct ipsec_test_data td_outb;
1964 	struct ipsec_test_flags flags;
1965 
1966 	memset(&flags, 0, sizeof(flags));
1967 
1968 	memcpy(&td_outb, test_data, sizeof(td_outb));
1969 
1970 	if (td_outb.aead ||
1971 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
1972 		/* Disable IV gen to be able to test with known vectors */
1973 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
1974 	}
1975 
1976 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
1977 				false, &flags);
1978 }
1979 
1980 static int
1981 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1982 {
1983 	const struct ipsec_test_data *td = test_data;
1984 	struct ipsec_test_flags flags;
1985 	struct ipsec_test_data td_inb;
1986 
1987 	memset(&flags, 0, sizeof(flags));
1988 
1989 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1990 		test_ipsec_td_in_from_out(td, &td_inb);
1991 	else
1992 		memcpy(&td_inb, td, sizeof(td_inb));
1993 
1994 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
1995 }
1996 
1997 static int
1998 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1999 {
2000 	struct ipsec_test_flags flags;
2001 
2002 	memset(&flags, 0, sizeof(flags));
2003 
2004 	flags.display_alg = true;
2005 	flags.plaintext_len = plaintext_len;
2006 
2007 	return test_ipsec_inline_proto_all(&flags);
2008 }
2009 
2010 static int
2011 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
2012 {
2013 	struct ipsec_test_flags flags;
2014 
2015 	memset(&flags, 0, sizeof(flags));
2016 
2017 	flags.udp_encap = true;
2018 	flags.plaintext_len = plaintext_len;
2019 
2020 	return test_ipsec_inline_proto_all(&flags);
2021 }
2022 
2023 static int
2024 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
2025 {
2026 	struct ipsec_test_flags flags;
2027 
2028 	memset(&flags, 0, sizeof(flags));
2029 
2030 	flags.udp_encap = true;
2031 	flags.udp_ports_verify = true;
2032 	flags.plaintext_len = plaintext_len;
2033 
2034 	return test_ipsec_inline_proto_all(&flags);
2035 }
2036 
2037 static int
2038 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
2039 {
2040 	struct ipsec_test_flags flags;
2041 
2042 	memset(&flags, 0, sizeof(flags));
2043 
2044 	flags.icv_corrupt = true;
2045 	flags.plaintext_len = plaintext_len;
2046 
2047 	return test_ipsec_inline_proto_all(&flags);
2048 }
2049 
2050 static int
2051 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
2052 {
2053 	struct ipsec_test_flags flags;
2054 
2055 	memset(&flags, 0, sizeof(flags));
2056 
2057 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
2058 	flags.plaintext_len = plaintext_len;
2059 
2060 	return test_ipsec_inline_proto_all(&flags);
2061 }
2062 
2063 static int
2064 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
2065 {
2066 	struct ipsec_test_flags flags;
2067 
2068 	memset(&flags, 0, sizeof(flags));
2069 
2070 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
2071 	flags.plaintext_len = plaintext_len;
2072 
2073 	return test_ipsec_inline_proto_all(&flags);
2074 }
2075 
2076 static int
2077 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
2078 {
2079 	struct ipsec_test_flags flags;
2080 
2081 	memset(&flags, 0, sizeof(flags));
2082 
2083 	flags.ip_csum = true;
2084 	flags.plaintext_len = plaintext_len;
2085 
2086 	return test_ipsec_inline_proto_all(&flags);
2087 }
2088 
2089 static int
2090 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
2091 {
2092 	struct ipsec_test_flags flags;
2093 
2094 	memset(&flags, 0, sizeof(flags));
2095 
2096 	flags.l4_csum = true;
2097 	flags.plaintext_len = plaintext_len;
2098 
2099 	return test_ipsec_inline_proto_all(&flags);
2100 }
2101 
2102 static int
2103 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
2104 {
2105 	struct ipsec_test_flags flags;
2106 
2107 	memset(&flags, 0, sizeof(flags));
2108 
2109 	flags.ipv6 = false;
2110 	flags.tunnel_ipv6 = false;
2111 	flags.plaintext_len = plaintext_len;
2112 
2113 	return test_ipsec_inline_proto_all(&flags);
2114 }
2115 
2116 static int
2117 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
2118 {
2119 	struct ipsec_test_flags flags;
2120 
2121 	memset(&flags, 0, sizeof(flags));
2122 
2123 	flags.ipv6 = true;
2124 	flags.tunnel_ipv6 = true;
2125 	flags.plaintext_len = plaintext_len;
2126 
2127 	return test_ipsec_inline_proto_all(&flags);
2128 }
2129 
2130 static int
2131 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
2132 {
2133 	struct ipsec_test_flags flags;
2134 
2135 	memset(&flags, 0, sizeof(flags));
2136 
2137 	flags.ipv6 = false;
2138 	flags.tunnel_ipv6 = true;
2139 	flags.plaintext_len = plaintext_len;
2140 
2141 	return test_ipsec_inline_proto_all(&flags);
2142 }
2143 
2144 static int
2145 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
2146 {
2147 	struct ipsec_test_flags flags;
2148 
2149 	memset(&flags, 0, sizeof(flags));
2150 
2151 	flags.ipv6 = true;
2152 	flags.tunnel_ipv6 = false;
2153 	flags.plaintext_len = plaintext_len;
2154 
2155 	return test_ipsec_inline_proto_all(&flags);
2156 }
2157 
2158 static int
2159 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
2160 {
2161 	struct ipsec_test_flags flags;
2162 
2163 	memset(&flags, 0, sizeof(flags));
2164 
2165 	flags.ipv6 = false;
2166 	flags.transport = true;
2167 	flags.plaintext_len = plaintext_len;
2168 
2169 	return test_ipsec_inline_proto_all(&flags);
2170 }
2171 
2172 static int
2173 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
2174 {
2175 	struct ipsec_test_flags flags = {
2176 		.l4_csum = true,
2177 		.transport = true,
2178 		.plaintext_len = plaintext_len,
2179 	};
2180 
2181 	return test_ipsec_inline_proto_all(&flags);
2182 }
2183 
2184 static int
2185 test_ipsec_inline_proto_stats(const void *data __rte_unused)
2186 {
2187 	struct ipsec_test_flags flags;
2188 
2189 	memset(&flags, 0, sizeof(flags));
2190 
2191 	flags.stats_success = true;
2192 	flags.plaintext_len = plaintext_len;
2193 
2194 	return test_ipsec_inline_proto_all(&flags);
2195 }
2196 
2197 static int
2198 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
2199 {
2200 	struct ipsec_test_flags flags;
2201 
2202 	memset(&flags, 0, sizeof(flags));
2203 
2204 	flags.fragment = true;
2205 	flags.plaintext_len = plaintext_len;
2206 
2207 	return test_ipsec_inline_proto_all(&flags);
2208 
2209 }
2210 
2211 static int
2212 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
2213 {
2214 	struct ipsec_test_flags flags;
2215 
2216 	memset(&flags, 0, sizeof(flags));
2217 
2218 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
2219 	flags.plaintext_len = plaintext_len;
2220 
2221 	return test_ipsec_inline_proto_all(&flags);
2222 }
2223 
2224 static int
2225 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
2226 {
2227 	struct ipsec_test_flags flags;
2228 
2229 	memset(&flags, 0, sizeof(flags));
2230 
2231 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
2232 	flags.plaintext_len = plaintext_len;
2233 
2234 	return test_ipsec_inline_proto_all(&flags);
2235 }
2236 
2237 static int
2238 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
2239 {
2240 	struct ipsec_test_flags flags;
2241 
2242 	memset(&flags, 0, sizeof(flags));
2243 
2244 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
2245 	flags.plaintext_len = plaintext_len;
2246 
2247 	return test_ipsec_inline_proto_all(&flags);
2248 }
2249 
2250 static int
2251 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
2252 {
2253 	struct ipsec_test_flags flags;
2254 
2255 	memset(&flags, 0, sizeof(flags));
2256 
2257 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
2258 	flags.plaintext_len = plaintext_len;
2259 
2260 	return test_ipsec_inline_proto_all(&flags);
2261 }
2262 
2263 static int
2264 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
2265 {
2266 	struct ipsec_test_flags flags;
2267 
2268 	memset(&flags, 0, sizeof(flags));
2269 
2270 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2271 	flags.plaintext_len = plaintext_len;
2272 
2273 	return test_ipsec_inline_proto_all(&flags);
2274 }
2275 
2276 static int
2277 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
2278 {
2279 	struct ipsec_test_flags flags;
2280 
2281 	memset(&flags, 0, sizeof(flags));
2282 
2283 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2284 	flags.plaintext_len = plaintext_len;
2285 
2286 	return test_ipsec_inline_proto_all(&flags);
2287 }
2288 
2289 static int
2290 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
2291 {
2292 	struct ipsec_test_flags flags;
2293 
2294 	memset(&flags, 0, sizeof(flags));
2295 
2296 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2297 	flags.plaintext_len = plaintext_len;
2298 
2299 	return test_ipsec_inline_proto_all(&flags);
2300 }
2301 
2302 static int
2303 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
2304 {
2305 	struct ipsec_test_flags flags;
2306 
2307 	memset(&flags, 0, sizeof(flags));
2308 
2309 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2310 	flags.plaintext_len = plaintext_len;
2311 
2312 	return test_ipsec_inline_proto_all(&flags);
2313 }
2314 
2315 static int
2316 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
2317 {
2318 	struct ipsec_test_flags flags;
2319 
2320 	memset(&flags, 0, sizeof(flags));
2321 
2322 	flags.ipv6 = true;
2323 	flags.tunnel_ipv6 = true;
2324 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2325 	flags.plaintext_len = plaintext_len;
2326 
2327 	return test_ipsec_inline_proto_all(&flags);
2328 }
2329 
2330 static int
2331 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
2332 {
2333 	struct ipsec_test_flags flags;
2334 
2335 	memset(&flags, 0, sizeof(flags));
2336 
2337 	flags.ipv6 = true;
2338 	flags.tunnel_ipv6 = true;
2339 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2340 	flags.plaintext_len = plaintext_len;
2341 
2342 	return test_ipsec_inline_proto_all(&flags);
2343 }
2344 
2345 static int
2346 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
2347 {
2348 	struct ipsec_test_flags flags;
2349 
2350 	memset(&flags, 0, sizeof(flags));
2351 
2352 	flags.ipv6 = true;
2353 	flags.tunnel_ipv6 = true;
2354 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2355 	flags.plaintext_len = plaintext_len;
2356 
2357 	return test_ipsec_inline_proto_all(&flags);
2358 }
2359 
2360 static int
2361 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
2362 {
2363 	struct ipsec_test_flags flags;
2364 
2365 	memset(&flags, 0, sizeof(flags));
2366 
2367 	flags.ipv6 = true;
2368 	flags.tunnel_ipv6 = true;
2369 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2370 	flags.plaintext_len = plaintext_len;
2371 
2372 	return test_ipsec_inline_proto_all(&flags);
2373 }
2374 
2375 static int
2376 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
2377 {
2378 	struct ipsec_test_flags flags;
2379 
2380 	memset(&flags, 0, sizeof(flags));
2381 
2382 	flags.ipv6 = true;
2383 	flags.tunnel_ipv6 = true;
2384 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
2385 
2386 	return test_ipsec_inline_proto_all(&flags);
2387 }
2388 
2389 static int
2390 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
2391 {
2392 	struct ipsec_test_flags flags;
2393 
2394 	memset(&flags, 0, sizeof(flags));
2395 
2396 	flags.ipv6 = true;
2397 	flags.tunnel_ipv6 = true;
2398 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
2399 
2400 	return test_ipsec_inline_proto_all(&flags);
2401 }
2402 
2403 static int
2404 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
2405 {
2406 	struct ipsec_test_flags flags;
2407 
2408 	memset(&flags, 0, sizeof(flags));
2409 
2410 	flags.ipv6 = true;
2411 	flags.tunnel_ipv6 = true;
2412 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
2413 
2414 	return test_ipsec_inline_proto_all(&flags);
2415 }
2416 
2417 static int
2418 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
2419 {
2420 	struct ipsec_test_flags flags;
2421 
2422 	memset(&flags, 0, sizeof(flags));
2423 
2424 	flags.ipv6 = true;
2425 	flags.tunnel_ipv6 = true;
2426 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
2427 
2428 	return test_ipsec_inline_proto_all(&flags);
2429 }
2430 
2431 static int
2432 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
2433 {
2434 	struct ipsec_test_flags flags = {
2435 		.dec_ttl_or_hop_limit = true,
2436 		.plaintext_len = plaintext_len,
2437 	};
2438 
2439 	return test_ipsec_inline_proto_all(&flags);
2440 }
2441 
2442 static int
2443 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
2444 {
2445 	struct ipsec_test_flags flags = {
2446 		.ipv6 = true,
2447 		.dec_ttl_or_hop_limit = true,
2448 		.plaintext_len = plaintext_len,
2449 	};
2450 
2451 	return test_ipsec_inline_proto_all(&flags);
2452 }
2453 
2454 static int
2455 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
2456 {
2457 	struct ipsec_test_flags flags;
2458 
2459 	memset(&flags, 0, sizeof(flags));
2460 
2461 	flags.iv_gen = true;
2462 	flags.plaintext_len = plaintext_len;
2463 
2464 	return test_ipsec_inline_proto_all(&flags);
2465 }
2466 
2467 static int
2468 test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused)
2469 {
2470 	struct ipsec_test_flags flags = {
2471 		.sa_expiry_pkts_soft = true,
2472 		.plaintext_len = plaintext_len,
2473 	};
2474 	return test_ipsec_inline_proto_all(&flags);
2475 }
2476 static int
2477 test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused)
2478 {
2479 	struct ipsec_test_flags flags = {
2480 		.sa_expiry_bytes_soft = true,
2481 		.plaintext_len = plaintext_len,
2482 	};
2483 	return test_ipsec_inline_proto_all(&flags);
2484 }
2485 
2486 static int
2487 test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused)
2488 {
2489 	struct ipsec_test_flags flags = {
2490 		.sa_expiry_pkts_hard = true
2491 	};
2492 
2493 	return test_ipsec_inline_proto_all(&flags);
2494 }
2495 
2496 static int
2497 test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused)
2498 {
2499 	struct ipsec_test_flags flags = {
2500 		.sa_expiry_bytes_hard = true
2501 	};
2502 
2503 	return test_ipsec_inline_proto_all(&flags);
2504 }
2505 
2506 static int
2507 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
2508 {
2509 	struct ipsec_test_data td_outb;
2510 	struct ipsec_test_flags flags;
2511 
2512 	memset(&flags, 0, sizeof(flags));
2513 	flags.fragment = true;
2514 	flags.plaintext_len = plaintext_len;
2515 
2516 	memcpy(&td_outb, test_data, sizeof(td_outb));
2517 
2518 	/* Disable IV gen to be able to test with known vectors */
2519 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
2520 
2521 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
2522 						&flags);
2523 }
2524 
/* Run an anti-replay scenario: encrypt nb_pkts copies of test_data with the
 * given sequence numbers (esn[]), then feed the resulting ciphertext back
 * inbound, expecting the packets marked in replayed_pkt[] to be dropped by
 * the anti-replay check.  esn_en selects extended sequence numbers and winsz
 * sets the replay window size on both SAs.  Returns a TEST_* status.
 */
static int
test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
		      uint64_t winsz)
{
	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
	struct ipsec_test_flags flags;
	uint32_t i, ret = 0;

	memset(&flags, 0, sizeof(flags));
	flags.antireplay = true;
	flags.plaintext_len = plaintext_len;

	/* One outbound descriptor per packet, each with IV generation
	 * disabled (deterministic output) and the requested window/ESN
	 * configuration.
	 */
	for (i = 0; i < nb_pkts; i++) {
		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
		td_outb[i].ipsec_xform.replay_win_sz = winsz;
		td_outb[i].ipsec_xform.options.esn = esn_en;
	}

	/* Assign the per-packet sequence numbers under test */
	for (i = 0; i < nb_pkts; i++)
		td_outb[i].ipsec_xform.esn.value = esn[i];

	/* Outbound pass: produces the encrypted packets in td_inb */
	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
				nb_pkts, true, &flags);
	if (ret != TEST_SUCCESS)
		return ret;

	/* Derive the inbound test descriptors from the outbound results */
	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);

	for (i = 0; i < nb_pkts; i++) {
		td_inb[i].ipsec_xform.options.esn = esn_en;
		/* Set antireplay flag for packets to be dropped */
		td_inb[i].ar_packet = replayed_pkt[i];
	}

	/* Inbound pass: verifies that flagged packets are rejected */
	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
				true, &flags);

	return ret;
}
2567 
static int
test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
{
	/* Five-packet probe of a non-ESN anti-replay window of size winsz:
	 * 1. winsz * 2         - advance the window TOP to WS * 2
	 * 2. winsz + 1         - inside the new window, accepted
	 * 3. winsz             - below the window BOTTOM, dropped
	 * 4. winsz + winsz / 2 - middle of the window, accepted
	 * 5. winsz + winsz / 2 - replay of packet 4, dropped
	 */
	const uint64_t esn[5] = {
		winsz * 2,
		winsz + 1,
		winsz,
		winsz + (winsz / 2),
		winsz + (winsz / 2),
	};
	bool replayed_pkt[5] = { false, false, true, false, true };

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
			5, false, winsz);
}
2596 
static int
test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
{
	/* Anti-replay with a 1024-entry window */
	const uint64_t winsz = 1024;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2602 
static int
test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
{
	/* Anti-replay with a 2048-entry window */
	const uint64_t winsz = 2048;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2608 
static int
test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
{
	/* Anti-replay with a 4096-entry window */
	const uint64_t winsz = 4096;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2614 
/* Anti-replay scenario with extended sequence numbers (ESN), crossing the
 * 32-bit sequence number rollover boundary with a window of size winsz.
 */
static int
test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
{

	uint32_t nb_pkts = 7;
	bool replayed_pkt[7];
	uint64_t esn[7];

	/* Set the initial sequence number */
	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
	/* 3. Test with sequence number within window (1<<32 - 1) */
	esn[3] = (uint64_t)((1ULL << 32) - 1);
	/* 4. Test with sequence number within window (1<<32) */
	esn[4] = (uint64_t)(1ULL << 32);
	/* 5. Test with duplicate sequence number within
	 * new window (1<<32 - 1)
	 */
	esn[5] = (uint64_t)((1ULL << 32) - 1);
	/* 6. Test with duplicate sequence number within new window (1<<32) */
	esn[6] = (uint64_t)(1ULL << 32);

	/* Only the two duplicates (packets 5 and 6) must be dropped */
	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = false;
	replayed_pkt[3] = false;
	replayed_pkt[4] = false;
	replayed_pkt[5] = true;
	replayed_pkt[6] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
				     true, winsz);
}
2651 
static int
test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
{
	/* ESN anti-replay with a 1024-entry window */
	const uint64_t winsz = 1024;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2657 
static int
test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
{
	/* ESN anti-replay with a 2048-entry window */
	const uint64_t winsz = 2048;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2663 
static int
test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
{
	/* ESN anti-replay with a 4096-entry window */
	const uint64_t winsz = 4096;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2669 
2670 
2671 
2672 static struct unit_test_suite inline_ipsec_testsuite  = {
2673 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
2674 	.unit_test_cases = {
2675 		TEST_CASE_NAMED_WITH_DATA(
2676 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2677 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2678 			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
2679 		TEST_CASE_NAMED_WITH_DATA(
2680 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2681 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2682 			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
2683 		TEST_CASE_NAMED_WITH_DATA(
2684 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2685 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2686 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
2687 		TEST_CASE_NAMED_WITH_DATA(
2688 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
2689 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2690 			test_ipsec_inline_proto_known_vec,
2691 			&pkt_aes_128_cbc_md5),
2692 		TEST_CASE_NAMED_WITH_DATA(
2693 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2694 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2695 			test_ipsec_inline_proto_known_vec,
2696 			&pkt_aes_128_cbc_hmac_sha256),
2697 		TEST_CASE_NAMED_WITH_DATA(
2698 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2699 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2700 			test_ipsec_inline_proto_known_vec,
2701 			&pkt_aes_128_cbc_hmac_sha384),
2702 		TEST_CASE_NAMED_WITH_DATA(
2703 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2704 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2705 			test_ipsec_inline_proto_known_vec,
2706 			&pkt_aes_128_cbc_hmac_sha512),
2707 		TEST_CASE_NAMED_WITH_DATA(
2708 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
2709 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2710 			test_ipsec_inline_proto_known_vec,
2711 			&pkt_3des_cbc_hmac_sha256),
2712 		TEST_CASE_NAMED_WITH_DATA(
2713 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
2714 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2715 			test_ipsec_inline_proto_known_vec,
2716 			&pkt_3des_cbc_hmac_sha384),
2717 		TEST_CASE_NAMED_WITH_DATA(
2718 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
2719 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2720 			test_ipsec_inline_proto_known_vec,
2721 			&pkt_3des_cbc_hmac_sha512),
2722 		TEST_CASE_NAMED_WITH_DATA(
2723 			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
2724 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2725 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
2726 		TEST_CASE_NAMED_WITH_DATA(
2727 			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2728 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2729 			test_ipsec_inline_proto_known_vec,
2730 			&pkt_aes_128_cbc_hmac_sha256_v6),
2731 		TEST_CASE_NAMED_WITH_DATA(
2732 			"Outbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
2733 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2734 			test_ipsec_inline_proto_known_vec,
2735 			&pkt_3des_cbc_hmac_sha256_v6),
2736 		TEST_CASE_NAMED_WITH_DATA(
2737 			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2738 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2739 			test_ipsec_inline_proto_known_vec,
2740 			&pkt_null_aes_xcbc),
2741 		TEST_CASE_NAMED_WITH_DATA(
2742 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
2743 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2744 			test_ipsec_inline_proto_known_vec,
2745 			&pkt_des_cbc_hmac_sha256),
2746 		TEST_CASE_NAMED_WITH_DATA(
2747 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
2748 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2749 			test_ipsec_inline_proto_known_vec,
2750 			&pkt_des_cbc_hmac_sha384),
2751 		TEST_CASE_NAMED_WITH_DATA(
2752 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
2753 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2754 			test_ipsec_inline_proto_known_vec,
2755 			&pkt_des_cbc_hmac_sha512),
2756 		TEST_CASE_NAMED_WITH_DATA(
2757 			"Outbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
2758 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2759 			test_ipsec_inline_proto_known_vec,
2760 			&pkt_des_cbc_hmac_sha256_v6),
2761 
2762 		TEST_CASE_NAMED_WITH_DATA(
2763 			"Outbound fragmented packet",
2764 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2765 			test_ipsec_inline_proto_known_vec_fragmented,
2766 			&pkt_aes_128_gcm_frag),
2767 
2768 		TEST_CASE_NAMED_WITH_DATA(
2769 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2770 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2771 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
2772 		TEST_CASE_NAMED_WITH_DATA(
2773 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2774 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2775 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
2776 		TEST_CASE_NAMED_WITH_DATA(
2777 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2778 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2779 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
2780 		TEST_CASE_NAMED_WITH_DATA(
2781 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
2782 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2783 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
2784 		TEST_CASE_NAMED_WITH_DATA(
2785 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
2786 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2787 			test_ipsec_inline_proto_known_vec_inb,
2788 			&pkt_aes_128_cbc_md5),
2789 		TEST_CASE_NAMED_WITH_DATA(
2790 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2791 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2792 			test_ipsec_inline_proto_known_vec_inb,
2793 			&pkt_aes_128_cbc_hmac_sha256),
2794 		TEST_CASE_NAMED_WITH_DATA(
2795 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2796 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2797 			test_ipsec_inline_proto_known_vec_inb,
2798 			&pkt_aes_128_cbc_hmac_sha384),
2799 		TEST_CASE_NAMED_WITH_DATA(
2800 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2801 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2802 			test_ipsec_inline_proto_known_vec_inb,
2803 			&pkt_aes_128_cbc_hmac_sha512),
2804 		TEST_CASE_NAMED_WITH_DATA(
2805 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
2806 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2807 			test_ipsec_inline_proto_known_vec_inb,
2808 			&pkt_3des_cbc_hmac_sha256),
2809 		TEST_CASE_NAMED_WITH_DATA(
2810 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
2811 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2812 			test_ipsec_inline_proto_known_vec_inb,
2813 			&pkt_3des_cbc_hmac_sha384),
2814 		TEST_CASE_NAMED_WITH_DATA(
2815 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
2816 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2817 			test_ipsec_inline_proto_known_vec_inb,
2818 			&pkt_3des_cbc_hmac_sha512),
2819 		TEST_CASE_NAMED_WITH_DATA(
2820 			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
2821 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2822 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
2823 		TEST_CASE_NAMED_WITH_DATA(
2824 			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2825 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2826 			test_ipsec_inline_proto_known_vec_inb,
2827 			&pkt_aes_128_cbc_hmac_sha256_v6),
2828 		TEST_CASE_NAMED_WITH_DATA(
2829 			"Inbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
2830 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2831 			test_ipsec_inline_proto_known_vec_inb,
2832 			&pkt_3des_cbc_hmac_sha256_v6),
2833 		TEST_CASE_NAMED_WITH_DATA(
2834 			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2835 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2836 			test_ipsec_inline_proto_known_vec_inb,
2837 			&pkt_null_aes_xcbc),
2838 		TEST_CASE_NAMED_WITH_DATA(
2839 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
2840 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2841 			test_ipsec_inline_proto_known_vec_inb,
2842 			&pkt_des_cbc_hmac_sha256),
2843 		TEST_CASE_NAMED_WITH_DATA(
2844 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
2845 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2846 			test_ipsec_inline_proto_known_vec_inb,
2847 			&pkt_des_cbc_hmac_sha384),
2848 		TEST_CASE_NAMED_WITH_DATA(
2849 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
2850 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2851 			test_ipsec_inline_proto_known_vec_inb,
2852 			&pkt_des_cbc_hmac_sha512),
2853 		TEST_CASE_NAMED_WITH_DATA(
2854 			"Inbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
2855 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2856 			test_ipsec_inline_proto_known_vec_inb,
2857 			&pkt_des_cbc_hmac_sha256_v6),
2858 
2859 
2860 		TEST_CASE_NAMED_ST(
2861 			"Combined test alg list",
2862 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2863 			test_ipsec_inline_proto_display_list),
2864 
2865 		TEST_CASE_NAMED_ST(
2866 			"UDP encapsulation",
2867 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2868 			test_ipsec_inline_proto_udp_encap),
2869 		TEST_CASE_NAMED_ST(
2870 			"UDP encapsulation ports verification test",
2871 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2872 			test_ipsec_inline_proto_udp_ports_verify),
2873 		TEST_CASE_NAMED_ST(
2874 			"Negative test: ICV corruption",
2875 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2876 			test_ipsec_inline_proto_err_icv_corrupt),
2877 		TEST_CASE_NAMED_ST(
2878 			"Tunnel dst addr verification",
2879 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2880 			test_ipsec_inline_proto_tunnel_dst_addr_verify),
2881 		TEST_CASE_NAMED_ST(
2882 			"Tunnel src and dst addr verification",
2883 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2884 			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
2885 		TEST_CASE_NAMED_ST(
2886 			"Inner IP checksum",
2887 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2888 			test_ipsec_inline_proto_inner_ip_csum),
2889 		TEST_CASE_NAMED_ST(
2890 			"Inner L4 checksum",
2891 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2892 			test_ipsec_inline_proto_inner_l4_csum),
2893 		TEST_CASE_NAMED_ST(
2894 			"Tunnel IPv4 in IPv4",
2895 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2896 			test_ipsec_inline_proto_tunnel_v4_in_v4),
2897 		TEST_CASE_NAMED_ST(
2898 			"Tunnel IPv6 in IPv6",
2899 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2900 			test_ipsec_inline_proto_tunnel_v6_in_v6),
2901 		TEST_CASE_NAMED_ST(
2902 			"Tunnel IPv4 in IPv6",
2903 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2904 			test_ipsec_inline_proto_tunnel_v4_in_v6),
2905 		TEST_CASE_NAMED_ST(
2906 			"Tunnel IPv6 in IPv4",
2907 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2908 			test_ipsec_inline_proto_tunnel_v6_in_v4),
2909 		TEST_CASE_NAMED_ST(
2910 			"Transport IPv4",
2911 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2912 			test_ipsec_inline_proto_transport_v4),
2913 		TEST_CASE_NAMED_ST(
2914 			"Transport l4 checksum",
2915 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2916 			test_ipsec_inline_proto_transport_l4_csum),
2917 		TEST_CASE_NAMED_ST(
2918 			"Statistics: success",
2919 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2920 			test_ipsec_inline_proto_stats),
2921 		TEST_CASE_NAMED_ST(
2922 			"Fragmented packet",
2923 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2924 			test_ipsec_inline_proto_pkt_fragment),
2925 		TEST_CASE_NAMED_ST(
2926 			"Tunnel header copy DF (inner 0)",
2927 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2928 			test_ipsec_inline_proto_copy_df_inner_0),
2929 		TEST_CASE_NAMED_ST(
2930 			"Tunnel header copy DF (inner 1)",
2931 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2932 			test_ipsec_inline_proto_copy_df_inner_1),
2933 		TEST_CASE_NAMED_ST(
2934 			"Tunnel header set DF 0 (inner 1)",
2935 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2936 			test_ipsec_inline_proto_set_df_0_inner_1),
2937 		TEST_CASE_NAMED_ST(
2938 			"Tunnel header set DF 1 (inner 0)",
2939 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2940 			test_ipsec_inline_proto_set_df_1_inner_0),
2941 		TEST_CASE_NAMED_ST(
2942 			"Tunnel header IPv4 copy DSCP (inner 0)",
2943 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2944 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
2945 		TEST_CASE_NAMED_ST(
2946 			"Tunnel header IPv4 copy DSCP (inner 1)",
2947 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2948 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
2949 		TEST_CASE_NAMED_ST(
2950 			"Tunnel header IPv4 set DSCP 0 (inner 1)",
2951 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2952 			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
2953 		TEST_CASE_NAMED_ST(
2954 			"Tunnel header IPv4 set DSCP 1 (inner 0)",
2955 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2956 			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
2957 		TEST_CASE_NAMED_ST(
2958 			"Tunnel header IPv6 copy DSCP (inner 0)",
2959 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2960 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
2961 		TEST_CASE_NAMED_ST(
2962 			"Tunnel header IPv6 copy DSCP (inner 1)",
2963 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2964 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
2965 		TEST_CASE_NAMED_ST(
2966 			"Tunnel header IPv6 set DSCP 0 (inner 1)",
2967 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2968 			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
2969 		TEST_CASE_NAMED_ST(
2970 			"Tunnel header IPv6 set DSCP 1 (inner 0)",
2971 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2972 			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
2973 		TEST_CASE_NAMED_ST(
2974 			"Tunnel header IPv6 copy FLABEL (inner 0)",
2975 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2976 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
2977 		TEST_CASE_NAMED_ST(
2978 			"Tunnel header IPv6 copy FLABEL (inner 1)",
2979 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2980 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
2981 		TEST_CASE_NAMED_ST(
2982 			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
2983 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2984 			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
2985 		TEST_CASE_NAMED_ST(
2986 			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
2987 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2988 			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
2989 		TEST_CASE_NAMED_ST(
2990 			"Tunnel header IPv4 decrement inner TTL",
2991 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2992 			test_ipsec_inline_proto_ipv4_ttl_decrement),
2993 		TEST_CASE_NAMED_ST(
2994 			"Tunnel header IPv6 decrement inner hop limit",
2995 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2996 			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
2997 		TEST_CASE_NAMED_ST(
2998 			"IV generation",
2999 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3000 			test_ipsec_inline_proto_iv_gen),
3001 		TEST_CASE_NAMED_ST(
3002 			"SA soft expiry with packet limit",
3003 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3004 			test_ipsec_inline_proto_sa_pkt_soft_expiry),
3005 		TEST_CASE_NAMED_ST(
3006 			"SA soft expiry with byte limit",
3007 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3008 			test_ipsec_inline_proto_sa_byte_soft_expiry),
3009 		TEST_CASE_NAMED_ST(
3010 			"SA hard expiry with packet limit",
3011 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3012 			test_ipsec_inline_proto_sa_pkt_hard_expiry),
3013 		TEST_CASE_NAMED_ST(
3014 			"SA hard expiry with byte limit",
3015 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3016 			test_ipsec_inline_proto_sa_byte_hard_expiry),
3017 
3018 		TEST_CASE_NAMED_WITH_DATA(
3019 			"Antireplay with window size 1024",
3020 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3021 			test_ipsec_inline_proto_pkt_antireplay1024,
3022 			&pkt_aes_128_gcm),
3023 		TEST_CASE_NAMED_WITH_DATA(
3024 			"Antireplay with window size 2048",
3025 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3026 			test_ipsec_inline_proto_pkt_antireplay2048,
3027 			&pkt_aes_128_gcm),
3028 		TEST_CASE_NAMED_WITH_DATA(
3029 			"Antireplay with window size 4096",
3030 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3031 			test_ipsec_inline_proto_pkt_antireplay4096,
3032 			&pkt_aes_128_gcm),
3033 		TEST_CASE_NAMED_WITH_DATA(
3034 			"ESN and Antireplay with window size 1024",
3035 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3036 			test_ipsec_inline_proto_pkt_esn_antireplay1024,
3037 			&pkt_aes_128_gcm),
3038 		TEST_CASE_NAMED_WITH_DATA(
3039 			"ESN and Antireplay with window size 2048",
3040 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3041 			test_ipsec_inline_proto_pkt_esn_antireplay2048,
3042 			&pkt_aes_128_gcm),
3043 		TEST_CASE_NAMED_WITH_DATA(
3044 			"ESN and Antireplay with window size 4096",
3045 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3046 			test_ipsec_inline_proto_pkt_esn_antireplay4096,
3047 			&pkt_aes_128_gcm),
3048 
3049 		TEST_CASE_NAMED_WITH_DATA(
3050 			"IPv4 Reassembly with 2 fragments",
3051 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3052 			test_inline_ip_reassembly, &ipv4_2frag_vector),
3053 		TEST_CASE_NAMED_WITH_DATA(
3054 			"IPv6 Reassembly with 2 fragments",
3055 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3056 			test_inline_ip_reassembly, &ipv6_2frag_vector),
3057 		TEST_CASE_NAMED_WITH_DATA(
3058 			"IPv4 Reassembly with 4 fragments",
3059 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3060 			test_inline_ip_reassembly, &ipv4_4frag_vector),
3061 		TEST_CASE_NAMED_WITH_DATA(
3062 			"IPv6 Reassembly with 4 fragments",
3063 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3064 			test_inline_ip_reassembly, &ipv6_4frag_vector),
3065 		TEST_CASE_NAMED_WITH_DATA(
3066 			"IPv4 Reassembly with 5 fragments",
3067 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3068 			test_inline_ip_reassembly, &ipv4_5frag_vector),
3069 		TEST_CASE_NAMED_WITH_DATA(
3070 			"IPv6 Reassembly with 5 fragments",
3071 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3072 			test_inline_ip_reassembly, &ipv6_5frag_vector),
3073 		TEST_CASE_NAMED_WITH_DATA(
3074 			"IPv4 Reassembly with incomplete fragments",
3075 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3076 			test_inline_ip_reassembly, &ipv4_incomplete_vector),
3077 		TEST_CASE_NAMED_WITH_DATA(
3078 			"IPv4 Reassembly with overlapping fragments",
3079 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3080 			test_inline_ip_reassembly, &ipv4_overlap_vector),
3081 		TEST_CASE_NAMED_WITH_DATA(
3082 			"IPv4 Reassembly with out of order fragments",
3083 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3084 			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
3085 		TEST_CASE_NAMED_WITH_DATA(
3086 			"IPv4 Reassembly with burst of 4 fragments",
3087 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3088 			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
3089 
3090 		TEST_CASES_END() /**< NULL terminate unit test array */
3091 	},
3092 };
3093 
3094 
3095 static int
3096 test_inline_ipsec(void)
3097 {
3098 	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
3099 	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
3100 	return unit_test_suite_runner(&inline_ipsec_testsuite);
3101 }
3102 
3103 
3104 static int
3105 test_inline_ipsec_sg(void)
3106 {
3107 	int rc;
3108 
3109 	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
3110 	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
3111 
3112 	sg_mode = true;
3113 	/* Run the tests */
3114 	rc = unit_test_suite_runner(&inline_ipsec_testsuite);
3115 	sg_mode = false;
3116 
3117 	port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_SCATTER;
3118 	port_conf.txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
3119 	return rc;
3120 }
3121 
3122 static int
3123 test_event_inline_ipsec(void)
3124 {
3125 	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
3126 	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
3127 	return unit_test_suite_runner(&inline_ipsec_testsuite);
3128 }
3129 
3130 #endif /* !RTE_EXEC_ENV_WINDOWS */
3131 
/* Register the three autotest commands with the dpdk-test application.
 * On Windows each entry point is the TEST_SKIPPED stub defined at the top
 * of this file; otherwise they run the full inline IPsec suite in poll,
 * scatter-gather, and event modes respectively.
 */
REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
REGISTER_TEST_COMMAND(inline_ipsec_sg_autotest, test_inline_ipsec_sg);
REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
3135