xref: /dpdk/app/test/test_security_inline_proto.c (revision 54140461b60485941da282d8da2db2f2bc19e281)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
18 test_inline_ipsec(void)
19 {
20 	printf("Inline ipsec not supported on Windows, skipping test\n");
21 	return TEST_SKIPPED;
22 }
23 
24 static int
25 test_event_inline_ipsec(void)
26 {
27 	printf("Event inline ipsec not supported on Windows, skipping test\n");
28 	return TEST_SKIPPED;
29 }
30 
31 static int
32 test_inline_ipsec_sg(void)
33 {
34 	printf("Inline ipsec SG not supported on Windows, skipping test\n");
35 	return TEST_SKIPPED;
36 }
37 
38 #else
39 
40 #include <rte_eventdev.h>
41 #include <rte_event_eth_rx_adapter.h>
42 #include <rte_event_eth_tx_adapter.h>
43 
44 #define NB_ETHPORTS_USED		1
45 #define MEMPOOL_CACHE_SIZE		32
46 #define MAX_PKT_BURST			32
47 #define RX_DESC_DEFAULT	1024
48 #define TX_DESC_DEFAULT	1024
49 #define RTE_PORT_ALL		(~(uint16_t)0x0)
50 
51 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
52 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
53 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
54 
55 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
56 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
57 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
58 
59 #define MAX_TRAFFIC_BURST		2048
60 #define NB_MBUF				10240
61 
62 #define ENCAP_DECAP_BURST_SZ		33
63 #define APP_REASS_TIMEOUT		10
64 
65 extern struct ipsec_test_data pkt_aes_128_gcm;
66 extern struct ipsec_test_data pkt_aes_192_gcm;
67 extern struct ipsec_test_data pkt_aes_256_gcm;
68 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
69 extern struct ipsec_test_data pkt_aes_128_cbc_null;
70 extern struct ipsec_test_data pkt_null_aes_xcbc;
71 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
72 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
73 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256;
74 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha384;
75 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha512;
76 extern struct ipsec_test_data pkt_3des_cbc_hmac_sha256_v6;
77 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256;
78 extern struct ipsec_test_data pkt_des_cbc_hmac_sha384;
79 extern struct ipsec_test_data pkt_des_cbc_hmac_sha512;
80 extern struct ipsec_test_data pkt_des_cbc_hmac_sha256_v6;
81 extern struct ipsec_test_data pkt_aes_128_cbc_md5;
82 
/* Global mbuf pool shared by all tests (lazily created). */
static struct rte_mempool *mbufpool;
/* Pool backing rte_security sessions (lazily created). */
static struct rte_mempool *sess_pool;
/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* Port configuration: single queue, checksum + inline security offload on
 * Rx, security + fast mbuf free on Tx. Loopback mode lets the test receive
 * back the packets it transmits on the same port.
 */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
			    RTE_ETH_RX_OFFLOAD_SECURITY,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
	},
	.lpbk_mode = 1,  /* enable loopback */
};

/* Rx queue threshold registers and free threshold for all test queues. */
static struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};

/* Tx queue threshold registers and free/RS thresholds for all test queues. */
static struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 32, /* Use PMD default values */
	.tx_rs_thresh = 32, /* Use PMD default values */
};

/* Ethernet port and event device identifiers used by all tests. */
static uint16_t port_id;
static uint8_t eventdev_id;
static uint8_t rx_adapter_id;
static uint8_t tx_adapter_id;
static uint16_t plaintext_len;
/* True when scatter-gather (multi-segment mbuf) mode is enabled. */
static bool sg_mode;

/* True when tests run through the eventdev Rx/Tx adapters. */
static bool event_mode_enabled;

/* Link speed recorded at link-up time, in Mbps (0 until known). */
static uint64_t link_mbps;

/* Offset of the IP reassembly dynfield in the mbuf; -1 until looked up. */
static int ip_reassembly_dynfield_offset = -1;

/* Per-port default ESP -> SECURITY flow rule (NULL when not installed). */
static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
135 
/* Create an inline-protocol IPsec security session on @portid.
 *
 * Populates @sess_conf from the test SA @sa (tunnel endpoints, salt and
 * crypto transforms), verifies the port's security and crypto capabilities,
 * then creates the session from the global session pool.
 *
 * On success returns 0 and sets *sess (the new session), *ctx (the port's
 * security context) and *ol_flags (capability offload flags). Returns
 * TEST_SKIPPED when the port or its capabilities cannot support the SA,
 * and TEST_FAILED when session creation itself fails.
 */
static int
create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
		void **sess, void **ctx,
		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
		struct rte_security_session_conf *sess_conf)
{
	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
				0x0000, 0x001a};
	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
				0xe82c, 0x4887};
	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;
	enum rte_security_ipsec_sa_direction dir;
	void *sec_ctx;
	uint32_t verify;

	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sess_conf->ipsec = sa->ipsec_xform;

	dir = sa->ipsec_xform.direction;
	verify = flags->tunnel_hdr_verify;

	/* For ingress tunnel-header-verify tests, corrupt the expected
	 * address so the PMD's verification is exercised negatively.
	 */
	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
			src_v4 += 1;
		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
			dst_v4 += 1;
	}

	/* Fill tunnel endpoints and per-flag DF/DSCP/flow-label overrides. */
	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (sa->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
					sizeof(src_v4));
			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
					sizeof(dst_v4));

			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.df = 0;

			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.df = 1;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.dscp =
						TEST_IPSEC_DSCP_VAL;
		} else {
			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.dscp =
						TEST_IPSEC_DSCP_VAL;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.flabel = 0;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.flabel =
						TEST_IPSEC_FLABEL_VAL;

			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
					sizeof(src_v6));
			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
					sizeof(dst_v6));
		}
	}

	/* Save SA as userdata for the security session. When
	 * the packet is received, this userdata will be
	 * retrieved using the metadata from the packet.
	 *
	 * The PMD is expected to set similar metadata for other
	 * operations, like rte_eth_event, which are tied to
	 * security session. In such cases, the userdata could
	 * be obtained to uniquely identify the security
	 * parameters denoted.
	 */

	sess_conf->userdata = (void *) sa;

	sec_ctx = rte_eth_dev_get_sec_ctx(portid);
	if (sec_ctx == NULL) {
		printf("Ethernet device doesn't support security features.\n");
		return TEST_SKIPPED;
	}

	/* Look up the port's capability entry matching this SA. */
	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
	if (sec_cap == NULL) {
		printf("No capabilities registered\n");
		return TEST_SKIPPED;
	}

	if (sa->aead || sa->aes_gmac)
		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));

	/* Copy cipher session parameters */
	if (sa->aead) {
		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
				sizeof(struct rte_crypto_sym_xform));
		sess_conf->crypto_xform->aead.key.data = sa->key.data;
		/* Verify crypto capabilities */
		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
			RTE_LOG(INFO, USER1,
				"Crypto capabilities not supported\n");
			return TEST_SKIPPED;
		}
	} else {
		/* Chained mode: egress is cipher-then-auth, ingress is
		 * auth-then-cipher, so the xform order differs per direction.
		 */
		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			rte_memcpy(&sess_conf->crypto_xform->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));

			rte_memcpy(&sess_conf->crypto_xform->next->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->cipher.key.data =
							sa->key.data;
			sess_conf->crypto_xform->next->auth.key.data =
							sa->auth_key.data;
			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		} else {
			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));
			rte_memcpy(&sess_conf->crypto_xform->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->auth.key.data =
							sa->auth_key.data;
			sess_conf->crypto_xform->next->cipher.key.data =
							sa->key.data;

			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		}
	}

	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
		return TEST_SKIPPED;

	if ((sa->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
		/* Set env variable when IV generation is disabled */
		/* NOTE(review): the accumulated snprintf length assumes
		 * arr[128] never truncates — true for at most 16 IV bytes
		 * of "0xXX, " each, but a longer IV would underflow the
		 * size argument; confirm iv_len stays <= 16.
		 */
		char arr[128];
		int len = 0, j = 0;
		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;

		for (; j < iv_len; j++)
			len += snprintf(arr+len, sizeof(arr) - len,
					"0x%x, ", sa->iv.data[j]);
		setenv("ETH_SEC_IV_OVR", arr, 1);
	}

	*sess = rte_security_session_create(sec_ctx, sess_conf, sess_pool);
	if (*sess == NULL) {
		printf("SEC Session init failed.\n");
		return TEST_FAILED;
	}

	*ol_flags = sec_cap->ol_flags;
	*ctx = sec_ctx;

	return 0;
}
341 
/* Check the link status of all ports in up to 3s, and print them finally.
 *
 * Polls the ports selected by @port_mask every CHECK_INTERVAL ms; once all
 * of them report link-up (or the timeout expires) a final pass prints each
 * port's status. The first non-zero link speed seen is saved in the global
 * link_mbps.
 */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}

			/* print link status if flag set */
			if (print_flag == 1) {
				/* Remember the first reported link speed. */
				if (link.link_status && link_mbps == 0)
					link_mbps = link.link_speed;

				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
401 
402 static void
403 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
404 {
405 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
406 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
407 	printf("%s%s", name, buf);
408 }
409 
/* Scatter @len bytes of @buf into the segment chain of @pkt, starting
 * @offset bytes into the chain's data area and spilling across segments
 * as each fills. Updates each touched segment's data_len and increases
 * the head's pkt_len by the total copied.
 *
 * NOTE(review): assumes the chain already holds enough segments for
 * @len bytes (callers size it via init_packet()); seg->next is never
 * NULL-checked. The final segment's data_len is assigned rather than
 * accumulated — fine for freshly allocated tail segments, confirm no
 * caller passes a tail segment with existing data.
 */
static void
copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
		     struct rte_mbuf *pkt, unsigned int offset)
{
	unsigned int copied = 0;
	unsigned int copy_len;
	struct rte_mbuf *seg;
	void *seg_buf;

	/* Walk to the segment containing byte @offset. */
	seg = pkt;
	while (offset >= rte_pktmbuf_tailroom(seg)) {
		offset -= rte_pktmbuf_tailroom(seg);
		seg = seg->next;
	}
	copy_len = seg->buf_len - seg->data_off - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	/* Fill whole segments until the remainder fits in one. */
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
		len -= copy_len;
		copied += copy_len;
		seg->data_len += copy_len;

		seg = seg->next;
		copy_len = seg->buf_len - seg->data_off;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
	}
	rte_memcpy(seg_buf, buf + copied, (size_t) len);
	seg->data_len = len;

	pkt->pkt_len += copied + len;
}
441 
442 static bool
443 is_outer_ipv4(struct ipsec_test_data *td)
444 {
445 	bool outer_ipv4;
446 
447 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ||
448 	    td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
449 		outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION);
450 	else
451 		outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4);
452 	return outer_ipv4;
453 }
454 
/* Allocate an mbuf (chain) carrying a dummy Ethernet header plus @len
 * bytes of @data. @outer_ipv4 selects the IPv4 or IPv6 header template
 * and sets l3_len accordingly; l2_len is always the Ethernet header.
 *
 * If @data does not fit in one mbuf, extra segments are chained — but
 * only when the global sg_mode is enabled; otherwise NULL is returned.
 * Returns NULL on any allocation failure.
 */
static inline struct rte_mbuf *
init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4)
{
	struct rte_mbuf *pkt, *tail;
	uint16_t space;

	pkt = rte_pktmbuf_alloc(mp);
	if (pkt == NULL)
		return NULL;

	/* Prepend the canned Ethernet header matching the outer IP version. */
	if (outer_ipv4) {
		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
	} else {
		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
	}
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	space = rte_pktmbuf_tailroom(pkt);
	tail = pkt;
	/* Error if SG mode is not enabled */
	if (!sg_mode && space < len) {
		rte_pktmbuf_free(pkt);
		return NULL;
	}
	/* Extra room for expansion */
	while (space < len) {
		tail->next = rte_pktmbuf_alloc(mp);
		if (!tail->next)
			goto error;
		tail = tail->next;
		space += rte_pktmbuf_tailroom(tail);
		pkt->nb_segs++;
	}

	/* NOTE(review): the single-segment fast path keys off buf_len
	 * rather than remaining tailroom — assumes headroom is small
	 * enough that buf_len > len + hdr implies append succeeds;
	 * confirm against the pool's configured headroom.
	 */
	if (pkt->buf_len > len + RTE_ETHER_HDR_LEN)
		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
	else
		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
	return pkt;
error:
	rte_pktmbuf_free(pkt);
	return NULL;
}
502 
503 static int
504 init_mempools(unsigned int nb_mbuf)
505 {
506 	void *sec_ctx;
507 	uint16_t nb_sess = 512;
508 	uint32_t sess_sz;
509 	char s[64];
510 
511 	if (mbufpool == NULL) {
512 		snprintf(s, sizeof(s), "mbuf_pool");
513 		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
514 				MEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,
515 				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
516 		if (mbufpool == NULL) {
517 			printf("Cannot init mbuf pool\n");
518 			return TEST_FAILED;
519 		}
520 		printf("Allocated mbuf pool\n");
521 	}
522 
523 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
524 	if (sec_ctx == NULL) {
525 		printf("Device does not support Security ctx\n");
526 		return TEST_SKIPPED;
527 	}
528 	sess_sz = rte_security_session_get_size(sec_ctx);
529 	if (sess_pool == NULL) {
530 		snprintf(s, sizeof(s), "sess_pool");
531 		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
532 				MEMPOOL_CACHE_SIZE, 0,
533 				NULL, NULL, NULL, NULL,
534 				SOCKET_ID_ANY, 0);
535 		if (sess_pool == NULL) {
536 			printf("Cannot init sess pool\n");
537 			return TEST_FAILED;
538 		}
539 		printf("Allocated sess pool\n");
540 	}
541 
542 	return 0;
543 }
544 
545 static int
546 create_default_flow(uint16_t portid)
547 {
548 	struct rte_flow_action action[2];
549 	struct rte_flow_item pattern[2];
550 	struct rte_flow_attr attr = {0};
551 	struct rte_flow_error err;
552 	struct rte_flow *flow;
553 	int ret;
554 
555 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
556 
557 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
558 	pattern[0].spec = NULL;
559 	pattern[0].mask = NULL;
560 	pattern[0].last = NULL;
561 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
562 
563 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
564 	action[0].conf = NULL;
565 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
566 	action[1].conf = NULL;
567 
568 	attr.ingress = 1;
569 
570 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
571 	if (ret) {
572 		printf("\nValidate flow failed, ret = %d\n", ret);
573 		return -1;
574 	}
575 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
576 	if (flow == NULL) {
577 		printf("\nDefault flow rule create failed\n");
578 		return -1;
579 	}
580 
581 	default_flow[portid] = flow;
582 
583 	return 0;
584 }
585 
586 static void
587 destroy_default_flow(uint16_t portid)
588 {
589 	struct rte_flow_error err;
590 	int ret;
591 
592 	if (!default_flow[portid])
593 		return;
594 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
595 	if (ret) {
596 		printf("\nDefault flow rule destroy failed\n");
597 		return;
598 	}
599 	default_flow[portid] = NULL;
600 }
601 
/* Scratch arrays for Tx/Rx packet bursts, shared by the test cases. */
struct rte_mbuf **tx_pkts_burst;
struct rte_mbuf **rx_pkts_burst;
604 
605 static int
606 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
607 {
608 	unsigned int len;
609 	unsigned int nb_segs = m->nb_segs;
610 	unsigned int matched = 0;
611 	struct rte_mbuf *save = m;
612 
613 	while (m) {
614 		len = tot_len;
615 		if (len > m->data_len)
616 			len = m->data_len;
617 		if (len != 0) {
618 			if (memcmp(rte_pktmbuf_mtod(m, char *),
619 					ref + matched, len)) {
620 				printf("\n====Reassembly case failed: Data Mismatch");
621 				rte_hexdump(stdout, "Reassembled",
622 					rte_pktmbuf_mtod(m, char *),
623 					len);
624 				rte_hexdump(stdout, "reference",
625 					ref + matched,
626 					len);
627 				return TEST_FAILED;
628 			}
629 		}
630 		tot_len -= len;
631 		matched += len;
632 		m = m->next;
633 	}
634 
635 	if (tot_len) {
636 		printf("\n====Reassembly case failed: Data Missing %u",
637 		       tot_len);
638 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
639 		rte_pktmbuf_dump(stderr, save, -1);
640 		return TEST_FAILED;
641 	}
642 	return TEST_SUCCESS;
643 }
644 
645 static inline bool
646 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
647 {
648 	static uint64_t ip_reassembly_dynflag;
649 	int ip_reassembly_dynflag_offset;
650 
651 	if (ip_reassembly_dynflag == 0) {
652 		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
653 			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
654 		if (ip_reassembly_dynflag_offset < 0)
655 			return false;
656 		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
657 	}
658 
659 	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
660 }
661 
662 static void
663 free_mbuf(struct rte_mbuf *mbuf)
664 {
665 	rte_eth_ip_reassembly_dynfield_t dynfield;
666 
667 	if (!mbuf)
668 		return;
669 
670 	if (!is_ip_reassembly_incomplete(mbuf)) {
671 		rte_pktmbuf_free(mbuf);
672 	} else {
673 		if (ip_reassembly_dynfield_offset < 0)
674 			return;
675 
676 		while (mbuf) {
677 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
678 					ip_reassembly_dynfield_offset,
679 					rte_eth_ip_reassembly_dynfield_t *);
680 			rte_pktmbuf_free(mbuf);
681 			if (dynfield.nb_frags == 0)
682 				break;
683 			mbuf = dynfield.next_frag;
684 		}
685 	}
686 }
687 
688 
/* SW-verify a packet whose HW IP reassembly was left incomplete.
 *
 * Walks the chain of fragments linked through the IP reassembly dynfield
 * of @mbuf, comparing each fragment (Ethernet header stripped) against the
 * matching entry of @vector->frags.
 *
 * Returns TEST_SUCCESS when all fragments match, the failing compare
 * result otherwise, or -1 when the dynfield offset is unknown.
 */
static int
get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
				struct reassembly_vector *vector)
{
	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
	int j = 0, ret;
	/**
	 * IP reassembly offload is incomplete, and fragments are listed in
	 * dynfield which can be reassembled in SW.
	 */
	printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
		"\nMatching with original frags.");

	if (ip_reassembly_dynfield_offset < 0)
		return -1;

	/* Fragment 0 is the head mbuf itself. */
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
				vector->frags[j]->len);
	if (ret)
		return ret;
	j++;
	/* Fragment 1 hangs off the head mbuf's dynfield. */
	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
			vector->frags[j]->len);
	if (ret)
		return ret;

	/* Follow next_frag links while more fragments are expected and the
	 * next one is still flagged incomplete.
	 */
	while ((dynfield[j]->nb_frags > 1) &&
			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
		j++;
		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
		printf("\ncomparing frag: %d", j);
		/* Skip Ethernet header comparison */
		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(dynfield[j]->next_frag,
				vector->frags[j]->data, vector->frags[j]->len);
		if (ret)
			return ret;
	}
	return ret;
}
739 
740 static int
741 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
742 {
743 	struct rte_event ev;
744 	int i, nb_sent = 0;
745 
746 	/* Convert packets to events */
747 	memset(&ev, 0, sizeof(ev));
748 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
749 	for (i = 0; i < nb_pkts; i++) {
750 		ev.mbuf = tx_pkts[i];
751 		ev.mbuf->port = port_id;
752 		nb_sent += rte_event_eth_tx_adapter_enqueue(
753 				eventdev_id, port_id, &ev, 1, 0);
754 	}
755 
756 	return nb_sent;
757 }
758 
759 static int
760 event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
761 {
762 	int nb_ev, nb_rx = 0, j = 0;
763 	const int ms_per_pkt = 5;
764 	struct rte_event ev;
765 
766 	do {
767 		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
768 				&ev, 1, 0);
769 
770 		if (nb_ev == 0) {
771 			rte_delay_ms(1);
772 			continue;
773 		}
774 
775 		/* Get packet from event */
776 		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
777 			printf("Unsupported event type: %i\n",
778 				ev.event_type);
779 			continue;
780 		}
781 		rx_pkts[nb_rx++] = ev.mbuf;
782 	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
783 
784 	return nb_rx;
785 }
786 
/* Verify the original packet preserved by an inbound out-of-place (OOP)
 * session: the OOP dynfield of the processed @mbuf must reference an mbuf
 * whose payload (Ethernet header stripped) equals td->input_text.
 *
 * Frees the original mbuf before returning. Returns TEST_SUCCESS or
 * TEST_FAILED; @silent suppresses diagnostics.
 */
static int
verify_inbound_oop(struct ipsec_test_data *td,
		   bool silent, struct rte_mbuf *mbuf)
{
	int ret = TEST_SUCCESS, rc;
	struct rte_mbuf *orig;
	uint32_t len;
	void *data;

	orig = *rte_security_oop_dynfield(mbuf);
	if (!orig) {
		if (!silent)
			printf("\nUnable to get orig buffer OOP session");
		return TEST_FAILED;
	}

	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(orig, RTE_ETHER_HDR_LEN);

	len = td->input_text.len;
	if (orig->pkt_len != len) {
		if (!silent)
			printf("\nOriginal packet length mismatch, expected %u, got %u ",
			       len, orig->pkt_len);
		ret = TEST_FAILED;
	}

	/* NOTE(review): memcmp still runs over the expected length even
	 * after a length mismatch, and only the first segment's data is
	 * compared — assumes orig is a single-segment mbuf at least @len
	 * bytes long; confirm with the OOP-capable PMDs.
	 */
	data = rte_pktmbuf_mtod(orig, void *);
	rc = memcmp(data, td->input_text.data, len);
	if (rc) {
		ret = TEST_FAILED;
		if (silent)
			goto exit;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
		       "output text not as expected\n");

		rte_hexdump(stdout, "expected", td->input_text.data, len);
		rte_hexdump(stdout, "actual", data, len);
	}
exit:
	rte_pktmbuf_free(orig);
	return ret;
}
831 
/* Exercise inline IPsec together with HW IP reassembly.
 *
 * Builds nb_frags packets per burst element from @vector, encrypts each
 * element on its own egress SA (SPI incremented per element), loops the
 * packets back on the port and expects the matching ingress SAs to decrypt
 * and reassemble them. Fully reassembled packets are compared against
 * vector->full_pkt; packets flagged reassembly-incomplete are verified
 * fragment-by-fragment through the reassembly dynfield.
 *
 * Returns TEST_SUCCESS/TEST_FAILED/TEST_SKIPPED (or -1 on setup errors).
 */
static int
test_ipsec_with_reassembly(struct reassembly_vector *vector,
		const struct ipsec_test_flags *flags)
{
	void *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
	void *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
	struct rte_eth_ip_reassembly_params reass_capa = {0};
	struct rte_security_session_conf sess_conf_out = {0};
	struct rte_security_session_conf sess_conf_in = {0};
	unsigned int nb_tx, burst_sz, nb_sent = 0;
	struct rte_crypto_sym_xform cipher_out = {0};
	struct rte_crypto_sym_xform auth_out = {0};
	struct rte_crypto_sym_xform aead_out = {0};
	struct rte_crypto_sym_xform cipher_in = {0};
	struct rte_crypto_sym_xform auth_in = {0};
	struct rte_crypto_sym_xform aead_in = {0};
	struct ipsec_test_data sa_data;
	void *ctx;
	unsigned int i, nb_rx = 0, j;
	uint32_t ol_flags;
	bool outer_ipv4;
	int ret = 0;

	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
	nb_tx = vector->nb_frags * burst_sz;

	/* Skip when the port cannot reassemble this many fragments. */
	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
	if (reass_capa.max_frags < vector->nb_frags)
		return TEST_SKIPPED;

	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);

	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
	sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	outer_ipv4 = is_outer_ipv4(&sa_data);

	/* Build the Tx mbufs: nb_frags packets per burst element. */
	for (i = 0; i < nb_tx; i += vector->nb_frags) {
		for (j = 0; j < vector->nb_frags; j++) {
			tx_pkts_burst[i+j] = init_packet(mbufpool,
						vector->frags[j]->data,
						vector->frags[j]->len, outer_ipv4);
			if (tx_pkts_burst[i+j] == NULL) {
				ret = -1;
				printf("\n packed init failed\n");
				goto out;
			}
		}
	}

	/* One egress SA per burst element, each with a distinct SPI. */
	for (i = 0; i < burst_sz; i++) {
		memcpy(&sa_data, vector->sa_data,
				sizeof(struct ipsec_test_data));
		/* Update SPI for every new SA */
		sa_data.ipsec_xform.spi += i;
		sa_data.ipsec_xform.direction =
					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
		if (sa_data.aead) {
			sess_conf_out.crypto_xform = &aead_out;
		} else {
			sess_conf_out.crypto_xform = &cipher_out;
			sess_conf_out.crypto_xform->next = &auth_out;
		}

		/* Create Inline IPsec outbound session. */
		ret = create_inline_ipsec_session(&sa_data, port_id,
				&out_ses[i], &ctx, &ol_flags, flags,
				&sess_conf_out);
		if (ret) {
			printf("\nInline outbound session create failed\n");
			goto out;
		}
	}

	/* Attach each packet to its SA's metadata and mark for Tx offload. */
	j = 0;
	for (i = 0; i < nb_tx; i++) {
		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
			rte_security_set_pkt_metadata(ctx,
				out_ses[j], tx_pkts_burst[i], NULL);
		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Move to next SA after nb_frags */
		if ((i + 1) % vector->nb_frags == 0)
			j++;
	}

	/* Matching ingress SAs to decrypt the looped-back packets. */
	for (i = 0; i < burst_sz; i++) {
		memcpy(&sa_data, vector->sa_data,
				sizeof(struct ipsec_test_data));
		/* Update SPI for every new SA */
		sa_data.ipsec_xform.spi += i;
		sa_data.ipsec_xform.direction =
					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

		if (sa_data.aead) {
			sess_conf_in.crypto_xform = &aead_in;
		} else {
			sess_conf_in.crypto_xform = &auth_in;
			sess_conf_in.crypto_xform->next = &cipher_in;
		}
		/* Create Inline IPsec inbound session. */
		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
				&ctx, &ol_flags, flags, &sess_conf_in);
		if (ret) {
			printf("\nInline inbound session create failed\n");
			goto out;
		}
	}

	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);


	ret = create_default_flow(port_id);
	if (ret)
		goto out;

	if (event_mode_enabled)
		nb_sent = event_tx_burst(tx_pkts_burst, nb_tx);
	else
		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
	if (nb_sent != nb_tx) {
		ret = -1;
		printf("\nFailed to tx %u pkts", nb_tx);
		goto out;
	}

	rte_delay_ms(1);

	/* Retry few times before giving up */
	/* NOTE(review): once j reaches 5 the loop keeps spinning while
	 * nb_rx is still 0, so it never terminates if no packet is ever
	 * received — confirm this wait-forever behavior is intended.
	 */
	nb_rx = 0;
	j = 0;
	if (event_mode_enabled)
		nb_rx = event_rx_burst(rx_pkts_burst, nb_tx);
	else
		do {
			nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
						  nb_tx - nb_rx);
			j++;
			if (nb_rx >= nb_tx)
				break;
			rte_delay_ms(1);
		} while (j < 5 || !nb_rx);

	/* Check for minimum number of Rx packets expected */
	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
		printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
		ret = TEST_FAILED;
		goto out;
	}

	for (i = 0; i < nb_rx; i++) {
		/* Incomplete HW reassembly: verify the fragment chain. */
		if (vector->nb_frags > 1 &&
		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
							      vector);
			if (ret != TEST_SUCCESS)
				break;
			continue;
		}

		if (rx_pkts_burst[i]->ol_flags &
		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
			printf("\nsecurity offload failed\n");
			ret = TEST_FAILED;
			break;
		}

		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
				rx_pkts_burst[i]->pkt_len) {
			printf("\nreassembled/decrypted packet length mismatch\n");
			ret = TEST_FAILED;
			break;
		}
		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(rx_pkts_burst[i],
				       vector->full_pkt->data,
				       vector->full_pkt->len);
		if (ret != TEST_SUCCESS)
			break;
	}

out:
	destroy_default_flow(port_id);

	/* Clear session data. */
	/* NOTE(review): ctx from the last session creation is reused for all
	 * destroys — valid here since every session is on the same port.
	 */
	for (i = 0; i < burst_sz; i++) {
		if (out_ses[i])
			rte_security_session_destroy(ctx, out_ses[i]);
		if (in_ses[i])
			rte_security_session_destroy(ctx, in_ses[i]);
	}

	/* Release untransmitted Tx mbufs and all received packets. */
	for (i = nb_sent; i < nb_tx; i++)
		free_mbuf(tx_pkts_burst[i]);
	for (i = 0; i < nb_rx; i++)
		free_mbuf(rx_pkts_burst[i]);
	return ret;
}
1035 
1036 static int
1037 test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
1038 		enum rte_eth_event_type type, void *param, void *ret_param)
1039 {
1040 	struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param;
1041 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
1042 
1043 	RTE_SET_USED(port_id);
1044 
1045 	if (type != RTE_ETH_EVENT_IPSEC)
1046 		return -1;
1047 
1048 	event_desc = ret_param;
1049 	if (event_desc == NULL) {
1050 		printf("Event descriptor not set\n");
1051 		return -1;
1052 	}
1053 	vector->notify_event = true;
1054 	if (event_desc->metadata != (uint64_t)vector->sa_data) {
1055 		printf("Mismatch in event specific metadata\n");
1056 		return -1;
1057 	}
1058 	switch (event_desc->subtype) {
1059 	case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY:
1060 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1061 		break;
1062 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY:
1063 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1064 		break;
1065 	case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY:
1066 		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1067 		break;
1068 	case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY:
1069 		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1070 		break;
1071 	default:
1072 		printf("Invalid IPsec event reported\n");
1073 		return -1;
1074 	}
1075 
1076 	return 0;
1077 }
1078 
1079 static enum rte_eth_event_ipsec_subtype
1080 test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector,
1081 		const struct ipsec_test_flags *flags,
1082 		struct ipsec_test_data *tdata)
1083 {
1084 	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1085 
1086 	vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
1087 	vector->notify_event = false;
1088 	vector->sa_data = (void *)tdata;
1089 	if (flags->sa_expiry_pkts_soft)
1090 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
1091 	else if (flags->sa_expiry_bytes_soft)
1092 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
1093 	else if (flags->sa_expiry_pkts_hard)
1094 		event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
1095 	else
1096 		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
1097 	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
1098 		       test_ipsec_inline_sa_exp_event_callback, vector);
1099 
1100 	return event;
1101 }
1102 
/* Core inline IPsec test: create a session from @td, send @nb_pkts copies
 * of the input text through the port (loopback), receive them back and
 * verify post-processing, stats and (optionally) SA-expiry events.
 *
 * @td      test vector (direction, xforms, input/output text)
 * @res_d   optional output: decrypted/encrypted result data per test
 * @silent  suppress verbose output in post-processing
 * @flags   test behavior modifiers (expiry, OOP, packet corruption, ...)
 *
 * Returns TEST_SUCCESS/TEST_FAILED/TEST_SKIPPED. The inline session and
 * any installed flow rule are always destroyed before returning.
 */
static int
test_ipsec_inline_proto_process(struct ipsec_test_data *td,
		struct ipsec_test_data *res_d,
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
	struct rte_security_session_conf sess_conf = {0};
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct sa_expiry_vector vector = {0};
	void *ctx;
	int nb_rx = 0, nb_sent;
	uint32_t ol_flags;
	int i, j = 0, ret;
	bool outer_ipv4;
	void *ses;

	/* Clear stale pointers so the cleanup path only frees what this
	 * run actually received.
	 */
	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);

	/* SA expiry is only exercised on egress; register the event
	 * callback and remember which subtype we expect.
	 */
	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
			return TEST_SUCCESS;
		event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td);
	}

	/* Build the crypto transform chain: AEAD alone, or
	 * cipher->auth for egress and auth->cipher for ingress.
	 */
	if (td->aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td->ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	/* Inbound out-of-place needs the security OOP dynfield registered. */
	if (flags->inb_oop && rte_security_oop_dynfield_offset < 0) {
		printf("\nDynamic field not available for inline inbound OOP");
		ret = TEST_FAILED;
		goto out;
	}

	/* Ingress traffic is steered to the SA via a flow rule. */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	/* Build the TX burst; on any failure free the mbufs built so far. */
	for (i = 0; i < nb_pkts; i++) {
		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
						td->input_text.len, outer_ipv4);
		if (tx_pkts_burst[i] == NULL) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		/* Egress packets are tagged for inline security processing. */
		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkts_burst[i], NULL);
			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}
	}
	/* Send packet to ethdev for inline IPsec processing. */
	if (event_mode_enabled)
		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
	else
		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);

	if (nb_sent != nb_pkts) {
		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
		/* Untransmitted mbufs are still owned by us. */
		for ( ; nb_sent < nb_pkts; nb_sent++)
			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
		ret = TEST_FAILED;
		goto out;
	}

	rte_pause();

	/* Receive back packet on loopback interface. */
	if (event_mode_enabled)
		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
	else
		/* Poll with small delays; retries while nothing arrived. */
		do {
			rte_delay_ms(1);
			nb_rx += rte_eth_rx_burst(port_id, 0,
					&rx_pkts_burst[nb_rx],
					nb_sent - nb_rx);
			if (nb_rx >= nb_sent)
				break;
		} while (j++ < 5 || nb_rx == 0);

	/* Hard-expiry tests legitimately drop packets; otherwise all
	 * transmitted packets must come back.
	 */
	if (!flags->sa_expiry_pkts_hard &&
			!flags->sa_expiry_bytes_hard &&
			(nb_rx != nb_sent)) {
		printf("\nUnable to RX all %d packets, received(%i)",
				nb_sent, nb_rx);
		while (--nb_rx >= 0)
			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
		ret = TEST_FAILED;
		goto out;
	}

	for (i = 0; i < nb_rx; i++) {
		/* Strip L2 header before IPsec-level verification. */
		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);

		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
					      res_d, silent, flags);
		if (ret != TEST_SUCCESS) {
			/* Free the current and all remaining mbufs. */
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		ret = test_ipsec_stats_verify(ctx, ses, flags,
					td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		if (flags->inb_oop) {
			ret = verify_inbound_oop(td, silent, rx_pkts_burst[i]);
			if (ret != TEST_SUCCESS) {
				for ( ; i < nb_rx; i++)
					rte_pktmbuf_free(rx_pkts_burst[i]);
				goto out;
			}
		}

		rte_pktmbuf_free(rx_pkts_burst[i]);
		rx_pkts_burst[i] = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);
	/* For expiry tests the verdict is whether the expected event
	 * subtype was delivered; unregister the callback either way.
	 */
	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (vector.notify_event && (vector.event == event))
			ret = TEST_SUCCESS;
		else
			ret = TEST_FAILED;

		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
			test_ipsec_inline_sa_exp_event_callback, &vector);
	}

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}
1285 
/* Run the outbound + inbound inline IPsec test for every algorithm in
 * alg_list, applying the behavior modifiers in @flags.
 *
 * Returns TEST_FAILED if any algorithm fails, TEST_SUCCESS if at least
 * one passes (and none fail), TEST_SKIPPED if everything was skipped.
 */
static int
test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
{
	struct ipsec_test_data td_outb;
	struct ipsec_test_data td_inb;
	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
	int ret;

	/* IV generation and SA-expiry tests need a full burst of packets. */
	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
			flags->sa_expiry_bytes_soft ||
			flags->sa_expiry_bytes_hard ||
			flags->sa_expiry_pkts_hard)
		nb_pkts = IPSEC_TEST_PACKETS_MAX;

	for (i = 0; i < RTE_DIM(alg_list); i++) {
		test_ipsec_td_prepare(alg_list[i].param1,
				      alg_list[i].param2,
				      flags, &td_outb, 1);

		/* Skip algorithm/flag combinations that are not meaningful. */
		if (!td_outb.aead) {
			enum rte_crypto_cipher_algorithm cipher_alg;
			enum rte_crypto_auth_algorithm auth_alg;

			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
			auth_alg = td_outb.xform.chain.auth.auth.algo;

			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
				continue;

			/* ICV is not applicable for NULL auth */
			if (flags->icv_corrupt &&
			    auth_alg == RTE_CRYPTO_AUTH_NULL)
				continue;

			/* IV is not applicable for NULL cipher */
			if (flags->iv_gen &&
			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
				continue;
		}

		if (flags->udp_encap)
			td_outb.ipsec_xform.options.udp_encap = 1;

		/* Byte limits are expressed in 8-byte units (>> 3); set the
		 * limit just below the total traffic so expiry triggers.
		 */
		if (flags->sa_expiry_bytes_soft)
			td_outb.ipsec_xform.life.bytes_soft_limit =
				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
				  * nb_pkts) >> 3) - 1;
		if (flags->sa_expiry_pkts_hard)
			td_outb.ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;
		if (flags->sa_expiry_bytes_hard)
			td_outb.ipsec_xform.life.bytes_hard_limit =
				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
				  * nb_pkts) >> 3) - 1;

		/* Outbound pass: encrypt and capture the result in td_inb. */
		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
						false, flags);
		if (ret == TEST_SKIPPED)
			continue;

		if (ret == TEST_FAILED) {
			printf("\n TEST FAILED");
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);
			fail_cnt++;
			continue;
		}

		/* Turn the outbound result into an inbound test vector. */
		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);

		/* Inbound pass: decrypt what was produced above. */
		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
						false, flags);
		if (ret == TEST_SKIPPED)
			continue;

		if (ret == TEST_FAILED) {
			printf("\n TEST FAILED");
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);
			fail_cnt++;
			continue;
		}

		if (flags->display_alg)
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);

		pass_cnt++;
	}

	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
	if (fail_cnt > 0)
		return TEST_FAILED;
	if (pass_cnt > 0)
		return TEST_SUCCESS;
	else
		return TEST_SKIPPED;
}
1384 
/* Like test_ipsec_inline_proto_process(), but sends packets one at a
 * time so the session's ESN value can be updated between packets
 * (anti-replay testing). One session is created from td[0] and reused
 * for all @nb_pkts vectors.
 *
 * Returns TEST_SUCCESS/TEST_FAILED/TEST_SKIPPED; the session and any
 * ingress flow rule are destroyed before returning.
 */
static int
test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
		struct ipsec_test_data res_d[],
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	struct rte_security_session_conf sess_conf = {0};
	struct ipsec_test_data *res_d_tmp = NULL;
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct rte_mbuf *rx_pkt = NULL;
	struct rte_mbuf *tx_pkt = NULL;
	int nb_rx, nb_sent;
	void *ses;
	void *ctx;
	uint32_t ol_flags;
	bool outer_ipv4;
	int i, ret;

	/* Crypto chain: AEAD alone, or cipher->auth (egress) /
	 * auth->cipher (ingress) — taken from the first vector.
	 */
	if (td[0].aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td[0].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	/* Ingress traffic is steered to the SA via a flow rule. */
	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	for (i = 0; i < nb_pkts; i++) {
		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
					td[i].input_text.len, outer_ipv4);
		if (tx_pkt == NULL) {
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			ret = TEST_FAILED;
			goto out;
		}

		if (td[i].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			/* Push the per-vector ESN value into the live
			 * session before transmitting this packet.
			 */
			if (flags->antireplay) {
				sess_conf.ipsec.esn.value =
						td[i].ipsec_xform.esn.value;
				ret = rte_security_session_update(ctx, ses,
						&sess_conf);
				if (ret) {
					printf("Could not update ESN in session\n");
					rte_pktmbuf_free(tx_pkt);
					ret = TEST_SKIPPED;
					goto out;
				}
			}
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkt, NULL);
			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}

		/* Send packet to ethdev for inline IPsec processing. */
		if (event_mode_enabled)
			nb_sent = event_tx_burst(&tx_pkt, 1);
		else
			nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);

		if (nb_sent != 1) {
			printf("\nUnable to TX packets");
			rte_pktmbuf_free(tx_pkt);
			ret = TEST_FAILED;
			goto out;
		}

		rte_pause();

		/* Receive back packet on loopback interface. */
		if (event_mode_enabled)
			nb_rx = event_rx_burst(&rx_pkt, nb_sent);
		else {
			/* NOTE(review): busy-polls forever if the packet is
			 * dropped — relies on loopback always delivering.
			 */
			do {
				rte_delay_ms(1);
				nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
			} while (nb_rx == 0);
		}
		/* Strip L2 header before IPsec-level verification. */
		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);

		if (res_d != NULL)
			res_d_tmp = &res_d[i];

		ret = test_ipsec_post_process(rx_pkt, &td[i],
					      res_d_tmp, silent, flags);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		/* NOTE(review): uses td-> (i.e. td[0]) direction here while
		 * the loop otherwise indexes td[i] — presumably all vectors
		 * share a direction; confirm with callers.
		 */
		ret = test_ipsec_stats_verify(ctx, ses, flags,
					td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		rte_pktmbuf_free(rx_pkt);
		rx_pkt = NULL;
		tx_pkt = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}
1528 
1529 static int
1530 ut_setup_inline_ipsec_reassembly(void)
1531 {
1532 	struct rte_eth_ip_reassembly_params reass_capa = {0};
1533 	int ret;
1534 
1535 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
1536 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
1537 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
1538 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
1539 	}
1540 
1541 	/* Start event devices */
1542 	if (event_mode_enabled) {
1543 		ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
1544 		if (ret < 0) {
1545 			printf("Failed to start rx adapter %d\n", ret);
1546 			return ret;
1547 		}
1548 
1549 		ret = rte_event_dev_start(eventdev_id);
1550 		if (ret < 0) {
1551 			printf("Failed to start event device %d\n", ret);
1552 			return ret;
1553 		}
1554 	}
1555 
1556 	/* Start device */
1557 	ret = rte_eth_dev_start(port_id);
1558 	if (ret < 0) {
1559 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1560 			ret, port_id);
1561 		return ret;
1562 	}
1563 	/* always enable promiscuous */
1564 	ret = rte_eth_promiscuous_enable(port_id);
1565 	if (ret != 0) {
1566 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1567 			rte_strerror(-ret), port_id);
1568 		return ret;
1569 	}
1570 
1571 	check_all_ports_link_status(1, RTE_PORT_ALL);
1572 
1573 	return 0;
1574 }
1575 
1576 static void
1577 ut_teardown_inline_ipsec_reassembly(void)
1578 {
1579 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1580 	uint16_t portid;
1581 	int ret;
1582 
1583 	/* Stop event devices */
1584 	if (event_mode_enabled)
1585 		rte_event_dev_stop(eventdev_id);
1586 
1587 	/* port tear down */
1588 	RTE_ETH_FOREACH_DEV(portid) {
1589 		ret = rte_eth_dev_stop(portid);
1590 		if (ret != 0)
1591 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1592 			       rte_strerror(-ret), portid);
1593 
1594 		/* Clear reassembly configuration */
1595 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1596 	}
1597 }
1598 static int
1599 ut_setup_inline_ipsec(void)
1600 {
1601 	int ret;
1602 
1603 	/* Start event devices */
1604 	if (event_mode_enabled) {
1605 		ret = rte_event_dev_start(eventdev_id);
1606 		if (ret < 0) {
1607 			printf("Failed to start event device %d\n", ret);
1608 			return ret;
1609 		}
1610 	}
1611 
1612 	/* Start device */
1613 	ret = rte_eth_dev_start(port_id);
1614 	if (ret < 0) {
1615 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1616 			ret, port_id);
1617 		return ret;
1618 	}
1619 	/* always enable promiscuous */
1620 	ret = rte_eth_promiscuous_enable(port_id);
1621 	if (ret != 0) {
1622 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1623 			rte_strerror(-ret), port_id);
1624 		return ret;
1625 	}
1626 
1627 	check_all_ports_link_status(1, RTE_PORT_ALL);
1628 
1629 	return 0;
1630 }
1631 
1632 static void
1633 ut_teardown_inline_ipsec(void)
1634 {
1635 	uint16_t portid;
1636 	int ret;
1637 
1638 	/* Stop event devices */
1639 	if (event_mode_enabled)
1640 		rte_event_dev_stop(eventdev_id);
1641 
1642 	/* port tear down */
1643 	RTE_ETH_FOREACH_DEV(portid) {
1644 		ret = rte_eth_dev_stop(portid);
1645 		if (ret != 0)
1646 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1647 			       rte_strerror(-ret), portid);
1648 	}
1649 }
1650 
1651 static int
1652 inline_ipsec_testsuite_setup(void)
1653 {
1654 	struct rte_eth_conf local_port_conf;
1655 	struct rte_eth_dev_info dev_info;
1656 	uint16_t nb_rxd;
1657 	uint16_t nb_txd;
1658 	uint16_t nb_ports;
1659 	int ret;
1660 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1661 
1662 	printf("Start inline IPsec test.\n");
1663 
1664 	nb_ports = rte_eth_dev_count_avail();
1665 	if (nb_ports < NB_ETHPORTS_USED) {
1666 		printf("At least %u port(s) used for test\n",
1667 		       NB_ETHPORTS_USED);
1668 		return TEST_SKIPPED;
1669 	}
1670 
1671 	ret = init_mempools(NB_MBUF);
1672 	if (ret)
1673 		return ret;
1674 
1675 	if (tx_pkts_burst == NULL) {
1676 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1677 					  MAX_TRAFFIC_BURST,
1678 					  sizeof(void *),
1679 					  RTE_CACHE_LINE_SIZE);
1680 		if (!tx_pkts_burst)
1681 			return TEST_FAILED;
1682 
1683 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1684 					  MAX_TRAFFIC_BURST,
1685 					  sizeof(void *),
1686 					  RTE_CACHE_LINE_SIZE);
1687 		if (!rx_pkts_burst)
1688 			return TEST_FAILED;
1689 	}
1690 
1691 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1692 
1693 	nb_rxd = RX_DESC_DEFAULT;
1694 	nb_txd = TX_DESC_DEFAULT;
1695 
1696 	/* configuring port 0 for the test is enough */
1697 	port_id = 0;
1698 	if (rte_eth_dev_info_get(0, &dev_info)) {
1699 		printf("Failed to get devinfo");
1700 		return -1;
1701 	}
1702 
1703 	memcpy(&local_port_conf, &port_conf, sizeof(port_conf));
1704 	/* Add Multi seg flags */
1705 	if (sg_mode) {
1706 		uint16_t max_data_room = RTE_MBUF_DEFAULT_DATAROOM *
1707 			dev_info.rx_desc_lim.nb_seg_max;
1708 
1709 		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1710 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1711 		local_port_conf.rxmode.mtu = RTE_MIN(dev_info.max_mtu, max_data_room - 256);
1712 	}
1713 
1714 	/* port configure */
1715 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1716 				    nb_tx_queue, &local_port_conf);
1717 	if (ret < 0) {
1718 		printf("Cannot configure device: err=%d, port=%d\n",
1719 			 ret, port_id);
1720 		return ret;
1721 	}
1722 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1723 	if (ret < 0) {
1724 		printf("Cannot get mac address: err=%d, port=%d\n",
1725 			 ret, port_id);
1726 		return ret;
1727 	}
1728 	printf("Port %u ", port_id);
1729 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
1730 	printf("\n");
1731 
1732 	/* tx queue setup */
1733 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1734 				     SOCKET_ID_ANY, &tx_conf);
1735 	if (ret < 0) {
1736 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1737 				ret, port_id);
1738 		return ret;
1739 	}
1740 	/* rx queue steup */
1741 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1742 				     &rx_conf, mbufpool);
1743 	if (ret < 0) {
1744 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1745 				ret, port_id);
1746 		return ret;
1747 	}
1748 	test_ipsec_alg_list_populate();
1749 
1750 	/* Change the plaintext size for tests without Known vectors */
1751 	if (sg_mode) {
1752 		/* Leave space of 256B as ESP packet would be bigger and we
1753 		 * expect packets to be received back on same interface.
1754 		 * Without SG mode, default value is picked.
1755 		 */
1756 		plaintext_len = local_port_conf.rxmode.mtu - 256;
1757 	} else {
1758 		plaintext_len = 0;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
1764 static void
1765 inline_ipsec_testsuite_teardown(void)
1766 {
1767 	uint16_t portid;
1768 	int ret;
1769 
1770 	/* port tear down */
1771 	RTE_ETH_FOREACH_DEV(portid) {
1772 		ret = rte_eth_dev_reset(portid);
1773 		if (ret != 0)
1774 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1775 			       rte_strerror(-ret), port_id);
1776 	}
1777 	rte_free(tx_pkts_burst);
1778 	rte_free(rx_pkts_burst);
1779 }
1780 
1781 static int
1782 event_inline_ipsec_testsuite_setup(void)
1783 {
1784 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
1785 	struct rte_event_dev_info evdev_default_conf = {0};
1786 	struct rte_event_dev_config eventdev_conf = {0};
1787 	struct rte_event_queue_conf eventq_conf = {0};
1788 	struct rte_event_port_conf ev_port_conf = {0};
1789 	const uint16_t nb_txd = 1024, nb_rxd = 1024;
1790 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1791 	uint8_t ev_queue_id = 0, tx_queue_id = 0;
1792 	int nb_eventqueue = 1, nb_eventport = 1;
1793 	const int all_queues = -1;
1794 	uint32_t caps = 0;
1795 	uint16_t nb_ports;
1796 	int ret;
1797 
1798 	printf("Start event inline IPsec test.\n");
1799 
1800 	nb_ports = rte_eth_dev_count_avail();
1801 	if (nb_ports == 0) {
1802 		printf("Test require: 1 port, available: 0\n");
1803 		return TEST_SKIPPED;
1804 	}
1805 
1806 	init_mempools(NB_MBUF);
1807 
1808 	if (tx_pkts_burst == NULL) {
1809 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1810 					  MAX_TRAFFIC_BURST,
1811 					  sizeof(void *),
1812 					  RTE_CACHE_LINE_SIZE);
1813 		if (!tx_pkts_burst)
1814 			return -1;
1815 
1816 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1817 					  MAX_TRAFFIC_BURST,
1818 					  sizeof(void *),
1819 					  RTE_CACHE_LINE_SIZE);
1820 		if (!rx_pkts_burst)
1821 			return -1;
1822 
1823 	}
1824 
1825 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1826 
1827 	/* configuring port 0 for the test is enough */
1828 	port_id = 0;
1829 	/* port configure */
1830 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1831 				    nb_tx_queue, &port_conf);
1832 	if (ret < 0) {
1833 		printf("Cannot configure device: err=%d, port=%d\n",
1834 			 ret, port_id);
1835 		return ret;
1836 	}
1837 
1838 	/* Tx queue setup */
1839 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1840 				     SOCKET_ID_ANY, &tx_conf);
1841 	if (ret < 0) {
1842 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1843 				ret, port_id);
1844 		return ret;
1845 	}
1846 
1847 	/* rx queue steup */
1848 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1849 				     &rx_conf, mbufpool);
1850 	if (ret < 0) {
1851 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1852 				ret, port_id);
1853 		return ret;
1854 	}
1855 
1856 	/* Setup eventdev */
1857 	eventdev_id = 0;
1858 	rx_adapter_id = 0;
1859 	tx_adapter_id = 0;
1860 
1861 	/* Get default conf of eventdev */
1862 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1863 	if (ret < 0) {
1864 		printf("Error in getting event device info[devID:%d]\n",
1865 				eventdev_id);
1866 		return ret;
1867 	}
1868 
1869 	/* Get Tx adapter capabilities */
1870 	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
1871 	if (ret < 0) {
1872 		printf("Failed to get event device %d eth tx adapter"
1873 				" capabilities for port %d\n",
1874 				eventdev_id, port_id);
1875 		return ret;
1876 	}
1877 	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
1878 		tx_queue_id = nb_eventqueue++;
1879 
1880 	eventdev_conf.nb_events_limit =
1881 			evdev_default_conf.max_num_events;
1882 	eventdev_conf.nb_event_queue_flows =
1883 			evdev_default_conf.max_event_queue_flows;
1884 	eventdev_conf.nb_event_port_dequeue_depth =
1885 			evdev_default_conf.max_event_port_dequeue_depth;
1886 	eventdev_conf.nb_event_port_enqueue_depth =
1887 			evdev_default_conf.max_event_port_enqueue_depth;
1888 
1889 	eventdev_conf.nb_event_queues = nb_eventqueue;
1890 	eventdev_conf.nb_event_ports = nb_eventport;
1891 
1892 	/* Configure event device */
1893 
1894 	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
1895 	if (ret < 0) {
1896 		printf("Error in configuring event device\n");
1897 		return ret;
1898 	}
1899 
1900 	/* Configure event queue */
1901 	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1902 	eventq_conf.nb_atomic_flows = 1024;
1903 	eventq_conf.nb_atomic_order_sequences = 1024;
1904 
1905 	/* Setup the queue */
1906 	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
1907 	if (ret < 0) {
1908 		printf("Failed to setup event queue %d\n", ret);
1909 		return ret;
1910 	}
1911 
1912 	/* Configure event port */
1913 	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
1914 	if (ret < 0) {
1915 		printf("Failed to setup event port %d\n", ret);
1916 		return ret;
1917 	}
1918 
1919 	/* Make event queue - event port link */
1920 	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
1921 	if (ret < 0) {
1922 		printf("Failed to link event port %d\n", ret);
1923 		return ret;
1924 	}
1925 
1926 	/* Setup port conf */
1927 	ev_port_conf.new_event_threshold = 1200;
1928 	ev_port_conf.dequeue_depth =
1929 			evdev_default_conf.max_event_port_dequeue_depth;
1930 	ev_port_conf.enqueue_depth =
1931 			evdev_default_conf.max_event_port_enqueue_depth;
1932 
1933 	/* Create Rx adapter */
1934 	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
1935 			&ev_port_conf);
1936 	if (ret < 0) {
1937 		printf("Failed to create rx adapter %d\n", ret);
1938 		return ret;
1939 	}
1940 
1941 	/* Setup queue conf */
1942 	queue_conf.ev.queue_id = ev_queue_id;
1943 	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1944 	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
1945 
1946 	/* Add queue to the adapter */
1947 	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
1948 			all_queues, &queue_conf);
1949 	if (ret < 0) {
1950 		printf("Failed to add eth queue to rx adapter %d\n", ret);
1951 		return ret;
1952 	}
1953 
1954 	/* Start rx adapter */
1955 	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
1956 	if (ret < 0) {
1957 		printf("Failed to start rx adapter %d\n", ret);
1958 		return ret;
1959 	}
1960 
1961 	/* Create tx adapter */
1962 	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
1963 			&ev_port_conf);
1964 	if (ret < 0) {
1965 		printf("Failed to create tx adapter %d\n", ret);
1966 		return ret;
1967 	}
1968 
1969 	/* Add queue to the adapter */
1970 	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
1971 			all_queues);
1972 	if (ret < 0) {
1973 		printf("Failed to add eth queue to tx adapter %d\n", ret);
1974 		return ret;
1975 	}
1976 	/* Setup Tx queue & port */
1977 	if (tx_queue_id) {
1978 		/* Setup the queue */
1979 		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
1980 				&eventq_conf);
1981 		if (ret < 0) {
1982 			printf("Failed to setup tx event queue %d\n", ret);
1983 			return ret;
1984 		}
1985 		/* Link Tx event queue to Tx port */
1986 		ret = rte_event_port_link(eventdev_id, port_id,
1987 				&tx_queue_id, NULL, 1);
1988 		if (ret != 1) {
1989 			printf("Failed to link event queue to port\n");
1990 			return ret;
1991 		}
1992 	}
1993 
1994 	/* Start tx adapter */
1995 	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
1996 	if (ret < 0) {
1997 		printf("Failed to start tx adapter %d\n", ret);
1998 		return ret;
1999 	}
2000 
2001 	/* Start eventdev */
2002 	ret = rte_event_dev_start(eventdev_id);
2003 	if (ret < 0) {
2004 		printf("Failed to start event device %d\n", ret);
2005 		return ret;
2006 	}
2007 
2008 	event_mode_enabled = true;
2009 	test_ipsec_alg_list_populate();
2010 
2011 	return 0;
2012 }
2013 
2014 static void
2015 event_inline_ipsec_testsuite_teardown(void)
2016 {
2017 	uint16_t portid;
2018 	int ret;
2019 
2020 	event_mode_enabled = false;
2021 
2022 	/* Stop and release rx adapter */
2023 	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
2024 	if (ret < 0)
2025 		printf("Failed to stop rx adapter %d\n", ret);
2026 	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
2027 	if (ret < 0)
2028 		printf("Failed to remove rx adapter queues %d\n", ret);
2029 	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
2030 	if (ret < 0)
2031 		printf("Failed to free rx adapter %d\n", ret);
2032 
2033 	/* Stop and release tx adapter */
2034 	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
2035 	if (ret < 0)
2036 		printf("Failed to stop tx adapter %d\n", ret);
2037 	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
2038 	if (ret < 0)
2039 		printf("Failed to remove tx adapter queues %d\n", ret);
2040 	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
2041 	if (ret < 0)
2042 		printf("Failed to free tx adapter %d\n", ret);
2043 
2044 	/* Stop and release event devices */
2045 	rte_event_dev_stop(eventdev_id);
2046 	ret = rte_event_dev_close(eventdev_id);
2047 	if (ret < 0)
2048 		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
2049 
2050 	/* port tear down */
2051 	RTE_ETH_FOREACH_DEV(portid) {
2052 		ret = rte_eth_dev_reset(portid);
2053 		if (ret != 0)
2054 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
2055 			       rte_strerror(-ret), port_id);
2056 	}
2057 
2058 	rte_free(tx_pkts_burst);
2059 	rte_free(rx_pkts_burst);
2060 }
2061 
2062 static int
2063 test_inline_ip_reassembly(const void *testdata)
2064 {
2065 	struct reassembly_vector reassembly_td = {0};
2066 	const struct reassembly_vector *td = testdata;
2067 	struct ip_reassembly_test_packet full_pkt;
2068 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
2069 	uint16_t extra_data, extra_data_sum = 0;
2070 	struct ipsec_test_flags flags = {0};
2071 	int i = 0;
2072 
2073 	reassembly_td.sa_data = td->sa_data;
2074 	reassembly_td.nb_frags = td->nb_frags;
2075 	reassembly_td.burst = td->burst;
2076 
2077 	memcpy(&full_pkt, td->full_pkt,
2078 			sizeof(struct ip_reassembly_test_packet));
2079 	reassembly_td.full_pkt = &full_pkt;
2080 
2081 	for (; i < reassembly_td.nb_frags; i++) {
2082 		memcpy(&frags[i], td->frags[i],
2083 			sizeof(struct ip_reassembly_test_packet));
2084 		reassembly_td.frags[i] = &frags[i];
2085 
2086 		/* Add extra data for multi-seg test on all fragments except last one */
2087 		extra_data = 0;
2088 		if (plaintext_len && reassembly_td.frags[i]->len < plaintext_len &&
2089 		    (i != reassembly_td.nb_frags - 1))
2090 			extra_data = ((plaintext_len - reassembly_td.frags[i]->len) & ~0x7ULL);
2091 
2092 		test_vector_payload_populate(reassembly_td.frags[i],
2093 				(i == 0) ? true : false, extra_data, extra_data_sum);
2094 		extra_data_sum += extra_data;
2095 	}
2096 	test_vector_payload_populate(reassembly_td.full_pkt, true, extra_data_sum, 0);
2097 
2098 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
2099 }
2100 
2101 static int
2102 test_ipsec_inline_proto_known_vec(const void *test_data)
2103 {
2104 	struct ipsec_test_data td_outb;
2105 	struct ipsec_test_flags flags;
2106 
2107 	memset(&flags, 0, sizeof(flags));
2108 
2109 	memcpy(&td_outb, test_data, sizeof(td_outb));
2110 
2111 	if (td_outb.aead ||
2112 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
2113 		/* Disable IV gen to be able to test with known vectors */
2114 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
2115 	}
2116 
2117 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
2118 				false, &flags);
2119 }
2120 
2121 static int
2122 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
2123 {
2124 	const struct ipsec_test_data *td = test_data;
2125 	struct ipsec_test_flags flags;
2126 	struct ipsec_test_data td_inb;
2127 
2128 	memset(&flags, 0, sizeof(flags));
2129 
2130 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2131 		test_ipsec_td_in_from_out(td, &td_inb);
2132 	else
2133 		memcpy(&td_inb, td, sizeof(td_inb));
2134 
2135 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
2136 }
2137 
2138 static int
2139 test_ipsec_inline_proto_oop_inb(const void *test_data)
2140 {
2141 	const struct ipsec_test_data *td = test_data;
2142 	struct ipsec_test_flags flags;
2143 	struct ipsec_test_data td_inb;
2144 
2145 	memset(&flags, 0, sizeof(flags));
2146 	flags.inb_oop = true;
2147 
2148 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2149 		test_ipsec_td_in_from_out(td, &td_inb);
2150 	else
2151 		memcpy(&td_inb, td, sizeof(td_inb));
2152 
2153 	td_inb.ipsec_xform.options.ingress_oop = true;
2154 
2155 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
2156 }
2157 
2158 static int
2159 test_ipsec_inline_proto_display_list(void)
2160 {
2161 	struct ipsec_test_flags flags;
2162 
2163 	memset(&flags, 0, sizeof(flags));
2164 
2165 	flags.display_alg = true;
2166 	flags.plaintext_len = plaintext_len;
2167 
2168 	return test_ipsec_inline_proto_all(&flags);
2169 }
2170 
2171 static int
2172 test_ipsec_inline_proto_udp_encap(void)
2173 {
2174 	struct ipsec_test_flags flags;
2175 
2176 	memset(&flags, 0, sizeof(flags));
2177 
2178 	flags.udp_encap = true;
2179 	flags.plaintext_len = plaintext_len;
2180 
2181 	return test_ipsec_inline_proto_all(&flags);
2182 }
2183 
2184 static int
2185 test_ipsec_inline_proto_udp_ports_verify(void)
2186 {
2187 	struct ipsec_test_flags flags;
2188 
2189 	memset(&flags, 0, sizeof(flags));
2190 
2191 	flags.udp_encap = true;
2192 	flags.udp_ports_verify = true;
2193 	flags.plaintext_len = plaintext_len;
2194 
2195 	return test_ipsec_inline_proto_all(&flags);
2196 }
2197 
2198 static int
2199 test_ipsec_inline_proto_err_icv_corrupt(void)
2200 {
2201 	struct ipsec_test_flags flags;
2202 
2203 	memset(&flags, 0, sizeof(flags));
2204 
2205 	flags.icv_corrupt = true;
2206 	flags.plaintext_len = plaintext_len;
2207 
2208 	return test_ipsec_inline_proto_all(&flags);
2209 }
2210 
2211 static int
2212 test_ipsec_inline_proto_tunnel_dst_addr_verify(void)
2213 {
2214 	struct ipsec_test_flags flags;
2215 
2216 	memset(&flags, 0, sizeof(flags));
2217 
2218 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
2219 	flags.plaintext_len = plaintext_len;
2220 
2221 	return test_ipsec_inline_proto_all(&flags);
2222 }
2223 
2224 static int
2225 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(void)
2226 {
2227 	struct ipsec_test_flags flags;
2228 
2229 	memset(&flags, 0, sizeof(flags));
2230 
2231 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
2232 	flags.plaintext_len = plaintext_len;
2233 
2234 	return test_ipsec_inline_proto_all(&flags);
2235 }
2236 
2237 static int
2238 test_ipsec_inline_proto_inner_ip_csum(void)
2239 {
2240 	struct ipsec_test_flags flags;
2241 
2242 	memset(&flags, 0, sizeof(flags));
2243 
2244 	flags.ip_csum = true;
2245 	flags.plaintext_len = plaintext_len;
2246 
2247 	return test_ipsec_inline_proto_all(&flags);
2248 }
2249 
2250 static int
2251 test_ipsec_inline_proto_inner_l4_csum(void)
2252 {
2253 	struct ipsec_test_flags flags;
2254 
2255 	memset(&flags, 0, sizeof(flags));
2256 
2257 	flags.l4_csum = true;
2258 	flags.plaintext_len = plaintext_len;
2259 
2260 	return test_ipsec_inline_proto_all(&flags);
2261 }
2262 
2263 static int
2264 test_ipsec_inline_proto_tunnel_v4_in_v4(void)
2265 {
2266 	struct ipsec_test_flags flags;
2267 
2268 	memset(&flags, 0, sizeof(flags));
2269 
2270 	flags.ipv6 = false;
2271 	flags.tunnel_ipv6 = false;
2272 	flags.plaintext_len = plaintext_len;
2273 
2274 	return test_ipsec_inline_proto_all(&flags);
2275 }
2276 
2277 static int
2278 test_ipsec_inline_proto_tunnel_v6_in_v6(void)
2279 {
2280 	struct ipsec_test_flags flags;
2281 
2282 	memset(&flags, 0, sizeof(flags));
2283 
2284 	flags.ipv6 = true;
2285 	flags.tunnel_ipv6 = true;
2286 	flags.plaintext_len = plaintext_len;
2287 
2288 	return test_ipsec_inline_proto_all(&flags);
2289 }
2290 
2291 static int
2292 test_ipsec_inline_proto_tunnel_v4_in_v6(void)
2293 {
2294 	struct ipsec_test_flags flags;
2295 
2296 	memset(&flags, 0, sizeof(flags));
2297 
2298 	flags.ipv6 = false;
2299 	flags.tunnel_ipv6 = true;
2300 	flags.plaintext_len = plaintext_len;
2301 
2302 	return test_ipsec_inline_proto_all(&flags);
2303 }
2304 
2305 static int
2306 test_ipsec_inline_proto_tunnel_v6_in_v4(void)
2307 {
2308 	struct ipsec_test_flags flags;
2309 
2310 	memset(&flags, 0, sizeof(flags));
2311 
2312 	flags.ipv6 = true;
2313 	flags.tunnel_ipv6 = false;
2314 	flags.plaintext_len = plaintext_len;
2315 
2316 	return test_ipsec_inline_proto_all(&flags);
2317 }
2318 
2319 static int
2320 test_ipsec_inline_proto_transport_v4(void)
2321 {
2322 	struct ipsec_test_flags flags;
2323 
2324 	memset(&flags, 0, sizeof(flags));
2325 
2326 	flags.ipv6 = false;
2327 	flags.transport = true;
2328 	flags.plaintext_len = plaintext_len;
2329 
2330 	return test_ipsec_inline_proto_all(&flags);
2331 }
2332 
2333 static int
2334 test_ipsec_inline_proto_transport_l4_csum(void)
2335 {
2336 	struct ipsec_test_flags flags = {
2337 		.l4_csum = true,
2338 		.transport = true,
2339 		.plaintext_len = plaintext_len,
2340 	};
2341 
2342 	return test_ipsec_inline_proto_all(&flags);
2343 }
2344 
2345 static int
2346 test_ipsec_inline_proto_stats(void)
2347 {
2348 	struct ipsec_test_flags flags;
2349 
2350 	memset(&flags, 0, sizeof(flags));
2351 
2352 	flags.stats_success = true;
2353 	flags.plaintext_len = plaintext_len;
2354 
2355 	return test_ipsec_inline_proto_all(&flags);
2356 }
2357 
2358 static int
2359 test_ipsec_inline_proto_pkt_fragment(void)
2360 {
2361 	struct ipsec_test_flags flags;
2362 
2363 	memset(&flags, 0, sizeof(flags));
2364 
2365 	flags.fragment = true;
2366 	flags.plaintext_len = plaintext_len;
2367 
2368 	return test_ipsec_inline_proto_all(&flags);
2369 
2370 }
2371 
2372 static int
2373 test_ipsec_inline_proto_copy_df_inner_0(void)
2374 {
2375 	struct ipsec_test_flags flags;
2376 
2377 	memset(&flags, 0, sizeof(flags));
2378 
2379 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
2380 	flags.plaintext_len = plaintext_len;
2381 
2382 	return test_ipsec_inline_proto_all(&flags);
2383 }
2384 
2385 static int
2386 test_ipsec_inline_proto_copy_df_inner_1(void)
2387 {
2388 	struct ipsec_test_flags flags;
2389 
2390 	memset(&flags, 0, sizeof(flags));
2391 
2392 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
2393 	flags.plaintext_len = plaintext_len;
2394 
2395 	return test_ipsec_inline_proto_all(&flags);
2396 }
2397 
2398 static int
2399 test_ipsec_inline_proto_set_df_0_inner_1(void)
2400 {
2401 	struct ipsec_test_flags flags;
2402 
2403 	memset(&flags, 0, sizeof(flags));
2404 
2405 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
2406 	flags.plaintext_len = plaintext_len;
2407 
2408 	return test_ipsec_inline_proto_all(&flags);
2409 }
2410 
2411 static int
2412 test_ipsec_inline_proto_set_df_1_inner_0(void)
2413 {
2414 	struct ipsec_test_flags flags;
2415 
2416 	memset(&flags, 0, sizeof(flags));
2417 
2418 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
2419 	flags.plaintext_len = plaintext_len;
2420 
2421 	return test_ipsec_inline_proto_all(&flags);
2422 }
2423 
2424 static int
2425 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(void)
2426 {
2427 	struct ipsec_test_flags flags;
2428 
2429 	memset(&flags, 0, sizeof(flags));
2430 
2431 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2432 	flags.plaintext_len = plaintext_len;
2433 
2434 	return test_ipsec_inline_proto_all(&flags);
2435 }
2436 
2437 static int
2438 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(void)
2439 {
2440 	struct ipsec_test_flags flags;
2441 
2442 	memset(&flags, 0, sizeof(flags));
2443 
2444 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2445 	flags.plaintext_len = plaintext_len;
2446 
2447 	return test_ipsec_inline_proto_all(&flags);
2448 }
2449 
2450 static int
2451 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(void)
2452 {
2453 	struct ipsec_test_flags flags;
2454 
2455 	memset(&flags, 0, sizeof(flags));
2456 
2457 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2458 	flags.plaintext_len = plaintext_len;
2459 
2460 	return test_ipsec_inline_proto_all(&flags);
2461 }
2462 
2463 static int
2464 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(void)
2465 {
2466 	struct ipsec_test_flags flags;
2467 
2468 	memset(&flags, 0, sizeof(flags));
2469 
2470 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2471 	flags.plaintext_len = plaintext_len;
2472 
2473 	return test_ipsec_inline_proto_all(&flags);
2474 }
2475 
2476 static int
2477 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(void)
2478 {
2479 	struct ipsec_test_flags flags;
2480 
2481 	memset(&flags, 0, sizeof(flags));
2482 
2483 	flags.ipv6 = true;
2484 	flags.tunnel_ipv6 = true;
2485 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2486 	flags.plaintext_len = plaintext_len;
2487 
2488 	return test_ipsec_inline_proto_all(&flags);
2489 }
2490 
2491 static int
2492 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(void)
2493 {
2494 	struct ipsec_test_flags flags;
2495 
2496 	memset(&flags, 0, sizeof(flags));
2497 
2498 	flags.ipv6 = true;
2499 	flags.tunnel_ipv6 = true;
2500 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2501 	flags.plaintext_len = plaintext_len;
2502 
2503 	return test_ipsec_inline_proto_all(&flags);
2504 }
2505 
2506 static int
2507 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(void)
2508 {
2509 	struct ipsec_test_flags flags;
2510 
2511 	memset(&flags, 0, sizeof(flags));
2512 
2513 	flags.ipv6 = true;
2514 	flags.tunnel_ipv6 = true;
2515 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2516 	flags.plaintext_len = plaintext_len;
2517 
2518 	return test_ipsec_inline_proto_all(&flags);
2519 }
2520 
2521 static int
2522 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(void)
2523 {
2524 	struct ipsec_test_flags flags;
2525 
2526 	memset(&flags, 0, sizeof(flags));
2527 
2528 	flags.ipv6 = true;
2529 	flags.tunnel_ipv6 = true;
2530 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2531 	flags.plaintext_len = plaintext_len;
2532 
2533 	return test_ipsec_inline_proto_all(&flags);
2534 }
2535 
2536 static int
2537 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(void)
2538 {
2539 	struct ipsec_test_flags flags;
2540 
2541 	memset(&flags, 0, sizeof(flags));
2542 
2543 	flags.ipv6 = true;
2544 	flags.tunnel_ipv6 = true;
2545 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
2546 
2547 	return test_ipsec_inline_proto_all(&flags);
2548 }
2549 
2550 static int
2551 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(void)
2552 {
2553 	struct ipsec_test_flags flags;
2554 
2555 	memset(&flags, 0, sizeof(flags));
2556 
2557 	flags.ipv6 = true;
2558 	flags.tunnel_ipv6 = true;
2559 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
2560 
2561 	return test_ipsec_inline_proto_all(&flags);
2562 }
2563 
2564 static int
2565 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(void)
2566 {
2567 	struct ipsec_test_flags flags;
2568 
2569 	memset(&flags, 0, sizeof(flags));
2570 
2571 	flags.ipv6 = true;
2572 	flags.tunnel_ipv6 = true;
2573 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
2574 
2575 	return test_ipsec_inline_proto_all(&flags);
2576 }
2577 
2578 static int
2579 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(void)
2580 {
2581 	struct ipsec_test_flags flags;
2582 
2583 	memset(&flags, 0, sizeof(flags));
2584 
2585 	flags.ipv6 = true;
2586 	flags.tunnel_ipv6 = true;
2587 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
2588 
2589 	return test_ipsec_inline_proto_all(&flags);
2590 }
2591 
2592 static int
2593 test_ipsec_inline_proto_ipv4_ttl_decrement(void)
2594 {
2595 	struct ipsec_test_flags flags = {
2596 		.dec_ttl_or_hop_limit = true,
2597 		.plaintext_len = plaintext_len,
2598 	};
2599 
2600 	return test_ipsec_inline_proto_all(&flags);
2601 }
2602 
2603 static int
2604 test_ipsec_inline_proto_ipv6_hop_limit_decrement(void)
2605 {
2606 	struct ipsec_test_flags flags = {
2607 		.ipv6 = true,
2608 		.dec_ttl_or_hop_limit = true,
2609 		.plaintext_len = plaintext_len,
2610 	};
2611 
2612 	return test_ipsec_inline_proto_all(&flags);
2613 }
2614 
2615 static int
2616 test_ipsec_inline_proto_iv_gen(void)
2617 {
2618 	struct ipsec_test_flags flags;
2619 
2620 	memset(&flags, 0, sizeof(flags));
2621 
2622 	flags.iv_gen = true;
2623 	flags.plaintext_len = plaintext_len;
2624 
2625 	return test_ipsec_inline_proto_all(&flags);
2626 }
2627 
2628 static int
2629 test_ipsec_inline_proto_sa_pkt_soft_expiry(void)
2630 {
2631 	struct ipsec_test_flags flags = {
2632 		.sa_expiry_pkts_soft = true,
2633 		.plaintext_len = plaintext_len,
2634 	};
2635 	return test_ipsec_inline_proto_all(&flags);
2636 }
2637 static int
2638 test_ipsec_inline_proto_sa_byte_soft_expiry(void)
2639 {
2640 	struct ipsec_test_flags flags = {
2641 		.sa_expiry_bytes_soft = true,
2642 		.plaintext_len = plaintext_len,
2643 	};
2644 	return test_ipsec_inline_proto_all(&flags);
2645 }
2646 
2647 static int
2648 test_ipsec_inline_proto_sa_pkt_hard_expiry(void)
2649 {
2650 	struct ipsec_test_flags flags = {
2651 		.sa_expiry_pkts_hard = true
2652 	};
2653 
2654 	return test_ipsec_inline_proto_all(&flags);
2655 }
2656 
2657 static int
2658 test_ipsec_inline_proto_sa_byte_hard_expiry(void)
2659 {
2660 	struct ipsec_test_flags flags = {
2661 		.sa_expiry_bytes_hard = true
2662 	};
2663 
2664 	return test_ipsec_inline_proto_all(&flags);
2665 }
2666 
2667 static int
2668 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
2669 {
2670 	struct ipsec_test_data td_outb;
2671 	struct ipsec_test_flags flags;
2672 
2673 	memset(&flags, 0, sizeof(flags));
2674 	flags.fragment = true;
2675 	flags.plaintext_len = plaintext_len;
2676 
2677 	memcpy(&td_outb, test_data, sizeof(td_outb));
2678 
2679 	/* Disable IV gen to be able to test with known vectors */
2680 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
2681 
2682 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
2683 						&flags);
2684 }
2685 
2686 static int
2687 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
2688 		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
2689 		      uint64_t winsz)
2690 {
2691 	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
2692 	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
2693 	struct ipsec_test_flags flags;
2694 	uint32_t i, ret = 0;
2695 
2696 	memset(&flags, 0, sizeof(flags));
2697 	flags.antireplay = true;
2698 	flags.plaintext_len = plaintext_len;
2699 
2700 	for (i = 0; i < nb_pkts; i++) {
2701 		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
2702 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
2703 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
2704 		td_outb[i].ipsec_xform.options.esn = esn_en;
2705 	}
2706 
2707 	for (i = 0; i < nb_pkts; i++)
2708 		td_outb[i].ipsec_xform.esn.value = esn[i];
2709 
2710 	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
2711 				nb_pkts, true, &flags);
2712 	if (ret != TEST_SUCCESS)
2713 		return ret;
2714 
2715 	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
2716 
2717 	for (i = 0; i < nb_pkts; i++) {
2718 		td_inb[i].ipsec_xform.options.esn = esn_en;
2719 		/* Set antireplay flag for packets to be dropped */
2720 		td_inb[i].ar_packet = replayed_pkt[i];
2721 	}
2722 
2723 	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
2724 				true, &flags);
2725 
2726 	return ret;
2727 }
2728 
/*
 * Basic anti-replay scenario for a window of size winsz: advance the
 * window top, then probe sequence numbers inside, below and duplicated
 * within the window.
 */
static int
test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
{
	const uint32_t nb_pkts = 5;
	uint64_t esn[5] = {
		/* 1. Advance the TOP of the window to WS * 2 */
		winsz * 2,
		/* 2. Test sequence number within the new window(WS + 1) */
		winsz + 1,
		/* 3. Test sequence number less than the window BOTTOM */
		winsz,
		/* 4. Test sequence number in the middle of the window */
		winsz + (winsz / 2),
		/* 5. Test replay of the packet in the middle of the window */
		winsz + (winsz / 2),
	};
	bool replayed_pkt[5] = {false, false, true, false, true};

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
			nb_pkts, false, winsz);
}
2757 
/* Anti-replay with a 1024-entry window. */
static int
test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
{
	const uint64_t window_size = 1024;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, window_size);
}
2763 
/* Anti-replay with a 2048-entry window. */
static int
test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
{
	const uint64_t window_size = 2048;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, window_size);
}
2769 
/* Anti-replay with a 4096-entry window. */
static int
test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
{
	const uint64_t window_size = 4096;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, window_size);
}
2775 
/*
 * ESN anti-replay scenario for a window of size winsz: start just below
 * the 32-bit sequence number boundary, cross it, then probe in-window
 * and duplicate sequence numbers around the wrap point.
 */
static int
test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
{

	uint32_t nb_pkts = 7;
	bool replayed_pkt[7];
	uint64_t esn[7];

	/* Set the initial sequence number */
	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
	/* 3. Test with sequence number within window (1<<32 - 1) */
	esn[3] = (uint64_t)((1ULL << 32) - 1);
	/* 4. Test with sequence number within window (1<<32) */
	esn[4] = (uint64_t)(1ULL << 32);
	/* 5. Test with duplicate sequence number within
	 * new window (1<<32 - 1)
	 */
	esn[5] = (uint64_t)((1ULL << 32) - 1);
	/* 6. Test with duplicate sequence number within new window (1<<32) */
	esn[6] = (uint64_t)(1ULL << 32);

	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = false;
	replayed_pkt[3] = false;
	replayed_pkt[4] = false;
	replayed_pkt[5] = true;
	replayed_pkt[6] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
				     true, winsz);
}
2812 
/* ESN anti-replay with a 1024-entry window. */
static int
test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
{
	const uint64_t window_size = 1024;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, window_size);
}
2818 
/* ESN anti-replay with a 2048-entry window. */
static int
test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
{
	const uint64_t window_size = 2048;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, window_size);
}
2824 
/* ESN anti-replay with a 4096-entry window. */
static int
test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
{
	const uint64_t window_size = 4096;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, window_size);
}
2830 
2831 static struct unit_test_suite inline_ipsec_testsuite  = {
2832 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
2833 	.unit_test_cases = {
2834 		TEST_CASE_NAMED_WITH_DATA(
2835 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2836 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2837 			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
2838 		TEST_CASE_NAMED_WITH_DATA(
2839 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2840 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2841 			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
2842 		TEST_CASE_NAMED_WITH_DATA(
2843 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2844 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2845 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
2846 		TEST_CASE_NAMED_WITH_DATA(
2847 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
2848 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2849 			test_ipsec_inline_proto_known_vec,
2850 			&pkt_aes_128_cbc_md5),
2851 		TEST_CASE_NAMED_WITH_DATA(
2852 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2853 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2854 			test_ipsec_inline_proto_known_vec,
2855 			&pkt_aes_128_cbc_hmac_sha256),
2856 		TEST_CASE_NAMED_WITH_DATA(
2857 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2858 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2859 			test_ipsec_inline_proto_known_vec,
2860 			&pkt_aes_128_cbc_hmac_sha384),
2861 		TEST_CASE_NAMED_WITH_DATA(
2862 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2863 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2864 			test_ipsec_inline_proto_known_vec,
2865 			&pkt_aes_128_cbc_hmac_sha512),
2866 		TEST_CASE_NAMED_WITH_DATA(
2867 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
2868 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2869 			test_ipsec_inline_proto_known_vec,
2870 			&pkt_3des_cbc_hmac_sha256),
2871 		TEST_CASE_NAMED_WITH_DATA(
2872 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
2873 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2874 			test_ipsec_inline_proto_known_vec,
2875 			&pkt_3des_cbc_hmac_sha384),
2876 		TEST_CASE_NAMED_WITH_DATA(
2877 			"Outbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
2878 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2879 			test_ipsec_inline_proto_known_vec,
2880 			&pkt_3des_cbc_hmac_sha512),
2881 		TEST_CASE_NAMED_WITH_DATA(
2882 			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
2883 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2884 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
2885 		TEST_CASE_NAMED_WITH_DATA(
2886 			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2887 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2888 			test_ipsec_inline_proto_known_vec,
2889 			&pkt_aes_128_cbc_hmac_sha256_v6),
2890 		TEST_CASE_NAMED_WITH_DATA(
2891 			"Outbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
2892 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2893 			test_ipsec_inline_proto_known_vec,
2894 			&pkt_3des_cbc_hmac_sha256_v6),
2895 		TEST_CASE_NAMED_WITH_DATA(
2896 			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2897 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2898 			test_ipsec_inline_proto_known_vec,
2899 			&pkt_null_aes_xcbc),
2900 		TEST_CASE_NAMED_WITH_DATA(
2901 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
2902 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2903 			test_ipsec_inline_proto_known_vec,
2904 			&pkt_des_cbc_hmac_sha256),
2905 		TEST_CASE_NAMED_WITH_DATA(
2906 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
2907 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2908 			test_ipsec_inline_proto_known_vec,
2909 			&pkt_des_cbc_hmac_sha384),
2910 		TEST_CASE_NAMED_WITH_DATA(
2911 			"Outbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
2912 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2913 			test_ipsec_inline_proto_known_vec,
2914 			&pkt_des_cbc_hmac_sha512),
2915 		TEST_CASE_NAMED_WITH_DATA(
2916 			"Outbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
2917 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2918 			test_ipsec_inline_proto_known_vec,
2919 			&pkt_des_cbc_hmac_sha256_v6),
2920 
2921 		TEST_CASE_NAMED_WITH_DATA(
2922 			"Outbound fragmented packet",
2923 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2924 			test_ipsec_inline_proto_known_vec_fragmented,
2925 			&pkt_aes_128_gcm_frag),
2926 
2927 		TEST_CASE_NAMED_WITH_DATA(
2928 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2929 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2930 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
2931 		TEST_CASE_NAMED_WITH_DATA(
2932 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2933 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2934 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
2935 		TEST_CASE_NAMED_WITH_DATA(
2936 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2937 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2938 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
2939 		TEST_CASE_NAMED_WITH_DATA(
2940 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
2941 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2942 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
2943 		TEST_CASE_NAMED_WITH_DATA(
2944 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC MD5 [12B ICV])",
2945 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2946 			test_ipsec_inline_proto_known_vec_inb,
2947 			&pkt_aes_128_cbc_md5),
2948 		TEST_CASE_NAMED_WITH_DATA(
2949 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2950 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2951 			test_ipsec_inline_proto_known_vec_inb,
2952 			&pkt_aes_128_cbc_hmac_sha256),
2953 		TEST_CASE_NAMED_WITH_DATA(
2954 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2955 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2956 			test_ipsec_inline_proto_known_vec_inb,
2957 			&pkt_aes_128_cbc_hmac_sha384),
2958 		TEST_CASE_NAMED_WITH_DATA(
2959 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2960 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2961 			test_ipsec_inline_proto_known_vec_inb,
2962 			&pkt_aes_128_cbc_hmac_sha512),
2963 		TEST_CASE_NAMED_WITH_DATA(
2964 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA256 [16B ICV])",
2965 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2966 			test_ipsec_inline_proto_known_vec_inb,
2967 			&pkt_3des_cbc_hmac_sha256),
2968 		TEST_CASE_NAMED_WITH_DATA(
2969 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA384 [24B ICV])",
2970 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2971 			test_ipsec_inline_proto_known_vec_inb,
2972 			&pkt_3des_cbc_hmac_sha384),
2973 		TEST_CASE_NAMED_WITH_DATA(
2974 			"Inbound known vector (ESP tunnel mode IPv4 3DES-CBC HMAC-SHA512 [32B ICV])",
2975 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2976 			test_ipsec_inline_proto_known_vec_inb,
2977 			&pkt_3des_cbc_hmac_sha512),
2978 		TEST_CASE_NAMED_WITH_DATA(
2979 			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
2980 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2981 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
2982 		TEST_CASE_NAMED_WITH_DATA(
2983 			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2984 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2985 			test_ipsec_inline_proto_known_vec_inb,
2986 			&pkt_aes_128_cbc_hmac_sha256_v6),
2987 		TEST_CASE_NAMED_WITH_DATA(
2988 			"Inbound known vector (ESP tunnel mode IPv6 3DES-CBC HMAC-SHA256 [16B ICV])",
2989 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2990 			test_ipsec_inline_proto_known_vec_inb,
2991 			&pkt_3des_cbc_hmac_sha256_v6),
2992 		TEST_CASE_NAMED_WITH_DATA(
2993 			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2994 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2995 			test_ipsec_inline_proto_known_vec_inb,
2996 			&pkt_null_aes_xcbc),
2997 		TEST_CASE_NAMED_WITH_DATA(
2998 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA256 [16B ICV])",
2999 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3000 			test_ipsec_inline_proto_known_vec_inb,
3001 			&pkt_des_cbc_hmac_sha256),
3002 		TEST_CASE_NAMED_WITH_DATA(
3003 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA384 [24B ICV])",
3004 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3005 			test_ipsec_inline_proto_known_vec_inb,
3006 			&pkt_des_cbc_hmac_sha384),
3007 		TEST_CASE_NAMED_WITH_DATA(
3008 			"Inbound known vector (ESP tunnel mode IPv4 DES-CBC HMAC-SHA512 [32B ICV])",
3009 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3010 			test_ipsec_inline_proto_known_vec_inb,
3011 			&pkt_des_cbc_hmac_sha512),
3012 		TEST_CASE_NAMED_WITH_DATA(
3013 			"Inbound known vector (ESP tunnel mode IPv6 DES-CBC HMAC-SHA256 [16B ICV])",
3014 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3015 			test_ipsec_inline_proto_known_vec_inb,
3016 			&pkt_des_cbc_hmac_sha256_v6),
3017 
3018 
3019 		TEST_CASE_NAMED_ST(
3020 			"Combined test alg list",
3021 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3022 			test_ipsec_inline_proto_display_list),
3023 
3024 		TEST_CASE_NAMED_ST(
3025 			"UDP encapsulation",
3026 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3027 			test_ipsec_inline_proto_udp_encap),
3028 		TEST_CASE_NAMED_ST(
3029 			"UDP encapsulation ports verification test",
3030 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3031 			test_ipsec_inline_proto_udp_ports_verify),
3032 		TEST_CASE_NAMED_ST(
3033 			"Negative test: ICV corruption",
3034 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3035 			test_ipsec_inline_proto_err_icv_corrupt),
3036 		TEST_CASE_NAMED_ST(
3037 			"Tunnel dst addr verification",
3038 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3039 			test_ipsec_inline_proto_tunnel_dst_addr_verify),
3040 		TEST_CASE_NAMED_ST(
3041 			"Tunnel src and dst addr verification",
3042 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3043 			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
3044 		TEST_CASE_NAMED_ST(
3045 			"Inner IP checksum",
3046 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3047 			test_ipsec_inline_proto_inner_ip_csum),
3048 		TEST_CASE_NAMED_ST(
3049 			"Inner L4 checksum",
3050 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3051 			test_ipsec_inline_proto_inner_l4_csum),
3052 		TEST_CASE_NAMED_ST(
3053 			"Tunnel IPv4 in IPv4",
3054 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3055 			test_ipsec_inline_proto_tunnel_v4_in_v4),
3056 		TEST_CASE_NAMED_ST(
3057 			"Tunnel IPv6 in IPv6",
3058 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3059 			test_ipsec_inline_proto_tunnel_v6_in_v6),
3060 		TEST_CASE_NAMED_ST(
3061 			"Tunnel IPv4 in IPv6",
3062 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3063 			test_ipsec_inline_proto_tunnel_v4_in_v6),
3064 		TEST_CASE_NAMED_ST(
3065 			"Tunnel IPv6 in IPv4",
3066 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3067 			test_ipsec_inline_proto_tunnel_v6_in_v4),
3068 		TEST_CASE_NAMED_ST(
3069 			"Transport IPv4",
3070 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3071 			test_ipsec_inline_proto_transport_v4),
3072 		TEST_CASE_NAMED_ST(
3073 			"Transport l4 checksum",
3074 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3075 			test_ipsec_inline_proto_transport_l4_csum),
3076 		TEST_CASE_NAMED_ST(
3077 			"Statistics: success",
3078 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3079 			test_ipsec_inline_proto_stats),
3080 		TEST_CASE_NAMED_ST(
3081 			"Fragmented packet",
3082 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3083 			test_ipsec_inline_proto_pkt_fragment),
3084 		TEST_CASE_NAMED_ST(
3085 			"Tunnel header copy DF (inner 0)",
3086 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3087 			test_ipsec_inline_proto_copy_df_inner_0),
3088 		TEST_CASE_NAMED_ST(
3089 			"Tunnel header copy DF (inner 1)",
3090 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3091 			test_ipsec_inline_proto_copy_df_inner_1),
3092 		TEST_CASE_NAMED_ST(
3093 			"Tunnel header set DF 0 (inner 1)",
3094 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3095 			test_ipsec_inline_proto_set_df_0_inner_1),
3096 		TEST_CASE_NAMED_ST(
3097 			"Tunnel header set DF 1 (inner 0)",
3098 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3099 			test_ipsec_inline_proto_set_df_1_inner_0),
3100 		TEST_CASE_NAMED_ST(
3101 			"Tunnel header IPv4 copy DSCP (inner 0)",
3102 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3103 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
3104 		TEST_CASE_NAMED_ST(
3105 			"Tunnel header IPv4 copy DSCP (inner 1)",
3106 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3107 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
3108 		TEST_CASE_NAMED_ST(
3109 			"Tunnel header IPv4 set DSCP 0 (inner 1)",
3110 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3111 			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
3112 		TEST_CASE_NAMED_ST(
3113 			"Tunnel header IPv4 set DSCP 1 (inner 0)",
3114 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3115 			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
3116 		TEST_CASE_NAMED_ST(
3117 			"Tunnel header IPv6 copy DSCP (inner 0)",
3118 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3119 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
3120 		TEST_CASE_NAMED_ST(
3121 			"Tunnel header IPv6 copy DSCP (inner 1)",
3122 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3123 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
3124 		TEST_CASE_NAMED_ST(
3125 			"Tunnel header IPv6 set DSCP 0 (inner 1)",
3126 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3127 			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
3128 		TEST_CASE_NAMED_ST(
3129 			"Tunnel header IPv6 set DSCP 1 (inner 0)",
3130 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3131 			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
3132 		TEST_CASE_NAMED_ST(
3133 			"Tunnel header IPv6 copy FLABEL (inner 0)",
3134 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3135 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
3136 		TEST_CASE_NAMED_ST(
3137 			"Tunnel header IPv6 copy FLABEL (inner 1)",
3138 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3139 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
3140 		TEST_CASE_NAMED_ST(
3141 			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
3142 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3143 			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
3144 		TEST_CASE_NAMED_ST(
3145 			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
3146 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3147 			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
3148 		TEST_CASE_NAMED_ST(
3149 			"Tunnel header IPv4 decrement inner TTL",
3150 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3151 			test_ipsec_inline_proto_ipv4_ttl_decrement),
3152 		TEST_CASE_NAMED_ST(
3153 			"Tunnel header IPv6 decrement inner hop limit",
3154 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3155 			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
3156 		TEST_CASE_NAMED_ST(
3157 			"IV generation",
3158 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3159 			test_ipsec_inline_proto_iv_gen),
3160 		TEST_CASE_NAMED_ST(
3161 			"SA soft expiry with packet limit",
3162 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3163 			test_ipsec_inline_proto_sa_pkt_soft_expiry),
3164 		TEST_CASE_NAMED_ST(
3165 			"SA soft expiry with byte limit",
3166 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3167 			test_ipsec_inline_proto_sa_byte_soft_expiry),
3168 		TEST_CASE_NAMED_ST(
3169 			"SA hard expiry with packet limit",
3170 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3171 			test_ipsec_inline_proto_sa_pkt_hard_expiry),
3172 		TEST_CASE_NAMED_ST(
3173 			"SA hard expiry with byte limit",
3174 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3175 			test_ipsec_inline_proto_sa_byte_hard_expiry),
3176 
3177 		TEST_CASE_NAMED_WITH_DATA(
3178 			"Antireplay with window size 1024",
3179 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3180 			test_ipsec_inline_proto_pkt_antireplay1024,
3181 			&pkt_aes_128_gcm),
3182 		TEST_CASE_NAMED_WITH_DATA(
3183 			"Antireplay with window size 2048",
3184 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3185 			test_ipsec_inline_proto_pkt_antireplay2048,
3186 			&pkt_aes_128_gcm),
3187 		TEST_CASE_NAMED_WITH_DATA(
3188 			"Antireplay with window size 4096",
3189 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3190 			test_ipsec_inline_proto_pkt_antireplay4096,
3191 			&pkt_aes_128_gcm),
3192 		TEST_CASE_NAMED_WITH_DATA(
3193 			"ESN and Antireplay with window size 1024",
3194 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3195 			test_ipsec_inline_proto_pkt_esn_antireplay1024,
3196 			&pkt_aes_128_gcm),
3197 		TEST_CASE_NAMED_WITH_DATA(
3198 			"ESN and Antireplay with window size 2048",
3199 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3200 			test_ipsec_inline_proto_pkt_esn_antireplay2048,
3201 			&pkt_aes_128_gcm),
3202 		TEST_CASE_NAMED_WITH_DATA(
3203 			"ESN and Antireplay with window size 4096",
3204 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3205 			test_ipsec_inline_proto_pkt_esn_antireplay4096,
3206 			&pkt_aes_128_gcm),
3207 
3208 		TEST_CASE_NAMED_WITH_DATA(
3209 			"IPv4 Reassembly with 2 fragments",
3210 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3211 			test_inline_ip_reassembly, &ipv4_2frag_vector),
3212 		TEST_CASE_NAMED_WITH_DATA(
3213 			"IPv6 Reassembly with 2 fragments",
3214 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3215 			test_inline_ip_reassembly, &ipv6_2frag_vector),
3216 		TEST_CASE_NAMED_WITH_DATA(
3217 			"IPv4 Reassembly with 4 fragments",
3218 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3219 			test_inline_ip_reassembly, &ipv4_4frag_vector),
3220 		TEST_CASE_NAMED_WITH_DATA(
3221 			"IPv6 Reassembly with 4 fragments",
3222 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3223 			test_inline_ip_reassembly, &ipv6_4frag_vector),
3224 		TEST_CASE_NAMED_WITH_DATA(
3225 			"IPv4 Reassembly with 5 fragments",
3226 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3227 			test_inline_ip_reassembly, &ipv4_5frag_vector),
3228 		TEST_CASE_NAMED_WITH_DATA(
3229 			"IPv6 Reassembly with 5 fragments",
3230 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3231 			test_inline_ip_reassembly, &ipv6_5frag_vector),
3232 		TEST_CASE_NAMED_WITH_DATA(
3233 			"IPv4 Reassembly with incomplete fragments",
3234 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3235 			test_inline_ip_reassembly, &ipv4_incomplete_vector),
3236 		TEST_CASE_NAMED_WITH_DATA(
3237 			"IPv4 Reassembly with overlapping fragments",
3238 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3239 			test_inline_ip_reassembly, &ipv4_overlap_vector),
3240 		TEST_CASE_NAMED_WITH_DATA(
3241 			"IPv4 Reassembly with out of order fragments",
3242 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3243 			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
3244 		TEST_CASE_NAMED_WITH_DATA(
3245 			"IPv4 Reassembly with burst of 4 fragments",
3246 			ut_setup_inline_ipsec_reassembly, ut_teardown_inline_ipsec_reassembly,
3247 			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
3248 		TEST_CASE_NAMED_WITH_DATA(
3249 			"Inbound Out-Of-Place processing",
3250 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
3251 			test_ipsec_inline_proto_oop_inb,
3252 			&pkt_aes_128_gcm),
3253 
3254 		TEST_CASES_END() /**< NULL terminate unit test array */
3255 	},
3256 };
3257 
3258 
3259 static int
3260 test_inline_ipsec(void)
3261 {
3262 	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
3263 	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
3264 	return unit_test_suite_runner(&inline_ipsec_testsuite);
3265 }
3266 
3267 
3268 static int
3269 test_inline_ipsec_sg(void)
3270 {
3271 	int rc;
3272 
3273 	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
3274 	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
3275 
3276 	sg_mode = true;
3277 	/* Run the tests */
3278 	rc = unit_test_suite_runner(&inline_ipsec_testsuite);
3279 	sg_mode = false;
3280 
3281 	port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_SCATTER;
3282 	port_conf.txmode.offloads &= ~RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
3283 	return rc;
3284 }
3285 
3286 static int
3287 test_event_inline_ipsec(void)
3288 {
3289 	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
3290 	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
3291 	return unit_test_suite_runner(&inline_ipsec_testsuite);
3292 }
3293 
3294 #endif /* !RTE_EXEC_ENV_WINDOWS */
3295 
/* Register the three suite entry points with the test framework's command list. */
REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
REGISTER_TEST_COMMAND(inline_ipsec_sg_autotest, test_inline_ipsec_sg);
REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
3299