xref: /dpdk/app/test/test_security_inline_proto.c (revision 72206323a5dd3182b13f61b25a64abdddfee595c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
18 test_inline_ipsec(void)
19 {
20 	printf("Inline ipsec not supported on Windows, skipping test\n");
21 	return TEST_SKIPPED;
22 }
23 
24 static int
25 test_event_inline_ipsec(void)
26 {
27 	printf("Event inline ipsec not supported on Windows, skipping test\n");
28 	return TEST_SKIPPED;
29 }
30 
31 #else
32 
33 #include <rte_eventdev.h>
34 #include <rte_event_eth_rx_adapter.h>
35 #include <rte_event_eth_tx_adapter.h>
36 
/* Test topology: a single port run in loopback mode. */
#define NB_ETHPORTS_USED		1
#define MEMPOOL_CACHE_SIZE		32
#define MAX_PKT_BURST			32
#define RTE_TEST_RX_DESC_DEFAULT	1024
#define RTE_TEST_TX_DESC_DEFAULT	1024
#define RTE_PORT_ALL		(~(uint16_t)0x0)

#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */

#define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_TRAFFIC_BURST		2048
/* Mbuf pool population; must cover the largest burst plus descriptors. */
#define NB_MBUF				10240

/* Number of SAs (and packet groups) used by burst encap/decap tests. */
#define ENCAP_DECAP_BURST_SZ		33
/* IP reassembly timeout (in ms, per rte_eth_ip_reassembly_params.timeout_ms)
 * programmed into the ethdev before reassembly tests.
 */
#define APP_REASS_TIMEOUT		10

/* IPsec test vectors; defined in the vectors data file of this test suite. */
extern struct ipsec_test_data pkt_aes_128_gcm;
extern struct ipsec_test_data pkt_aes_192_gcm;
extern struct ipsec_test_data pkt_aes_256_gcm;
extern struct ipsec_test_data pkt_aes_128_gcm_frag;
extern struct ipsec_test_data pkt_aes_128_cbc_null;
extern struct ipsec_test_data pkt_null_aes_xcbc;
extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;

/* Packet and security-session mempools, created once in init_mempools(). */
static struct rte_mempool *mbufpool;
static struct rte_mempool *sess_pool;
static struct rte_mempool *sess_priv_pool;
/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* Port configuration: Rx/Tx security offload enabled, loopback so that
 * transmitted (encrypted) packets are received back for verification.
 */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
		.split_hdr_size = 0,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
			    RTE_ETH_RX_OFFLOAD_SECURITY,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
	},
	.lpbk_mode = 1,  /* enable loopback */
};

static struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};

static struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 32, /* Use PMD default values */
	.tx_rs_thresh = 32, /* Use PMD default values */
};

/* The single port under test and the event-mode device/adapters. */
static uint16_t port_id;
static uint8_t eventdev_id;
static uint8_t rx_adapter_id;
static uint8_t tx_adapter_id;

/* True when tests run through the eventdev path instead of polling ethdev. */
static bool event_mode_enabled;

/* Link speed observed on the port, recorded by the link-status check. */
static uint64_t link_mbps;

/* Offset of the IP-reassembly mbuf dynfield; -1 until looked up. */
static int ip_reassembly_dynfield_offset = -1;

/* Per-port catch-all ESP->SECURITY flow rule, see create_default_flow(). */
static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
119 
/*
 * Create an inline IPsec session
 * (RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) on ethdev 'portid' from the
 * test vector 'sa'.
 *
 * 'sess_conf' must arrive with ->crypto_xform (and ->crypto_xform->next for
 * chained cipher+auth vectors) pointing at caller-owned xform storage; the
 * xforms are populated from the vector here.
 *
 * On success returns 0 and sets *sess (created session), *ctx (the port's
 * security context) and *ol_flags (capability ol_flags, e.g. whether Tx
 * metadata must be attached). Returns TEST_SKIPPED when the port or its
 * advertised capabilities cannot support the vector, TEST_FAILED when
 * session creation itself fails.
 */
static int
create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
		struct rte_security_session **sess, struct rte_security_ctx **ctx,
		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
		struct rte_security_session_conf *sess_conf)
{
	/* Fixed tunnel endpoint addresses used for all tunnel-mode vectors. */
	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
				0x0000, 0x001a};
	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
				0xe82c, 0x4887};
	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;
	enum rte_security_ipsec_sa_direction dir;
	struct rte_security_ctx *sec_ctx;
	uint32_t verify;

	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sess_conf->ipsec = sa->ipsec_xform;

	dir = sa->ipsec_xform.direction;
	verify = flags->tunnel_hdr_verify;

	/*
	 * Tunnel-header-verification tests: program the SA with an address
	 * differing from the packet's so the PMD's check is exercised.
	 * NOTE(review): the '+ 1' is applied to the byte-swapped value.
	 */
	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
			src_v4 += 1;
		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
			dst_v4 += 1;
	}

	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (sa->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
					sizeof(src_v4));
			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
					sizeof(dst_v4));

			/* Outer-header DF and DSCP overrides per test flags. */
			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.df = 0;

			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.df = 1;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.dscp =
						TEST_IPSEC_DSCP_VAL;
		} else {
			/* IPv6 tunnel: DSCP and flow-label overrides. */
			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.dscp =
						TEST_IPSEC_DSCP_VAL;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.flabel = 0;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.flabel =
						TEST_IPSEC_FLABEL_VAL;

			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
					sizeof(src_v6));
			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
					sizeof(dst_v6));
		}
	}

	/* Save SA as userdata for the security session. When
	 * the packet is received, this userdata will be
	 * retrieved using the metadata from the packet.
	 *
	 * The PMD is expected to set similar metadata for other
	 * operations, like rte_eth_event, which are tied to
	 * security session. In such cases, the userdata could
	 * be obtained to uniquely identify the security
	 * parameters denoted.
	 */

	sess_conf->userdata = (void *) sa;

	sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
	if (sec_ctx == NULL) {
		printf("Ethernet device doesn't support security features.\n");
		return TEST_SKIPPED;
	}

	/* Look up the inline-protocol IPsec capability matching the vector.
	 * NOTE(review): only these five index fields are initialized; the
	 * remaining bytes of sec_cap_idx stay indeterminate.
	 */
	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
	if (sec_cap == NULL) {
		printf("No capabilities registered\n");
		return TEST_SKIPPED;
	}

	/* AEAD and AES-GMAC vectors carry a salt alongside the key. */
	if (sa->aead || sa->aes_gmac)
		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));

	/* Copy cipher session parameters */
	if (sa->aead) {
		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
				sizeof(struct rte_crypto_sym_xform));
		sess_conf->crypto_xform->aead.key.data = sa->key.data;
		/* Verify crypto capabilities */
		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
			RTE_LOG(INFO, USER1,
				"Crypto capabilities not supported\n");
			return TEST_SKIPPED;
		}
	} else {
		/* Chained mode: cipher-then-auth for egress,
		 * auth-then-cipher for ingress.
		 */
		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			rte_memcpy(&sess_conf->crypto_xform->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));

			rte_memcpy(&sess_conf->crypto_xform->next->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->cipher.key.data =
							sa->key.data;
			sess_conf->crypto_xform->next->auth.key.data =
							sa->auth_key.data;
			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		} else {
			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));
			rte_memcpy(&sess_conf->crypto_xform->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->auth.key.data =
							sa->auth_key.data;
			sess_conf->crypto_xform->next->cipher.key.data =
							sa->key.data;

			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		}
	}

	/* Check remaining IPsec options against the capability entry. */
	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
		return TEST_SKIPPED;

	if ((sa->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
		/* Set env variable when IV generation is disabled */
		char arr[128];
		int len = 0, j = 0;
		/* 8-byte IV for AEAD/GMAC, 16-byte otherwise. */
		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;

		/* Format the vector's IV as "0x.., 0x.., ...".
		 * NOTE(review): presumably consumed by the PMD under test to
		 * force the known IV - confirm against the driver.
		 */
		for (; j < iv_len; j++)
			len += snprintf(arr+len, sizeof(arr) - len,
					"0x%x, ", sa->iv.data[j]);
		setenv("ETH_SEC_IV_OVR", arr, 1);
	}

	*sess = rte_security_session_create(sec_ctx,
				sess_conf, sess_pool, sess_priv_pool);
	if (*sess == NULL) {
		printf("SEC Session init failed.\n");
		return TEST_FAILED;
	}

	*ol_flags = sec_cap->ol_flags;
	*ctx = sec_ctx;

	return 0;
}
326 
/* Check the link status of all ports in up to 3s, and print them finally.
 * Also records the first non-zero link speed into the global 'link_mbps'.
 * 'port_mask' selects which ports (by bit index) are checked.
 */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}

			/* print link status if flag set */
			if (print_flag == 1) {
				/* Remember the first reported link speed. */
				if (link.link_status && link_mbps == 0)
					link_mbps = link.link_speed;

				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout; one more
		 * pass then runs with printing enabled before breaking out.
		 */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
386 
387 static void
388 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
389 {
390 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
391 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
392 	printf("%s%s", name, buf);
393 }
394 
395 static void
396 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
397 		     struct rte_mbuf *pkt, unsigned int offset)
398 {
399 	unsigned int copied = 0;
400 	unsigned int copy_len;
401 	struct rte_mbuf *seg;
402 	void *seg_buf;
403 
404 	seg = pkt;
405 	while (offset >= seg->data_len) {
406 		offset -= seg->data_len;
407 		seg = seg->next;
408 	}
409 	copy_len = seg->data_len - offset;
410 	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
411 	while (len > copy_len) {
412 		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
413 		len -= copy_len;
414 		copied += copy_len;
415 		seg = seg->next;
416 		seg_buf = rte_pktmbuf_mtod(seg, void *);
417 	}
418 	rte_memcpy(seg_buf, buf + copied, (size_t) len);
419 }
420 
421 static inline struct rte_mbuf *
422 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
423 {
424 	struct rte_mbuf *pkt;
425 
426 	pkt = rte_pktmbuf_alloc(mp);
427 	if (pkt == NULL)
428 		return NULL;
429 	if (((data[0] & 0xF0) >> 4) == IPVERSION) {
430 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
431 				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
432 		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
433 	} else {
434 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
435 				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
436 		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
437 	}
438 	pkt->l2_len = RTE_ETHER_HDR_LEN;
439 
440 	if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
441 		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
442 	else
443 		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
444 	return pkt;
445 }
446 
/*
 * Create the global mbuf pool and the security-session pools (idempotent:
 * pools that already exist are kept).
 * Returns 0 on success, TEST_SKIPPED when the port exposes no security
 * context, TEST_FAILED on pool allocation failure.
 */
static int
init_mempools(unsigned int nb_mbuf)
{
	struct rte_security_ctx *sec_ctx;
	uint16_t nb_sess = 512;
	uint32_t sess_sz;
	char s[64];

	if (mbufpool == NULL) {
		snprintf(s, sizeof(s), "mbuf_pool");
		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
				MEMPOOL_CACHE_SIZE, 0,
				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
		if (mbufpool == NULL) {
			printf("Cannot init mbuf pool\n");
			return TEST_FAILED;
		}
		printf("Allocated mbuf pool\n");
	}

	/* Session element size comes from the port's security context. */
	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	if (sec_ctx == NULL) {
		printf("Device does not support Security ctx\n");
		return TEST_SKIPPED;
	}
	sess_sz = rte_security_session_get_size(sec_ctx);
	if (sess_pool == NULL) {
		snprintf(s, sizeof(s), "sess_pool");
		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
				MEMPOOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (sess_pool == NULL) {
			printf("Cannot init sess pool\n");
			return TEST_FAILED;
		}
		printf("Allocated sess pool\n");
	}
	/* NOTE(review): the private-data pool reuses the same element size
	 * as the session pool - confirm this is intended for this PMD.
	 */
	if (sess_priv_pool == NULL) {
		snprintf(s, sizeof(s), "sess_priv_pool");
		sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
				MEMPOOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (sess_priv_pool == NULL) {
			printf("Cannot init sess_priv pool\n");
			return TEST_FAILED;
		}
		printf("Allocated sess_priv pool\n");
	}

	return 0;
}
500 
501 static int
502 create_default_flow(uint16_t portid)
503 {
504 	struct rte_flow_action action[2];
505 	struct rte_flow_item pattern[2];
506 	struct rte_flow_attr attr = {0};
507 	struct rte_flow_error err;
508 	struct rte_flow *flow;
509 	int ret;
510 
511 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
512 
513 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
514 	pattern[0].spec = NULL;
515 	pattern[0].mask = NULL;
516 	pattern[0].last = NULL;
517 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
518 
519 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
520 	action[0].conf = NULL;
521 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
522 	action[1].conf = NULL;
523 
524 	attr.ingress = 1;
525 
526 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
527 	if (ret) {
528 		printf("\nValidate flow failed, ret = %d\n", ret);
529 		return -1;
530 	}
531 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
532 	if (flow == NULL) {
533 		printf("\nDefault flow rule create failed\n");
534 		return -1;
535 	}
536 
537 	default_flow[portid] = flow;
538 
539 	return 0;
540 }
541 
542 static void
543 destroy_default_flow(uint16_t portid)
544 {
545 	struct rte_flow_error err;
546 	int ret;
547 
548 	if (!default_flow[portid])
549 		return;
550 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
551 	if (ret) {
552 		printf("\nDefault flow rule destroy failed\n");
553 		return;
554 	}
555 	default_flow[portid] = NULL;
556 }
557 
/* Scratch Tx/Rx burst arrays shared by the inline IPsec test cases. */
struct rte_mbuf **tx_pkts_burst;
struct rte_mbuf **rx_pkts_burst;
560 
561 static int
562 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
563 {
564 	unsigned int len;
565 	unsigned int nb_segs = m->nb_segs;
566 	unsigned int matched = 0;
567 	struct rte_mbuf *save = m;
568 
569 	while (m) {
570 		len = tot_len;
571 		if (len > m->data_len)
572 			len = m->data_len;
573 		if (len != 0) {
574 			if (memcmp(rte_pktmbuf_mtod(m, char *),
575 					ref + matched, len)) {
576 				printf("\n====Reassembly case failed: Data Mismatch");
577 				rte_hexdump(stdout, "Reassembled",
578 					rte_pktmbuf_mtod(m, char *),
579 					len);
580 				rte_hexdump(stdout, "reference",
581 					ref + matched,
582 					len);
583 				return TEST_FAILED;
584 			}
585 		}
586 		tot_len -= len;
587 		matched += len;
588 		m = m->next;
589 	}
590 
591 	if (tot_len) {
592 		printf("\n====Reassembly case failed: Data Missing %u",
593 		       tot_len);
594 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
595 		rte_pktmbuf_dump(stderr, save, -1);
596 		return TEST_FAILED;
597 	}
598 	return TEST_SUCCESS;
599 }
600 
/*
 * Return true when the PMD marked 'mbuf' with the IP-reassembly-incomplete
 * dynamic flag. The flag's bit position is looked up lazily on first call
 * and cached in a function-local static (no locking; this test is
 * single-threaded per flag lookup).
 */
static inline bool
is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
{
	static uint64_t ip_reassembly_dynflag;
	int ip_reassembly_dynflag_offset;

	if (ip_reassembly_dynflag == 0) {
		/* First use: resolve the dynflag registered by the PMD. */
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset < 0)
			return false;
		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}

	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
}
617 
618 static void
619 free_mbuf(struct rte_mbuf *mbuf)
620 {
621 	rte_eth_ip_reassembly_dynfield_t dynfield;
622 
623 	if (!mbuf)
624 		return;
625 
626 	if (!is_ip_reassembly_incomplete(mbuf)) {
627 		rte_pktmbuf_free(mbuf);
628 	} else {
629 		if (ip_reassembly_dynfield_offset < 0)
630 			return;
631 
632 		while (mbuf) {
633 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
634 					ip_reassembly_dynfield_offset,
635 					rte_eth_ip_reassembly_dynfield_t *);
636 			rte_pktmbuf_free(mbuf);
637 			mbuf = dynfield.next_frag;
638 		}
639 	}
640 }
641 
642 
/*
 * Handle a packet whose HW IP reassembly was left incomplete: walk the
 * fragment chain linked through the reassembly dynfield and compare each
 * fragment (Ethernet header stripped) against the corresponding original
 * fragment in 'vector'. Returns TEST_SUCCESS/TEST_FAILED from
 * compare_pkt_data(), or -1 when the dynfield offset is unknown.
 *
 * NOTE(review): 'dynfield' is sized MAX_PKT_BURST and 'j' is bounded only
 * by nb_frags/next_frag walking - assumes vectors never exceed
 * MAX_PKT_BURST fragments; confirm against the vector definitions.
 */
static int
get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
				struct reassembly_vector *vector)
{
	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
	int j = 0, ret;
	/**
	 * IP reassembly offload is incomplete, and fragments are listed in
	 * dynfield which can be reassembled in SW.
	 */
	printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
		"\nMatching with original frags.");

	if (ip_reassembly_dynfield_offset < 0)
		return -1;

	/* First fragment is the mbuf itself. */
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
				vector->frags[j]->len);
	if (ret)
		return ret;
	j++;
	/* Second fragment hangs off the first mbuf's dynfield. */
	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
			vector->frags[j]->len);
	if (ret)
		return ret;

	/* Remaining fragments: follow next_frag links while the chain
	 * reports more fragments and stays flagged incomplete.
	 */
	while ((dynfield[j]->nb_frags > 1) &&
			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
		j++;
		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
		printf("\ncomparing frag: %d", j);
		/* Skip Ethernet header comparison */
		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(dynfield[j]->next_frag,
				vector->frags[j]->data, vector->frags[j]->len);
		if (ret)
			return ret;
	}
	return ret;
}
693 
694 static int
695 test_ipsec_with_reassembly(struct reassembly_vector *vector,
696 		const struct ipsec_test_flags *flags)
697 {
698 	struct rte_security_session *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
699 	struct rte_security_session *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
700 	struct rte_eth_ip_reassembly_params reass_capa = {0};
701 	struct rte_security_session_conf sess_conf_out = {0};
702 	struct rte_security_session_conf sess_conf_in = {0};
703 	unsigned int nb_tx, burst_sz, nb_sent = 0;
704 	struct rte_crypto_sym_xform cipher_out = {0};
705 	struct rte_crypto_sym_xform auth_out = {0};
706 	struct rte_crypto_sym_xform aead_out = {0};
707 	struct rte_crypto_sym_xform cipher_in = {0};
708 	struct rte_crypto_sym_xform auth_in = {0};
709 	struct rte_crypto_sym_xform aead_in = {0};
710 	struct ipsec_test_data sa_data;
711 	struct rte_security_ctx *ctx;
712 	unsigned int i, nb_rx = 0, j;
713 	uint32_t ol_flags;
714 	int ret = 0;
715 
716 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
717 	nb_tx = vector->nb_frags * burst_sz;
718 
719 	rte_eth_dev_stop(port_id);
720 	if (ret != 0) {
721 		printf("rte_eth_dev_stop: err=%s, port=%u\n",
722 			       rte_strerror(-ret), port_id);
723 		return ret;
724 	}
725 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
726 	if (reass_capa.max_frags < vector->nb_frags)
727 		return TEST_SKIPPED;
728 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
729 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
730 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
731 	}
732 
733 	ret = rte_eth_dev_start(port_id);
734 	if (ret < 0) {
735 		printf("rte_eth_dev_start: err=%d, port=%d\n",
736 			ret, port_id);
737 		return ret;
738 	}
739 
740 	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
741 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
742 
743 	for (i = 0; i < nb_tx; i += vector->nb_frags) {
744 		for (j = 0; j < vector->nb_frags; j++) {
745 			tx_pkts_burst[i+j] = init_packet(mbufpool,
746 						vector->frags[j]->data,
747 						vector->frags[j]->len);
748 			if (tx_pkts_burst[i+j] == NULL) {
749 				ret = -1;
750 				printf("\n packed init failed\n");
751 				goto out;
752 			}
753 		}
754 	}
755 
756 	for (i = 0; i < burst_sz; i++) {
757 		memcpy(&sa_data, vector->sa_data,
758 				sizeof(struct ipsec_test_data));
759 		/* Update SPI for every new SA */
760 		sa_data.ipsec_xform.spi += i;
761 		sa_data.ipsec_xform.direction =
762 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
763 		if (sa_data.aead) {
764 			sess_conf_out.crypto_xform = &aead_out;
765 		} else {
766 			sess_conf_out.crypto_xform = &cipher_out;
767 			sess_conf_out.crypto_xform->next = &auth_out;
768 		}
769 
770 		/* Create Inline IPsec outbound session. */
771 		ret = create_inline_ipsec_session(&sa_data, port_id,
772 				&out_ses[i], &ctx, &ol_flags, flags,
773 				&sess_conf_out);
774 		if (ret) {
775 			printf("\nInline outbound session create failed\n");
776 			goto out;
777 		}
778 	}
779 
780 	j = 0;
781 	for (i = 0; i < nb_tx; i++) {
782 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
783 			rte_security_set_pkt_metadata(ctx,
784 				out_ses[j], tx_pkts_burst[i], NULL);
785 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
786 
787 		/* Move to next SA after nb_frags */
788 		if ((i + 1) % vector->nb_frags == 0)
789 			j++;
790 	}
791 
792 	for (i = 0; i < burst_sz; i++) {
793 		memcpy(&sa_data, vector->sa_data,
794 				sizeof(struct ipsec_test_data));
795 		/* Update SPI for every new SA */
796 		sa_data.ipsec_xform.spi += i;
797 		sa_data.ipsec_xform.direction =
798 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
799 
800 		if (sa_data.aead) {
801 			sess_conf_in.crypto_xform = &aead_in;
802 		} else {
803 			sess_conf_in.crypto_xform = &auth_in;
804 			sess_conf_in.crypto_xform->next = &cipher_in;
805 		}
806 		/* Create Inline IPsec inbound session. */
807 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
808 				&ctx, &ol_flags, flags, &sess_conf_in);
809 		if (ret) {
810 			printf("\nInline inbound session create failed\n");
811 			goto out;
812 		}
813 	}
814 
815 	/* Retrieve reassembly dynfield offset if available */
816 	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
817 		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
818 				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
819 
820 
821 	ret = create_default_flow(port_id);
822 	if (ret)
823 		goto out;
824 
825 	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
826 	if (nb_sent != nb_tx) {
827 		ret = -1;
828 		printf("\nFailed to tx %u pkts", nb_tx);
829 		goto out;
830 	}
831 
832 	rte_delay_ms(1);
833 
834 	/* Retry few times before giving up */
835 	nb_rx = 0;
836 	j = 0;
837 	do {
838 		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
839 					  nb_tx - nb_rx);
840 		j++;
841 		if (nb_rx >= nb_tx)
842 			break;
843 		rte_delay_ms(1);
844 	} while (j < 5 || !nb_rx);
845 
846 	/* Check for minimum number of Rx packets expected */
847 	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
848 	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
849 		printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
850 		ret = TEST_FAILED;
851 		goto out;
852 	}
853 
854 	for (i = 0; i < nb_rx; i++) {
855 		if (vector->nb_frags > 1 &&
856 		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
857 			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
858 							      vector);
859 			if (ret != TEST_SUCCESS)
860 				break;
861 			continue;
862 		}
863 
864 		if (rx_pkts_burst[i]->ol_flags &
865 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
866 		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
867 			printf("\nsecurity offload failed\n");
868 			ret = TEST_FAILED;
869 			break;
870 		}
871 
872 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
873 				rx_pkts_burst[i]->pkt_len) {
874 			printf("\nreassembled/decrypted packet length mismatch\n");
875 			ret = TEST_FAILED;
876 			break;
877 		}
878 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
879 		ret = compare_pkt_data(rx_pkts_burst[i],
880 				       vector->full_pkt->data,
881 				       vector->full_pkt->len);
882 		if (ret != TEST_SUCCESS)
883 			break;
884 	}
885 
886 out:
887 	destroy_default_flow(port_id);
888 
889 	/* Clear session data. */
890 	for (i = 0; i < burst_sz; i++) {
891 		if (out_ses[i])
892 			rte_security_session_destroy(ctx, out_ses[i]);
893 		if (in_ses[i])
894 			rte_security_session_destroy(ctx, in_ses[i]);
895 	}
896 
897 	for (i = nb_sent; i < nb_tx; i++)
898 		free_mbuf(tx_pkts_burst[i]);
899 	for (i = 0; i < nb_rx; i++)
900 		free_mbuf(rx_pkts_burst[i]);
901 	return ret;
902 }
903 
904 static int
905 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
906 {
907 	struct rte_event ev;
908 	int i, nb_sent = 0;
909 
910 	/* Convert packets to events */
911 	memset(&ev, 0, sizeof(ev));
912 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
913 	for (i = 0; i < nb_pkts; i++) {
914 		ev.mbuf = tx_pkts[i];
915 		nb_sent += rte_event_eth_tx_adapter_enqueue(
916 				eventdev_id, port_id, &ev, 1, 0);
917 	}
918 
919 	return nb_sent;
920 }
921 
/*
 * Dequeue packets from the event device into 'rx_pkts' until
 * 'nb_pkts_to_rx' packets are collected or roughly 3 ms per expected
 * packet has elapsed. Non-ETHDEV events are reported and dropped.
 * Returns the number of packets stored.
 */
static int
event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
{
	int nb_ev, nb_rx = 0, j = 0;
	const int ms_per_pkt = 3;
	struct rte_event ev;

	do {
		/* Non-blocking dequeue of a single event. */
		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
				&ev, 1, 0);

		if (nb_ev == 0) {
			rte_delay_ms(1);
			continue;
		}

		/* Get packet from event */
		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
			printf("Unsupported event type: %i\n",
				ev.event_type);
			continue;
		}
		rx_pkts[nb_rx++] = ev.mbuf;
	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);

	return nb_rx;
}
949 
950 static int
951 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
952 		struct ipsec_test_data *res_d,
953 		int nb_pkts,
954 		bool silent,
955 		const struct ipsec_test_flags *flags)
956 {
957 	struct rte_security_session_conf sess_conf = {0};
958 	struct rte_crypto_sym_xform cipher = {0};
959 	struct rte_crypto_sym_xform auth = {0};
960 	struct rte_crypto_sym_xform aead = {0};
961 	struct rte_security_session *ses;
962 	struct rte_security_ctx *ctx;
963 	int nb_rx = 0, nb_sent;
964 	uint32_t ol_flags;
965 	int i, j = 0, ret;
966 
967 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
968 
969 	if (td->aead) {
970 		sess_conf.crypto_xform = &aead;
971 	} else {
972 		if (td->ipsec_xform.direction ==
973 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
974 			sess_conf.crypto_xform = &cipher;
975 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
976 			sess_conf.crypto_xform->next = &auth;
977 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
978 		} else {
979 			sess_conf.crypto_xform = &auth;
980 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
981 			sess_conf.crypto_xform->next = &cipher;
982 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
983 		}
984 	}
985 
986 	/* Create Inline IPsec session. */
987 	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
988 					  &ol_flags, flags, &sess_conf);
989 	if (ret)
990 		return ret;
991 
992 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
993 		ret = create_default_flow(port_id);
994 		if (ret)
995 			goto out;
996 	}
997 	for (i = 0; i < nb_pkts; i++) {
998 		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
999 						td->input_text.len);
1000 		if (tx_pkts_burst[i] == NULL) {
1001 			while (i--)
1002 				rte_pktmbuf_free(tx_pkts_burst[i]);
1003 			ret = TEST_FAILED;
1004 			goto out;
1005 		}
1006 
1007 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
1008 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1009 			while (i--)
1010 				rte_pktmbuf_free(tx_pkts_burst[i]);
1011 			ret = TEST_FAILED;
1012 			goto out;
1013 		}
1014 
1015 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1016 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1017 				rte_security_set_pkt_metadata(ctx, ses,
1018 						tx_pkts_burst[i], NULL);
1019 			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1020 		}
1021 	}
1022 	/* Send packet to ethdev for inline IPsec processing. */
1023 	if (event_mode_enabled)
1024 		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
1025 	else
1026 		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
1027 
1028 	if (nb_sent != nb_pkts) {
1029 		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
1030 		for ( ; nb_sent < nb_pkts; nb_sent++)
1031 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
1032 		ret = TEST_FAILED;
1033 		goto out;
1034 	}
1035 
1036 	rte_pause();
1037 
1038 	/* Receive back packet on loopback interface. */
1039 	if (event_mode_enabled)
1040 		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
1041 	else
1042 		do {
1043 			rte_delay_ms(1);
1044 			nb_rx += rte_eth_rx_burst(port_id, 0,
1045 					&rx_pkts_burst[nb_rx],
1046 					nb_sent - nb_rx);
1047 			if (nb_rx >= nb_sent)
1048 				break;
1049 		} while (j++ < 5 || nb_rx == 0);
1050 
1051 	if (nb_rx != nb_sent) {
1052 		printf("\nUnable to RX all %d packets, received(%i)",
1053 				nb_sent, nb_rx);
1054 		while (--nb_rx >= 0)
1055 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
1056 		ret = TEST_FAILED;
1057 		goto out;
1058 	}
1059 
1060 	for (i = 0; i < nb_rx; i++) {
1061 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
1062 
1063 		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
1064 					      res_d, silent, flags);
1065 		if (ret != TEST_SUCCESS) {
1066 			for ( ; i < nb_rx; i++)
1067 				rte_pktmbuf_free(rx_pkts_burst[i]);
1068 			goto out;
1069 		}
1070 
1071 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1072 					td->ipsec_xform.direction);
1073 		if (ret != TEST_SUCCESS) {
1074 			for ( ; i < nb_rx; i++)
1075 				rte_pktmbuf_free(rx_pkts_burst[i]);
1076 			goto out;
1077 		}
1078 
1079 		rte_pktmbuf_free(rx_pkts_burst[i]);
1080 		rx_pkts_burst[i] = NULL;
1081 	}
1082 
1083 out:
1084 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1085 		destroy_default_flow(port_id);
1086 
1087 	/* Destroy session so that other cases can create the session again */
1088 	rte_security_session_destroy(ctx, ses);
1089 	ses = NULL;
1090 
1091 	return ret;
1092 }
1093 
1094 static int
1095 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1096 {
1097 	struct ipsec_test_data td_outb;
1098 	struct ipsec_test_data td_inb;
1099 	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
1100 	int ret;
1101 
1102 	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1103 			flags->sa_expiry_pkts_hard)
1104 		nb_pkts = IPSEC_TEST_PACKETS_MAX;
1105 
1106 	for (i = 0; i < RTE_DIM(alg_list); i++) {
1107 		test_ipsec_td_prepare(alg_list[i].param1,
1108 				      alg_list[i].param2,
1109 				      flags, &td_outb, 1);
1110 
1111 		if (!td_outb.aead) {
1112 			enum rte_crypto_cipher_algorithm cipher_alg;
1113 			enum rte_crypto_auth_algorithm auth_alg;
1114 
1115 			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1116 			auth_alg = td_outb.xform.chain.auth.auth.algo;
1117 
1118 			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1119 				continue;
1120 
1121 			/* ICV is not applicable for NULL auth */
1122 			if (flags->icv_corrupt &&
1123 			    auth_alg == RTE_CRYPTO_AUTH_NULL)
1124 				continue;
1125 
1126 			/* IV is not applicable for NULL cipher */
1127 			if (flags->iv_gen &&
1128 			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1129 				continue;
1130 		}
1131 
1132 		if (flags->udp_encap)
1133 			td_outb.ipsec_xform.options.udp_encap = 1;
1134 
1135 		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1136 						false, flags);
1137 		if (ret == TEST_SKIPPED)
1138 			continue;
1139 
1140 		if (ret == TEST_FAILED) {
1141 			printf("\n TEST FAILED");
1142 			test_ipsec_display_alg(alg_list[i].param1,
1143 					       alg_list[i].param2);
1144 			fail_cnt++;
1145 			continue;
1146 		}
1147 
1148 		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1149 
1150 		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1151 						false, flags);
1152 		if (ret == TEST_SKIPPED)
1153 			continue;
1154 
1155 		if (ret == TEST_FAILED) {
1156 			printf("\n TEST FAILED");
1157 			test_ipsec_display_alg(alg_list[i].param1,
1158 					       alg_list[i].param2);
1159 			fail_cnt++;
1160 			continue;
1161 		}
1162 
1163 		if (flags->display_alg)
1164 			test_ipsec_display_alg(alg_list[i].param1,
1165 					       alg_list[i].param2);
1166 
1167 		pass_cnt++;
1168 	}
1169 
1170 	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1171 	if (fail_cnt > 0)
1172 		return TEST_FAILED;
1173 	if (pass_cnt > 0)
1174 		return TEST_SUCCESS;
1175 	else
1176 		return TEST_SKIPPED;
1177 }
1178 
1179 static int
1180 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
1181 		struct ipsec_test_data res_d[],
1182 		int nb_pkts,
1183 		bool silent,
1184 		const struct ipsec_test_flags *flags)
1185 {
1186 	struct rte_security_session_conf sess_conf = {0};
1187 	struct ipsec_test_data *res_d_tmp = NULL;
1188 	struct rte_crypto_sym_xform cipher = {0};
1189 	struct rte_crypto_sym_xform auth = {0};
1190 	struct rte_crypto_sym_xform aead = {0};
1191 	struct rte_mbuf *rx_pkt = NULL;
1192 	struct rte_mbuf *tx_pkt = NULL;
1193 	int nb_rx, nb_sent;
1194 	struct rte_security_session *ses;
1195 	struct rte_security_ctx *ctx;
1196 	uint32_t ol_flags;
1197 	int i, ret;
1198 
1199 	if (td[0].aead) {
1200 		sess_conf.crypto_xform = &aead;
1201 	} else {
1202 		if (td[0].ipsec_xform.direction ==
1203 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1204 			sess_conf.crypto_xform = &cipher;
1205 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1206 			sess_conf.crypto_xform->next = &auth;
1207 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1208 		} else {
1209 			sess_conf.crypto_xform = &auth;
1210 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1211 			sess_conf.crypto_xform->next = &cipher;
1212 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1213 		}
1214 	}
1215 
1216 	/* Create Inline IPsec session. */
1217 	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
1218 					  &ol_flags, flags, &sess_conf);
1219 	if (ret)
1220 		return ret;
1221 
1222 	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1223 		ret = create_default_flow(port_id);
1224 		if (ret)
1225 			goto out;
1226 	}
1227 
1228 	for (i = 0; i < nb_pkts; i++) {
1229 		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
1230 					td[i].input_text.len);
1231 		if (tx_pkt == NULL) {
1232 			ret = TEST_FAILED;
1233 			goto out;
1234 		}
1235 
1236 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
1237 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1238 			ret = TEST_FAILED;
1239 			goto out;
1240 		}
1241 
1242 		if (td[i].ipsec_xform.direction ==
1243 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1244 			if (flags->antireplay) {
1245 				sess_conf.ipsec.esn.value =
1246 						td[i].ipsec_xform.esn.value;
1247 				ret = rte_security_session_update(ctx, ses,
1248 						&sess_conf);
1249 				if (ret) {
1250 					printf("Could not update ESN in session\n");
1251 					rte_pktmbuf_free(tx_pkt);
1252 					ret = TEST_SKIPPED;
1253 					goto out;
1254 				}
1255 			}
1256 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1257 				rte_security_set_pkt_metadata(ctx, ses,
1258 						tx_pkt, NULL);
1259 			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1260 		}
1261 		/* Send packet to ethdev for inline IPsec processing. */
1262 		nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
1263 		if (nb_sent != 1) {
1264 			printf("\nUnable to TX packets");
1265 			rte_pktmbuf_free(tx_pkt);
1266 			ret = TEST_FAILED;
1267 			goto out;
1268 		}
1269 
1270 		rte_pause();
1271 
1272 		/* Receive back packet on loopback interface. */
1273 		do {
1274 			rte_delay_ms(1);
1275 			nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
1276 		} while (nb_rx == 0);
1277 
1278 		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);
1279 
1280 		if (res_d != NULL)
1281 			res_d_tmp = &res_d[i];
1282 
1283 		ret = test_ipsec_post_process(rx_pkt, &td[i],
1284 					      res_d_tmp, silent, flags);
1285 		if (ret != TEST_SUCCESS) {
1286 			rte_pktmbuf_free(rx_pkt);
1287 			goto out;
1288 		}
1289 
1290 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1291 					td->ipsec_xform.direction);
1292 		if (ret != TEST_SUCCESS) {
1293 			rte_pktmbuf_free(rx_pkt);
1294 			goto out;
1295 		}
1296 
1297 		rte_pktmbuf_free(rx_pkt);
1298 		rx_pkt = NULL;
1299 		tx_pkt = NULL;
1300 	}
1301 
1302 out:
1303 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1304 		destroy_default_flow(port_id);
1305 
1306 	/* Destroy session so that other cases can create the session again */
1307 	rte_security_session_destroy(ctx, ses);
1308 	ses = NULL;
1309 
1310 	return ret;
1311 }
1312 
1313 static int
1314 ut_setup_inline_ipsec(void)
1315 {
1316 	int ret;
1317 
1318 	/* Start device */
1319 	ret = rte_eth_dev_start(port_id);
1320 	if (ret < 0) {
1321 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1322 			ret, port_id);
1323 		return ret;
1324 	}
1325 	/* always enable promiscuous */
1326 	ret = rte_eth_promiscuous_enable(port_id);
1327 	if (ret != 0) {
1328 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1329 			rte_strerror(-ret), port_id);
1330 		return ret;
1331 	}
1332 
1333 	check_all_ports_link_status(1, RTE_PORT_ALL);
1334 
1335 	return 0;
1336 }
1337 
1338 static void
1339 ut_teardown_inline_ipsec(void)
1340 {
1341 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1342 	uint16_t portid;
1343 	int ret;
1344 
1345 	/* port tear down */
1346 	RTE_ETH_FOREACH_DEV(portid) {
1347 		ret = rte_eth_dev_stop(portid);
1348 		if (ret != 0)
1349 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1350 			       rte_strerror(-ret), portid);
1351 
1352 		/* Clear reassembly configuration */
1353 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1354 	}
1355 }
1356 
1357 static int
1358 inline_ipsec_testsuite_setup(void)
1359 {
1360 	uint16_t nb_rxd;
1361 	uint16_t nb_txd;
1362 	uint16_t nb_ports;
1363 	int ret;
1364 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1365 
1366 	printf("Start inline IPsec test.\n");
1367 
1368 	nb_ports = rte_eth_dev_count_avail();
1369 	if (nb_ports < NB_ETHPORTS_USED) {
1370 		printf("At least %u port(s) used for test\n",
1371 		       NB_ETHPORTS_USED);
1372 		return TEST_SKIPPED;
1373 	}
1374 
1375 	ret = init_mempools(NB_MBUF);
1376 	if (ret)
1377 		return ret;
1378 
1379 	if (tx_pkts_burst == NULL) {
1380 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1381 					  MAX_TRAFFIC_BURST,
1382 					  sizeof(void *),
1383 					  RTE_CACHE_LINE_SIZE);
1384 		if (!tx_pkts_burst)
1385 			return TEST_FAILED;
1386 
1387 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1388 					  MAX_TRAFFIC_BURST,
1389 					  sizeof(void *),
1390 					  RTE_CACHE_LINE_SIZE);
1391 		if (!rx_pkts_burst)
1392 			return TEST_FAILED;
1393 	}
1394 
1395 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1396 
1397 	nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
1398 	nb_txd = RTE_TEST_TX_DESC_DEFAULT;
1399 
1400 	/* configuring port 0 for the test is enough */
1401 	port_id = 0;
1402 	/* port configure */
1403 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1404 				    nb_tx_queue, &port_conf);
1405 	if (ret < 0) {
1406 		printf("Cannot configure device: err=%d, port=%d\n",
1407 			 ret, port_id);
1408 		return ret;
1409 	}
1410 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1411 	if (ret < 0) {
1412 		printf("Cannot get mac address: err=%d, port=%d\n",
1413 			 ret, port_id);
1414 		return ret;
1415 	}
1416 	printf("Port %u ", port_id);
1417 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
1418 	printf("\n");
1419 
1420 	/* tx queue setup */
1421 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1422 				     SOCKET_ID_ANY, &tx_conf);
1423 	if (ret < 0) {
1424 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1425 				ret, port_id);
1426 		return ret;
1427 	}
1428 	/* rx queue steup */
1429 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1430 				     &rx_conf, mbufpool);
1431 	if (ret < 0) {
1432 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1433 				ret, port_id);
1434 		return ret;
1435 	}
1436 	test_ipsec_alg_list_populate();
1437 
1438 	return 0;
1439 }
1440 
1441 static void
1442 inline_ipsec_testsuite_teardown(void)
1443 {
1444 	uint16_t portid;
1445 	int ret;
1446 
1447 	/* port tear down */
1448 	RTE_ETH_FOREACH_DEV(portid) {
1449 		ret = rte_eth_dev_reset(portid);
1450 		if (ret != 0)
1451 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1452 			       rte_strerror(-ret), port_id);
1453 	}
1454 	rte_free(tx_pkts_burst);
1455 	rte_free(rx_pkts_burst);
1456 }
1457 
1458 static int
1459 event_inline_ipsec_testsuite_setup(void)
1460 {
1461 	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
1462 	struct rte_event_dev_info evdev_default_conf = {0};
1463 	struct rte_event_dev_config eventdev_conf = {0};
1464 	struct rte_event_queue_conf eventq_conf = {0};
1465 	struct rte_event_port_conf ev_port_conf = {0};
1466 	const uint16_t nb_txd = 1024, nb_rxd = 1024;
1467 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1468 	uint8_t ev_queue_id = 0, tx_queue_id = 0;
1469 	int nb_eventqueue = 1, nb_eventport = 1;
1470 	const int all_queues = -1;
1471 	uint32_t caps = 0;
1472 	uint16_t nb_ports;
1473 	int ret;
1474 
1475 	printf("Start event inline IPsec test.\n");
1476 
1477 	nb_ports = rte_eth_dev_count_avail();
1478 	if (nb_ports == 0) {
1479 		printf("Test require: 1 port, available: 0\n");
1480 		return TEST_SKIPPED;
1481 	}
1482 
1483 	init_mempools(NB_MBUF);
1484 
1485 	if (tx_pkts_burst == NULL) {
1486 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1487 					  MAX_TRAFFIC_BURST,
1488 					  sizeof(void *),
1489 					  RTE_CACHE_LINE_SIZE);
1490 		if (!tx_pkts_burst)
1491 			return -1;
1492 
1493 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1494 					  MAX_TRAFFIC_BURST,
1495 					  sizeof(void *),
1496 					  RTE_CACHE_LINE_SIZE);
1497 		if (!rx_pkts_burst)
1498 			return -1;
1499 
1500 	}
1501 
1502 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1503 
1504 	/* configuring port 0 for the test is enough */
1505 	port_id = 0;
1506 	/* port configure */
1507 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1508 				    nb_tx_queue, &port_conf);
1509 	if (ret < 0) {
1510 		printf("Cannot configure device: err=%d, port=%d\n",
1511 			 ret, port_id);
1512 		return ret;
1513 	}
1514 
1515 	/* Tx queue setup */
1516 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1517 				     SOCKET_ID_ANY, &tx_conf);
1518 	if (ret < 0) {
1519 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1520 				ret, port_id);
1521 		return ret;
1522 	}
1523 
1524 	/* rx queue steup */
1525 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1526 				     &rx_conf, mbufpool);
1527 	if (ret < 0) {
1528 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1529 				ret, port_id);
1530 		return ret;
1531 	}
1532 
1533 	/* Setup eventdev */
1534 	eventdev_id = 0;
1535 	rx_adapter_id = 0;
1536 	tx_adapter_id = 0;
1537 
1538 	/* Get default conf of eventdev */
1539 	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
1540 	if (ret < 0) {
1541 		printf("Error in getting event device info[devID:%d]\n",
1542 				eventdev_id);
1543 		return ret;
1544 	}
1545 
1546 	/* Get Tx adapter capabilities */
1547 	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
1548 	if (ret < 0) {
1549 		printf("Failed to get event device %d eth tx adapter"
1550 				" capabilities for port %d\n",
1551 				eventdev_id, port_id);
1552 		return ret;
1553 	}
1554 	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
1555 		tx_queue_id = nb_eventqueue++;
1556 
1557 	eventdev_conf.nb_events_limit =
1558 			evdev_default_conf.max_num_events;
1559 	eventdev_conf.nb_event_queue_flows =
1560 			evdev_default_conf.max_event_queue_flows;
1561 	eventdev_conf.nb_event_port_dequeue_depth =
1562 			evdev_default_conf.max_event_port_dequeue_depth;
1563 	eventdev_conf.nb_event_port_enqueue_depth =
1564 			evdev_default_conf.max_event_port_enqueue_depth;
1565 
1566 	eventdev_conf.nb_event_queues = nb_eventqueue;
1567 	eventdev_conf.nb_event_ports = nb_eventport;
1568 
1569 	/* Configure event device */
1570 
1571 	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
1572 	if (ret < 0) {
1573 		printf("Error in configuring event device\n");
1574 		return ret;
1575 	}
1576 
1577 	/* Configure event queue */
1578 	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1579 	eventq_conf.nb_atomic_flows = 1024;
1580 	eventq_conf.nb_atomic_order_sequences = 1024;
1581 
1582 	/* Setup the queue */
1583 	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
1584 	if (ret < 0) {
1585 		printf("Failed to setup event queue %d\n", ret);
1586 		return ret;
1587 	}
1588 
1589 	/* Configure event port */
1590 	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
1591 	if (ret < 0) {
1592 		printf("Failed to setup event port %d\n", ret);
1593 		return ret;
1594 	}
1595 
1596 	/* Make event queue - event port link */
1597 	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
1598 	if (ret < 0) {
1599 		printf("Failed to link event port %d\n", ret);
1600 		return ret;
1601 	}
1602 
1603 	/* Setup port conf */
1604 	ev_port_conf.new_event_threshold = 1200;
1605 	ev_port_conf.dequeue_depth =
1606 			evdev_default_conf.max_event_port_dequeue_depth;
1607 	ev_port_conf.enqueue_depth =
1608 			evdev_default_conf.max_event_port_enqueue_depth;
1609 
1610 	/* Create Rx adapter */
1611 	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
1612 			&ev_port_conf);
1613 	if (ret < 0) {
1614 		printf("Failed to create rx adapter %d\n", ret);
1615 		return ret;
1616 	}
1617 
1618 	/* Setup queue conf */
1619 	queue_conf.ev.queue_id = ev_queue_id;
1620 	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1621 	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
1622 
1623 	/* Add queue to the adapter */
1624 	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
1625 			all_queues, &queue_conf);
1626 	if (ret < 0) {
1627 		printf("Failed to add eth queue to rx adapter %d\n", ret);
1628 		return ret;
1629 	}
1630 
1631 	/* Start rx adapter */
1632 	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
1633 	if (ret < 0) {
1634 		printf("Failed to start rx adapter %d\n", ret);
1635 		return ret;
1636 	}
1637 
1638 	/* Create tx adapter */
1639 	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
1640 			&ev_port_conf);
1641 	if (ret < 0) {
1642 		printf("Failed to create tx adapter %d\n", ret);
1643 		return ret;
1644 	}
1645 
1646 	/* Add queue to the adapter */
1647 	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
1648 			all_queues);
1649 	if (ret < 0) {
1650 		printf("Failed to add eth queue to tx adapter %d\n", ret);
1651 		return ret;
1652 	}
1653 	/* Setup Tx queue & port */
1654 	if (tx_queue_id) {
1655 		/* Setup the queue */
1656 		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
1657 				&eventq_conf);
1658 		if (ret < 0) {
1659 			printf("Failed to setup tx event queue %d\n", ret);
1660 			return ret;
1661 		}
1662 		/* Link Tx event queue to Tx port */
1663 		ret = rte_event_port_link(eventdev_id, port_id,
1664 				&tx_queue_id, NULL, 1);
1665 		if (ret != 1) {
1666 			printf("Failed to link event queue to port\n");
1667 			return ret;
1668 		}
1669 	}
1670 
1671 	/* Start tx adapter */
1672 	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
1673 	if (ret < 0) {
1674 		printf("Failed to start tx adapter %d\n", ret);
1675 		return ret;
1676 	}
1677 
1678 	/* Start eventdev */
1679 	ret = rte_event_dev_start(eventdev_id);
1680 	if (ret < 0) {
1681 		printf("Failed to start event device %d\n", ret);
1682 		return ret;
1683 	}
1684 
1685 	event_mode_enabled = true;
1686 	test_ipsec_alg_list_populate();
1687 
1688 	return 0;
1689 }
1690 
1691 static void
1692 event_inline_ipsec_testsuite_teardown(void)
1693 {
1694 	uint16_t portid;
1695 	int ret;
1696 
1697 	event_mode_enabled = false;
1698 
1699 	/* Stop and release rx adapter */
1700 	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
1701 	if (ret < 0)
1702 		printf("Failed to stop rx adapter %d\n", ret);
1703 	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
1704 	if (ret < 0)
1705 		printf("Failed to remove rx adapter queues %d\n", ret);
1706 	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
1707 	if (ret < 0)
1708 		printf("Failed to free rx adapter %d\n", ret);
1709 
1710 	/* Stop and release tx adapter */
1711 	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
1712 	if (ret < 0)
1713 		printf("Failed to stop tx adapter %d\n", ret);
1714 	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
1715 	if (ret < 0)
1716 		printf("Failed to remove tx adapter queues %d\n", ret);
1717 	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
1718 	if (ret < 0)
1719 		printf("Failed to free tx adapter %d\n", ret);
1720 
1721 	/* Stop and release event devices */
1722 	rte_event_dev_stop(eventdev_id);
1723 	ret = rte_event_dev_close(eventdev_id);
1724 	if (ret < 0)
1725 		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
1726 
1727 	/* port tear down */
1728 	RTE_ETH_FOREACH_DEV(portid) {
1729 		ret = rte_eth_dev_reset(portid);
1730 		if (ret != 0)
1731 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1732 			       rte_strerror(-ret), port_id);
1733 	}
1734 
1735 	rte_free(tx_pkts_burst);
1736 	rte_free(rx_pkts_burst);
1737 }
1738 
1739 static int
1740 test_inline_ip_reassembly(const void *testdata)
1741 {
1742 	struct reassembly_vector reassembly_td = {0};
1743 	const struct reassembly_vector *td = testdata;
1744 	struct ip_reassembly_test_packet full_pkt;
1745 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
1746 	struct ipsec_test_flags flags = {0};
1747 	int i = 0;
1748 
1749 	reassembly_td.sa_data = td->sa_data;
1750 	reassembly_td.nb_frags = td->nb_frags;
1751 	reassembly_td.burst = td->burst;
1752 
1753 	memcpy(&full_pkt, td->full_pkt,
1754 			sizeof(struct ip_reassembly_test_packet));
1755 	reassembly_td.full_pkt = &full_pkt;
1756 
1757 	test_vector_payload_populate(reassembly_td.full_pkt, true);
1758 	for (; i < reassembly_td.nb_frags; i++) {
1759 		memcpy(&frags[i], td->frags[i],
1760 			sizeof(struct ip_reassembly_test_packet));
1761 		reassembly_td.frags[i] = &frags[i];
1762 		test_vector_payload_populate(reassembly_td.frags[i],
1763 				(i == 0) ? true : false);
1764 	}
1765 
1766 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
1767 }
1768 
1769 static int
1770 test_ipsec_inline_proto_known_vec(const void *test_data)
1771 {
1772 	struct ipsec_test_data td_outb;
1773 	struct ipsec_test_flags flags;
1774 
1775 	memset(&flags, 0, sizeof(flags));
1776 
1777 	memcpy(&td_outb, test_data, sizeof(td_outb));
1778 
1779 	if (td_outb.aead ||
1780 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
1781 		/* Disable IV gen to be able to test with known vectors */
1782 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
1783 	}
1784 
1785 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
1786 				false, &flags);
1787 }
1788 
1789 static int
1790 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1791 {
1792 	const struct ipsec_test_data *td = test_data;
1793 	struct ipsec_test_flags flags;
1794 	struct ipsec_test_data td_inb;
1795 
1796 	memset(&flags, 0, sizeof(flags));
1797 
1798 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1799 		test_ipsec_td_in_from_out(td, &td_inb);
1800 	else
1801 		memcpy(&td_inb, td, sizeof(td_inb));
1802 
1803 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
1804 }
1805 
1806 static int
1807 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1808 {
1809 	struct ipsec_test_flags flags;
1810 
1811 	memset(&flags, 0, sizeof(flags));
1812 
1813 	flags.display_alg = true;
1814 
1815 	return test_ipsec_inline_proto_all(&flags);
1816 }
1817 
1818 static int
1819 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
1820 {
1821 	struct ipsec_test_flags flags;
1822 
1823 	memset(&flags, 0, sizeof(flags));
1824 
1825 	flags.udp_encap = true;
1826 
1827 	return test_ipsec_inline_proto_all(&flags);
1828 }
1829 
1830 static int
1831 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
1832 {
1833 	struct ipsec_test_flags flags;
1834 
1835 	memset(&flags, 0, sizeof(flags));
1836 
1837 	flags.udp_encap = true;
1838 	flags.udp_ports_verify = true;
1839 
1840 	return test_ipsec_inline_proto_all(&flags);
1841 }
1842 
1843 static int
1844 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
1845 {
1846 	struct ipsec_test_flags flags;
1847 
1848 	memset(&flags, 0, sizeof(flags));
1849 
1850 	flags.icv_corrupt = true;
1851 
1852 	return test_ipsec_inline_proto_all(&flags);
1853 }
1854 
1855 static int
1856 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
1857 {
1858 	struct ipsec_test_flags flags;
1859 
1860 	memset(&flags, 0, sizeof(flags));
1861 
1862 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
1863 
1864 	return test_ipsec_inline_proto_all(&flags);
1865 }
1866 
1867 static int
1868 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
1869 {
1870 	struct ipsec_test_flags flags;
1871 
1872 	memset(&flags, 0, sizeof(flags));
1873 
1874 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
1875 
1876 	return test_ipsec_inline_proto_all(&flags);
1877 }
1878 
1879 static int
1880 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
1881 {
1882 	struct ipsec_test_flags flags;
1883 
1884 	memset(&flags, 0, sizeof(flags));
1885 
1886 	flags.ip_csum = true;
1887 
1888 	return test_ipsec_inline_proto_all(&flags);
1889 }
1890 
1891 static int
1892 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
1893 {
1894 	struct ipsec_test_flags flags;
1895 
1896 	memset(&flags, 0, sizeof(flags));
1897 
1898 	flags.l4_csum = true;
1899 
1900 	return test_ipsec_inline_proto_all(&flags);
1901 }
1902 
1903 static int
1904 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
1905 {
1906 	struct ipsec_test_flags flags;
1907 
1908 	memset(&flags, 0, sizeof(flags));
1909 
1910 	flags.ipv6 = false;
1911 	flags.tunnel_ipv6 = false;
1912 
1913 	return test_ipsec_inline_proto_all(&flags);
1914 }
1915 
1916 static int
1917 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
1918 {
1919 	struct ipsec_test_flags flags;
1920 
1921 	memset(&flags, 0, sizeof(flags));
1922 
1923 	flags.ipv6 = true;
1924 	flags.tunnel_ipv6 = true;
1925 
1926 	return test_ipsec_inline_proto_all(&flags);
1927 }
1928 
1929 static int
1930 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
1931 {
1932 	struct ipsec_test_flags flags;
1933 
1934 	memset(&flags, 0, sizeof(flags));
1935 
1936 	flags.ipv6 = false;
1937 	flags.tunnel_ipv6 = true;
1938 
1939 	return test_ipsec_inline_proto_all(&flags);
1940 }
1941 
1942 static int
1943 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
1944 {
1945 	struct ipsec_test_flags flags;
1946 
1947 	memset(&flags, 0, sizeof(flags));
1948 
1949 	flags.ipv6 = true;
1950 	flags.tunnel_ipv6 = false;
1951 
1952 	return test_ipsec_inline_proto_all(&flags);
1953 }
1954 
1955 static int
1956 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
1957 {
1958 	struct ipsec_test_flags flags;
1959 
1960 	memset(&flags, 0, sizeof(flags));
1961 
1962 	flags.ipv6 = false;
1963 	flags.transport = true;
1964 
1965 	return test_ipsec_inline_proto_all(&flags);
1966 }
1967 
1968 static int
1969 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
1970 {
1971 	struct ipsec_test_flags flags = {
1972 		.l4_csum = true,
1973 		.transport = true,
1974 	};
1975 
1976 	return test_ipsec_inline_proto_all(&flags);
1977 }
1978 
1979 static int
1980 test_ipsec_inline_proto_stats(const void *data __rte_unused)
1981 {
1982 	struct ipsec_test_flags flags;
1983 
1984 	memset(&flags, 0, sizeof(flags));
1985 
1986 	flags.stats_success = true;
1987 
1988 	return test_ipsec_inline_proto_all(&flags);
1989 }
1990 
1991 static int
1992 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
1993 {
1994 	struct ipsec_test_flags flags;
1995 
1996 	memset(&flags, 0, sizeof(flags));
1997 
1998 	flags.fragment = true;
1999 
2000 	return test_ipsec_inline_proto_all(&flags);
2001 
2002 }
2003 
2004 static int
2005 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
2006 {
2007 	struct ipsec_test_flags flags;
2008 
2009 	memset(&flags, 0, sizeof(flags));
2010 
2011 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
2012 
2013 	return test_ipsec_inline_proto_all(&flags);
2014 }
2015 
2016 static int
2017 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
2018 {
2019 	struct ipsec_test_flags flags;
2020 
2021 	memset(&flags, 0, sizeof(flags));
2022 
2023 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
2024 
2025 	return test_ipsec_inline_proto_all(&flags);
2026 }
2027 
2028 static int
2029 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
2030 {
2031 	struct ipsec_test_flags flags;
2032 
2033 	memset(&flags, 0, sizeof(flags));
2034 
2035 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
2036 
2037 	return test_ipsec_inline_proto_all(&flags);
2038 }
2039 
2040 static int
2041 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
2042 {
2043 	struct ipsec_test_flags flags;
2044 
2045 	memset(&flags, 0, sizeof(flags));
2046 
2047 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
2048 
2049 	return test_ipsec_inline_proto_all(&flags);
2050 }
2051 
2052 static int
2053 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
2054 {
2055 	struct ipsec_test_flags flags;
2056 
2057 	memset(&flags, 0, sizeof(flags));
2058 
2059 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2060 
2061 	return test_ipsec_inline_proto_all(&flags);
2062 }
2063 
2064 static int
2065 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
2066 {
2067 	struct ipsec_test_flags flags;
2068 
2069 	memset(&flags, 0, sizeof(flags));
2070 
2071 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2072 
2073 	return test_ipsec_inline_proto_all(&flags);
2074 }
2075 
2076 static int
2077 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
2078 {
2079 	struct ipsec_test_flags flags;
2080 
2081 	memset(&flags, 0, sizeof(flags));
2082 
2083 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2084 
2085 	return test_ipsec_inline_proto_all(&flags);
2086 }
2087 
2088 static int
2089 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
2090 {
2091 	struct ipsec_test_flags flags;
2092 
2093 	memset(&flags, 0, sizeof(flags));
2094 
2095 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2096 
2097 	return test_ipsec_inline_proto_all(&flags);
2098 }
2099 
2100 static int
2101 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
2102 {
2103 	struct ipsec_test_flags flags;
2104 
2105 	memset(&flags, 0, sizeof(flags));
2106 
2107 	flags.ipv6 = true;
2108 	flags.tunnel_ipv6 = true;
2109 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
2110 
2111 	return test_ipsec_inline_proto_all(&flags);
2112 }
2113 
2114 static int
2115 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
2116 {
2117 	struct ipsec_test_flags flags;
2118 
2119 	memset(&flags, 0, sizeof(flags));
2120 
2121 	flags.ipv6 = true;
2122 	flags.tunnel_ipv6 = true;
2123 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
2124 
2125 	return test_ipsec_inline_proto_all(&flags);
2126 }
2127 
2128 static int
2129 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
2130 {
2131 	struct ipsec_test_flags flags;
2132 
2133 	memset(&flags, 0, sizeof(flags));
2134 
2135 	flags.ipv6 = true;
2136 	flags.tunnel_ipv6 = true;
2137 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
2138 
2139 	return test_ipsec_inline_proto_all(&flags);
2140 }
2141 
2142 static int
2143 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
2144 {
2145 	struct ipsec_test_flags flags;
2146 
2147 	memset(&flags, 0, sizeof(flags));
2148 
2149 	flags.ipv6 = true;
2150 	flags.tunnel_ipv6 = true;
2151 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
2152 
2153 	return test_ipsec_inline_proto_all(&flags);
2154 }
2155 
2156 static int
2157 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
2158 {
2159 	struct ipsec_test_flags flags;
2160 
2161 	memset(&flags, 0, sizeof(flags));
2162 
2163 	flags.ipv6 = true;
2164 	flags.tunnel_ipv6 = true;
2165 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
2166 
2167 	return test_ipsec_inline_proto_all(&flags);
2168 }
2169 
2170 static int
2171 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
2172 {
2173 	struct ipsec_test_flags flags;
2174 
2175 	memset(&flags, 0, sizeof(flags));
2176 
2177 	flags.ipv6 = true;
2178 	flags.tunnel_ipv6 = true;
2179 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
2180 
2181 	return test_ipsec_inline_proto_all(&flags);
2182 }
2183 
2184 static int
2185 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
2186 {
2187 	struct ipsec_test_flags flags;
2188 
2189 	memset(&flags, 0, sizeof(flags));
2190 
2191 	flags.ipv6 = true;
2192 	flags.tunnel_ipv6 = true;
2193 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
2194 
2195 	return test_ipsec_inline_proto_all(&flags);
2196 }
2197 
2198 static int
2199 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
2200 {
2201 	struct ipsec_test_flags flags;
2202 
2203 	memset(&flags, 0, sizeof(flags));
2204 
2205 	flags.ipv6 = true;
2206 	flags.tunnel_ipv6 = true;
2207 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
2208 
2209 	return test_ipsec_inline_proto_all(&flags);
2210 }
2211 
2212 static int
2213 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
2214 {
2215 	struct ipsec_test_flags flags = {
2216 		.dec_ttl_or_hop_limit = true
2217 	};
2218 
2219 	return test_ipsec_inline_proto_all(&flags);
2220 }
2221 
2222 static int
2223 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
2224 {
2225 	struct ipsec_test_flags flags = {
2226 		.ipv6 = true,
2227 		.dec_ttl_or_hop_limit = true
2228 	};
2229 
2230 	return test_ipsec_inline_proto_all(&flags);
2231 }
2232 
2233 static int
2234 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
2235 {
2236 	struct ipsec_test_flags flags;
2237 
2238 	memset(&flags, 0, sizeof(flags));
2239 
2240 	flags.iv_gen = true;
2241 
2242 	return test_ipsec_inline_proto_all(&flags);
2243 }
2244 
2245 static int
2246 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
2247 {
2248 	struct ipsec_test_data td_outb;
2249 	struct ipsec_test_flags flags;
2250 
2251 	memset(&flags, 0, sizeof(flags));
2252 	flags.fragment = true;
2253 
2254 	memcpy(&td_outb, test_data, sizeof(td_outb));
2255 
2256 	/* Disable IV gen to be able to test with known vectors */
2257 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
2258 
2259 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
2260 						&flags);
2261 }
2262 
2263 static int
2264 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
2265 		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
2266 		      uint64_t winsz)
2267 {
2268 	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
2269 	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
2270 	struct ipsec_test_flags flags;
2271 	uint32_t i, ret = 0;
2272 
2273 	memset(&flags, 0, sizeof(flags));
2274 	flags.antireplay = true;
2275 
2276 	for (i = 0; i < nb_pkts; i++) {
2277 		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
2278 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
2279 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
2280 		td_outb[i].ipsec_xform.options.esn = esn_en;
2281 	}
2282 
2283 	for (i = 0; i < nb_pkts; i++)
2284 		td_outb[i].ipsec_xform.esn.value = esn[i];
2285 
2286 	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
2287 				nb_pkts, true, &flags);
2288 	if (ret != TEST_SUCCESS)
2289 		return ret;
2290 
2291 	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
2292 
2293 	for (i = 0; i < nb_pkts; i++) {
2294 		td_inb[i].ipsec_xform.options.esn = esn_en;
2295 		/* Set antireplay flag for packets to be dropped */
2296 		td_inb[i].ar_packet = replayed_pkt[i];
2297 	}
2298 
2299 	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
2300 				true, &flags);
2301 
2302 	return ret;
2303 }
2304 
static int
test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
{
	uint32_t nb_pkts = 5;
	/* Sequence numbers exercising the anti-replay window:
	 * 1. advance the TOP of the window to WS * 2
	 * 2. sequence number within the new window (WS + 1)
	 * 3. sequence number less than the window BOTTOM
	 * 4. sequence number in the middle of the window
	 * 5. replay of the packet in the middle of the window
	 */
	uint64_t esn[5] = {
		winsz * 2,
		winsz + 1,
		winsz,
		winsz + (winsz / 2),
		winsz + (winsz / 2),
	};
	/* Packets 3 (below window) and 5 (duplicate) must be dropped */
	bool replayed_pkt[5] = { false, false, true, false, true };

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
			nb_pkts, false, winsz);
}
2333 
static int
test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
{
	/* Anti-replay checks with a 1024-entry window */
	const uint64_t winsz = 1024;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2339 
static int
test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
{
	/* Anti-replay checks with a 2048-entry window */
	const uint64_t winsz = 2048;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2345 
static int
test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
{
	/* Anti-replay checks with a 4096-entry window */
	const uint64_t winsz = 4096;

	return test_ipsec_inline_proto_pkt_antireplay(test_data, winsz);
}
2351 
/*
 * Exercise ESN (extended sequence number) anti-replay handling across the
 * 32-bit sequence number rollover for the given replay window size.
 * Only the two duplicated sequence numbers must be reported as replayed.
 */
static int
test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
{
	uint32_t nb_pkts = 7;
	bool replayed_pkt[7];
	uint64_t esn[7];

	/* Set the initial sequence number */
	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
	/* 2. Test sequence number at the BOTTOM of the new window
	 * ((1<<32) - WS/2 + 1)
	 */
	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
	/* 3. Test with sequence number within window (1<<32 - 1) */
	esn[3] = (uint64_t)((1ULL << 32) - 1);
	/* 4. Test with sequence number within window (1<<32) */
	esn[4] = (uint64_t)(1ULL << 32);
	/* 5. Test with duplicate sequence number within
	 * new window (1<<32 - 1)
	 */
	esn[5] = (uint64_t)((1ULL << 32) - 1);
	/* 6. Test with duplicate sequence number within new window (1<<32) */
	esn[6] = (uint64_t)(1ULL << 32);

	/* Only the duplicates (packets 5 and 6) must be dropped */
	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = false;
	replayed_pkt[3] = false;
	replayed_pkt[4] = false;
	replayed_pkt[5] = true;
	replayed_pkt[6] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
				     true, winsz);
}
2388 
static int
test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
{
	/* ESN anti-replay checks with a 1024-entry window */
	const uint64_t winsz = 1024;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2394 
static int
test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
{
	/* ESN anti-replay checks with a 2048-entry window */
	const uint64_t winsz = 2048;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2400 
static int
test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
{
	/* ESN anti-replay checks with a 4096-entry window */
	const uint64_t winsz = 4096;

	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, winsz);
}
2406 
2407 
2408 
/*
 * Test case table for the inline IPsec suite. The suite's setup/teardown
 * callbacks are filled in at run time by test_inline_ipsec() /
 * test_event_inline_ipsec() before the suite is executed.
 */
static struct unit_test_suite inline_ipsec_testsuite  = {
	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
	.unit_test_cases = {
		/* Outbound known-vector tests */
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_null_aes_xcbc),

		TEST_CASE_NAMED_WITH_DATA(
			"Outbound fragmented packet",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_fragmented,
			&pkt_aes_128_gcm_frag),

		/* Inbound known-vector tests */
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_null_aes_xcbc),

		/* Combined-mode tests over the supported algorithm list */
		TEST_CASE_NAMED_ST(
			"Combined test alg list",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_display_list),

		TEST_CASE_NAMED_ST(
			"UDP encapsulation",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_encap),
		TEST_CASE_NAMED_ST(
			"UDP encapsulation ports verification test",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_ports_verify),
		TEST_CASE_NAMED_ST(
			"Negative test: ICV corruption",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_err_icv_corrupt),
		TEST_CASE_NAMED_ST(
			"Tunnel dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_dst_addr_verify),
		TEST_CASE_NAMED_ST(
			"Tunnel src and dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
		TEST_CASE_NAMED_ST(
			"Inner IP checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_ip_csum),
		TEST_CASE_NAMED_ST(
			"Inner L4 checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_l4_csum),
		TEST_CASE_NAMED_ST(
			"Tunnel IPv4 in IPv4",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v4_in_v4),
		TEST_CASE_NAMED_ST(
			"Tunnel IPv6 in IPv6",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v6_in_v6),
		TEST_CASE_NAMED_ST(
			"Tunnel IPv4 in IPv6",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v4_in_v6),
		TEST_CASE_NAMED_ST(
			"Tunnel IPv6 in IPv4",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v6_in_v4),
		TEST_CASE_NAMED_ST(
			"Transport IPv4",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_transport_v4),
		TEST_CASE_NAMED_ST(
			"Transport l4 checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_transport_l4_csum),
		TEST_CASE_NAMED_ST(
			"Statistics: success",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_stats),
		TEST_CASE_NAMED_ST(
			"Fragmented packet",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_fragment),
		/* Tunnel header field manipulation tests (DF/DSCP/FLABEL/TTL) */
		TEST_CASE_NAMED_ST(
			"Tunnel header copy DF (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_copy_df_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header copy DF (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_copy_df_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header set DF 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_set_df_0_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header set DF 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_set_df_1_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv4 copy DSCP (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv4 copy DSCP (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv4 set DSCP 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv4 set DSCP 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 copy DSCP (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 copy DSCP (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 set DSCP 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 set DSCP 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 copy FLABEL (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 copy FLABEL (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv4 decrement inner TTL",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_ttl_decrement),
		TEST_CASE_NAMED_ST(
			"Tunnel header IPv6 decrement inner hop limit",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
		TEST_CASE_NAMED_ST(
			"IV generation",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_iv_gen),


		/* Anti-replay window tests (with and without ESN) */
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 1024",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay1024,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 2048",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay2048,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 4096",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay4096,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 1024",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay1024,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 2048",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay2048,
			&pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 4096",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay4096,
			&pkt_aes_128_gcm),

		/* Inline IP reassembly tests */
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 2 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_2frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 2 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_2frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_4frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_4frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with incomplete fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_incomplete_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with overlapping fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_overlap_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with out of order fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with burst of 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),

		TEST_CASES_END() /**< NULL terminate unit test array */
	},
};
2724 
2725 
/*
 * Entry point for the "inline_ipsec_autotest" command: install the default
 * suite-level setup/teardown and run the inline IPsec test suite.
 */
static int
test_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}
2733 
/*
 * Entry point for the "event_inline_ipsec_autotest" command: run the same
 * test suite, but with the event-mode setup/teardown installed instead of
 * the default ones.
 */
static int
test_event_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}
2741 
2742 #endif /* !RTE_EXEC_ENV_WINDOWS */
2743 
/* Register both flavours of the suite with the DPDK test framework. */
REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
2746