xref: /dpdk/app/test/test_security_inline_proto.c (revision efb1a06bb3f8dbcce5e43b49d23d73aaf80b2c8f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2022 Marvell.
3  */
4 
5 
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
12 
13 #include "test.h"
14 #include "test_security_inline_proto_vectors.h"
15 
16 #ifdef RTE_EXEC_ENV_WINDOWS
17 static int
18 test_inline_ipsec(void)
19 {
20 	printf("Inline ipsec not supported on Windows, skipping test\n");
21 	return TEST_SKIPPED;
22 }
23 
24 #else
25 
26 #define NB_ETHPORTS_USED		1
27 #define MEMPOOL_CACHE_SIZE		32
28 #define MAX_PKT_BURST			32
29 #define RTE_TEST_RX_DESC_DEFAULT	1024
30 #define RTE_TEST_TX_DESC_DEFAULT	1024
31 #define RTE_PORT_ALL		(~(uint16_t)0x0)
32 
33 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
34 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
35 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
36 
37 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
38 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
39 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
40 
41 #define MAX_TRAFFIC_BURST		2048
42 #define NB_MBUF				10240
43 
44 #define ENCAP_DECAP_BURST_SZ		33
45 #define APP_REASS_TIMEOUT		10
46 
47 extern struct ipsec_test_data pkt_aes_128_gcm;
48 extern struct ipsec_test_data pkt_aes_192_gcm;
49 extern struct ipsec_test_data pkt_aes_256_gcm;
50 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
51 extern struct ipsec_test_data pkt_aes_128_cbc_null;
52 extern struct ipsec_test_data pkt_null_aes_xcbc;
53 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
54 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
55 
56 static struct rte_mempool *mbufpool;
57 static struct rte_mempool *sess_pool;
58 static struct rte_mempool *sess_priv_pool;
59 /* ethernet addresses of ports */
60 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
61 
62 static struct rte_eth_conf port_conf = {
63 	.rxmode = {
64 		.mq_mode = RTE_ETH_MQ_RX_NONE,
65 		.split_hdr_size = 0,
66 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
67 			    RTE_ETH_RX_OFFLOAD_SECURITY,
68 	},
69 	.txmode = {
70 		.mq_mode = RTE_ETH_MQ_TX_NONE,
71 		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
72 			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
73 	},
74 	.lpbk_mode = 1,  /* enable loopback */
75 };
76 
77 static struct rte_eth_rxconf rx_conf = {
78 	.rx_thresh = {
79 		.pthresh = RX_PTHRESH,
80 		.hthresh = RX_HTHRESH,
81 		.wthresh = RX_WTHRESH,
82 	},
83 	.rx_free_thresh = 32,
84 };
85 
86 static struct rte_eth_txconf tx_conf = {
87 	.tx_thresh = {
88 		.pthresh = TX_PTHRESH,
89 		.hthresh = TX_HTHRESH,
90 		.wthresh = TX_WTHRESH,
91 	},
92 	.tx_free_thresh = 32, /* Use PMD default values */
93 	.tx_rs_thresh = 32, /* Use PMD default values */
94 };
95 
96 uint16_t port_id;
97 
98 static uint64_t link_mbps;
99 
100 static int ip_reassembly_dynfield_offset = -1;
101 
102 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
103 
104 /* Create Inline IPsec session */
105 static int
106 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
107 		struct rte_security_session **sess, struct rte_security_ctx **ctx,
108 		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
109 		struct rte_security_session_conf *sess_conf)
110 {
111 	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
112 				0x0000, 0x001a};
113 	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
114 				0xe82c, 0x4887};
115 	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
116 	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
117 	struct rte_security_capability_idx sec_cap_idx;
118 	const struct rte_security_capability *sec_cap;
119 	enum rte_security_ipsec_sa_direction dir;
120 	struct rte_security_ctx *sec_ctx;
121 	uint32_t verify;
122 
123 	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
124 	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
125 	sess_conf->ipsec = sa->ipsec_xform;
126 
127 	dir = sa->ipsec_xform.direction;
128 	verify = flags->tunnel_hdr_verify;
129 
130 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
131 		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
132 			src_v4 += 1;
133 		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
134 			dst_v4 += 1;
135 	}
136 
137 	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
138 		if (sa->ipsec_xform.tunnel.type ==
139 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
140 			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
141 					sizeof(src_v4));
142 			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
143 					sizeof(dst_v4));
144 
145 			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
146 				sess_conf->ipsec.tunnel.ipv4.df = 0;
147 
148 			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
149 				sess_conf->ipsec.tunnel.ipv4.df = 1;
150 
151 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
152 				sess_conf->ipsec.tunnel.ipv4.dscp = 0;
153 
154 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
155 				sess_conf->ipsec.tunnel.ipv4.dscp =
156 						TEST_IPSEC_DSCP_VAL;
157 		} else {
158 			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
159 				sess_conf->ipsec.tunnel.ipv6.dscp = 0;
160 
161 			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
162 				sess_conf->ipsec.tunnel.ipv6.dscp =
163 						TEST_IPSEC_DSCP_VAL;
164 
165 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
166 				sess_conf->ipsec.tunnel.ipv6.flabel = 0;
167 
168 			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
169 				sess_conf->ipsec.tunnel.ipv6.flabel =
170 						TEST_IPSEC_FLABEL_VAL;
171 
172 			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
173 					sizeof(src_v6));
174 			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
175 					sizeof(dst_v6));
176 		}
177 	}
178 
179 	/* Save the SA as userdata for the security session. When
180 	 * a packet is received, this userdata will be retrieved
181 	 * using the metadata from the packet.
182 	 *
183 	 * The PMD is expected to set similar metadata for other
184 	 * operations, like rte_eth_event, which are tied to the
185 	 * security session. In such cases, the userdata can be
186 	 * used to uniquely identify the corresponding security
187 	 * parameters.
188 	 */
189 
190 	sess_conf->userdata = (void *) sa;
191 
192 	sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
193 	if (sec_ctx == NULL) {
194 		printf("Ethernet device doesn't support security features.\n");
195 		return TEST_SKIPPED;
196 	}
197 
198 	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
199 	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
200 	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
201 	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
202 	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
203 	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
204 	if (sec_cap == NULL) {
205 		printf("No capabilities registered\n");
206 		return TEST_SKIPPED;
207 	}
208 
209 	if (sa->aead || sa->aes_gmac)
210 		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
211 			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
212 
213 	/* Copy cipher session parameters */
214 	if (sa->aead) {
215 		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
216 				sizeof(struct rte_crypto_sym_xform));
217 		sess_conf->crypto_xform->aead.key.data = sa->key.data;
218 		/* Verify crypto capabilities */
219 		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
220 					sess_conf->crypto_xform) != 0) {
221 			RTE_LOG(INFO, USER1,
222 				"Crypto capabilities not supported\n");
223 			return TEST_SKIPPED;
224 		}
225 	} else {
226 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
227 			rte_memcpy(&sess_conf->crypto_xform->cipher,
228 					&sa->xform.chain.cipher.cipher,
229 					sizeof(struct rte_crypto_cipher_xform));
230 
231 			rte_memcpy(&sess_conf->crypto_xform->next->auth,
232 					&sa->xform.chain.auth.auth,
233 					sizeof(struct rte_crypto_auth_xform));
234 			sess_conf->crypto_xform->cipher.key.data =
235 							sa->key.data;
236 			sess_conf->crypto_xform->next->auth.key.data =
237 							sa->auth_key.data;
238 			/* Verify crypto capabilities */
239 			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
240 					sess_conf->crypto_xform) != 0) {
241 				RTE_LOG(INFO, USER1,
242 					"Cipher crypto capabilities not supported\n");
243 				return TEST_SKIPPED;
244 			}
245 
246 			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
247 					sess_conf->crypto_xform->next) != 0) {
248 				RTE_LOG(INFO, USER1,
249 					"Auth crypto capabilities not supported\n");
250 				return TEST_SKIPPED;
251 			}
252 		} else {
253 			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
254 					&sa->xform.chain.cipher.cipher,
255 					sizeof(struct rte_crypto_cipher_xform));
256 			rte_memcpy(&sess_conf->crypto_xform->auth,
257 					&sa->xform.chain.auth.auth,
258 					sizeof(struct rte_crypto_auth_xform));
259 			sess_conf->crypto_xform->auth.key.data =
260 							sa->auth_key.data;
261 			sess_conf->crypto_xform->next->cipher.key.data =
262 							sa->key.data;
263 
264 			/* Verify crypto capabilities */
265 			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
266 					sess_conf->crypto_xform->next) != 0) {
267 				RTE_LOG(INFO, USER1,
268 					"Cipher crypto capabilities not supported\n");
269 				return TEST_SKIPPED;
270 			}
271 
272 			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
273 					sess_conf->crypto_xform) != 0) {
274 				RTE_LOG(INFO, USER1,
275 					"Auth crypto capabilities not supported\n");
276 				return TEST_SKIPPED;
277 			}
278 		}
279 	}
280 
281 	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
282 		return TEST_SKIPPED;
283 
284 	if ((sa->ipsec_xform.direction ==
285 			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
286 			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
287 		/* Set env variable when IV generation is disabled */
288 		char arr[128];
289 		int len = 0, j = 0;
290 		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
291 
292 		for (; j < iv_len; j++)
293 			len += snprintf(arr+len, sizeof(arr) - len,
294 					"0x%x, ", sa->iv.data[j]);
295 		setenv("ETH_SEC_IV_OVR", arr, 1);
296 	}
297 
298 	*sess = rte_security_session_create(sec_ctx,
299 				sess_conf, sess_pool, sess_priv_pool);
300 	if (*sess == NULL) {
301 		printf("SEC Session init failed.\n");
302 		return TEST_FAILED;
303 	}
304 
305 	*ol_flags = sec_cap->ol_flags;
306 	*ctx = sec_ctx;
307 
308 	return 0;
309 }
310 
311 /* Check the link status of all ports for up to 3s and print the final status */
312 static void
313 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
314 {
315 #define CHECK_INTERVAL 100 /* 100ms */
316 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
317 	uint16_t portid;
318 	uint8_t count, all_ports_up, print_flag = 0;
319 	struct rte_eth_link link;
320 	int ret;
321 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
322 
323 	printf("Checking link statuses...\n");
324 	fflush(stdout);
325 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
326 		all_ports_up = 1;
327 		for (portid = 0; portid < port_num; portid++) {
328 			if ((port_mask & (1 << portid)) == 0)
329 				continue;
330 			memset(&link, 0, sizeof(link));
331 			ret = rte_eth_link_get_nowait(portid, &link);
332 			if (ret < 0) {
333 				all_ports_up = 0;
334 				if (print_flag == 1)
335 					printf("Port %u link get failed: %s\n",
336 						portid, rte_strerror(-ret));
337 				continue;
338 			}
339 
340 			/* print link status if flag set */
341 			if (print_flag == 1) {
342 				if (link.link_status && link_mbps == 0)
343 					link_mbps = link.link_speed;
344 
345 				rte_eth_link_to_str(link_status,
346 					sizeof(link_status), &link);
347 				printf("Port %d %s\n", portid, link_status);
348 				continue;
349 			}
350 			/* clear all_ports_up flag if any link down */
351 			if (link.link_status == RTE_ETH_LINK_DOWN) {
352 				all_ports_up = 0;
353 				break;
354 			}
355 		}
356 		/* after finally printing all link status, get out */
357 		if (print_flag == 1)
358 			break;
359 
360 		if (all_ports_up == 0) {
361 			fflush(stdout);
362 			rte_delay_ms(CHECK_INTERVAL);
363 		}
364 
365 		/* set the print_flag if all ports up or timeout */
366 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
367 			print_flag = 1;
368 	}
369 }
370 
371 static void
372 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
373 {
374 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
375 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
376 	printf("%s%s", name, buf);
377 }
378 
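/* Copy 'len' bytes from 'buf' into the mbuf chain 'pkt', starting at byte
 * 'offset' and spilling across segments as needed.
 */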
379 static void
380 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
381 		     struct rte_mbuf *pkt, unsigned int offset)
382 {
383 	unsigned int copied = 0;
384 	unsigned int copy_len;
385 	struct rte_mbuf *seg;
386 	void *seg_buf;
387 
388 	seg = pkt;
389 	while (offset >= seg->data_len) {
390 		offset -= seg->data_len;
391 		seg = seg->next;
392 	}
393 	copy_len = seg->data_len - offset;
394 	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
395 	while (len > copy_len) {
396 		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
397 		len -= copy_len;
398 		copied += copy_len;
399 		seg = seg->next;
400 		seg_buf = rte_pktmbuf_mtod(seg, void *);
401 	}
402 	rte_memcpy(seg_buf, buf + copied, (size_t) len);
403 }
404 
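/* Allocate an mbuf and fill it with a dummy Ethernet header (IPv4 or IPv6,
 * based on the payload's IP version) followed by 'len' bytes of 'data'.
 */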
405 static inline struct rte_mbuf *
406 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
407 {
408 	struct rte_mbuf *pkt;
409 
410 	pkt = rte_pktmbuf_alloc(mp);
411 	if (pkt == NULL)
412 		return NULL;
413 	if (((data[0] & 0xF0) >> 4) == IPVERSION) {
414 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
415 				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
416 		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
417 	} else {
418 		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
419 				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
420 		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
421 	}
422 	pkt->l2_len = RTE_ETHER_HDR_LEN;
423 
424 	if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
425 		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
426 	else
427 		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
428 	return pkt;
429 }
430 
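/* Create the packet mempool and the security session/session-private pools
 * (sized from the port's security context), if not created already.
 */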
431 static int
432 init_mempools(unsigned int nb_mbuf)
433 {
434 	struct rte_security_ctx *sec_ctx;
435 	uint16_t nb_sess = 512;
436 	uint32_t sess_sz;
437 	char s[64];
438 
439 	if (mbufpool == NULL) {
440 		snprintf(s, sizeof(s), "mbuf_pool");
441 		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
442 				MEMPOOL_CACHE_SIZE, 0,
443 				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
444 		if (mbufpool == NULL) {
445 			printf("Cannot init mbuf pool\n");
446 			return TEST_FAILED;
447 		}
448 		printf("Allocated mbuf pool\n");
449 	}
450 
451 	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
452 	if (sec_ctx == NULL) {
453 		printf("Device does not support Security ctx\n");
454 		return TEST_SKIPPED;
455 	}
456 	sess_sz = rte_security_session_get_size(sec_ctx);
457 	if (sess_pool == NULL) {
458 		snprintf(s, sizeof(s), "sess_pool");
459 		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
460 				MEMPOOL_CACHE_SIZE, 0,
461 				NULL, NULL, NULL, NULL,
462 				SOCKET_ID_ANY, 0);
463 		if (sess_pool == NULL) {
464 			printf("Cannot init sess pool\n");
465 			return TEST_FAILED;
466 		}
467 		printf("Allocated sess pool\n");
468 	}
469 	if (sess_priv_pool == NULL) {
470 		snprintf(s, sizeof(s), "sess_priv_pool");
471 		sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
472 				MEMPOOL_CACHE_SIZE, 0,
473 				NULL, NULL, NULL, NULL,
474 				SOCKET_ID_ANY, 0);
475 		if (sess_priv_pool == NULL) {
476 			printf("Cannot init sess_priv pool\n");
477 			return TEST_FAILED;
478 		}
479 		printf("Allocated sess_priv pool\n");
480 	}
481 
482 	return 0;
483 }
484 
485 static int
486 create_default_flow(uint16_t portid)
487 {
488 	struct rte_flow_action action[2];
489 	struct rte_flow_item pattern[2];
490 	struct rte_flow_attr attr = {0};
491 	struct rte_flow_error err;
492 	struct rte_flow *flow;
493 	int ret;
494 
495 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
496 
497 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
498 	pattern[0].spec = NULL;
499 	pattern[0].mask = NULL;
500 	pattern[0].last = NULL;
501 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
502 
503 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
504 	action[0].conf = NULL;
505 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
506 	action[1].conf = NULL;
507 
508 	attr.ingress = 1;
509 
510 	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
511 	if (ret) {
512 		printf("\nValidate flow failed, ret = %d\n", ret);
513 		return -1;
514 	}
515 	flow = rte_flow_create(portid, &attr, pattern, action, &err);
516 	if (flow == NULL) {
517 		printf("\nDefault flow rule create failed\n");
518 		return -1;
519 	}
520 
521 	default_flow[portid] = flow;
522 
523 	return 0;
524 }
525 
526 static void
527 destroy_default_flow(uint16_t portid)
528 {
529 	struct rte_flow_error err;
530 	int ret;
531 
532 	if (!default_flow[portid])
533 		return;
534 	ret = rte_flow_destroy(portid, default_flow[portid], &err);
535 	if (ret) {
536 		printf("\nDefault flow rule destroy failed\n");
537 		return;
538 	}
539 	default_flow[portid] = NULL;
540 }
541 
542 struct rte_mbuf **tx_pkts_burst;
543 struct rte_mbuf **rx_pkts_burst;
544 
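/* Compare the contents of an mbuf chain against 'ref'; fail on a data
 * mismatch or if fewer than 'tot_len' bytes are present.
 */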
545 static int
546 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
547 {
548 	unsigned int len;
549 	unsigned int nb_segs = m->nb_segs;
550 	unsigned int matched = 0;
551 	struct rte_mbuf *save = m;
552 
553 	while (m) {
554 		len = tot_len;
555 		if (len > m->data_len)
556 			len = m->data_len;
557 		if (len != 0) {
558 			if (memcmp(rte_pktmbuf_mtod(m, char *),
559 					ref + matched, len)) {
560 				printf("\n====Reassembly case failed: Data Mismatch");
561 				rte_hexdump(stdout, "Reassembled",
562 					rte_pktmbuf_mtod(m, char *),
563 					len);
564 				rte_hexdump(stdout, "reference",
565 					ref + matched,
566 					len);
567 				return TEST_FAILED;
568 			}
569 		}
570 		tot_len -= len;
571 		matched += len;
572 		m = m->next;
573 	}
574 
575 	if (tot_len) {
576 		printf("\n====Reassembly case failed: Data Missing %u",
577 		       tot_len);
578 		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
579 		rte_pktmbuf_dump(stderr, save, -1);
580 		return TEST_FAILED;
581 	}
582 	return TEST_SUCCESS;
583 }
584 
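/* Return true if the PMD marked the mbuf with the IP reassembly
 * incomplete dynamic flag.
 */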
585 static inline bool
586 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
587 {
588 	static uint64_t ip_reassembly_dynflag;
589 	int ip_reassembly_dynflag_offset;
590 
591 	if (ip_reassembly_dynflag == 0) {
592 		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
593 			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
594 		if (ip_reassembly_dynflag_offset < 0)
595 			return false;
596 		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
597 	}
598 
599 	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
600 }
601 
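/* Free an mbuf; if IP reassembly was left incomplete, also free the
 * fragments chained via the reassembly dynfield.
 */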
602 static void
603 free_mbuf(struct rte_mbuf *mbuf)
604 {
605 	rte_eth_ip_reassembly_dynfield_t dynfield;
606 
607 	if (!mbuf)
608 		return;
609 
610 	if (!is_ip_reassembly_incomplete(mbuf)) {
611 		rte_pktmbuf_free(mbuf);
612 	} else {
613 		if (ip_reassembly_dynfield_offset < 0)
614 			return;
615 
616 		while (mbuf) {
617 			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
618 					ip_reassembly_dynfield_offset,
619 					rte_eth_ip_reassembly_dynfield_t *);
620 			rte_pktmbuf_free(mbuf);
621 			mbuf = dynfield.next_frag;
622 		}
623 	}
624 }
625 
626 
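/* Walk the fragments chained via the reassembly dynfield and compare each
 * one against the corresponding original fragment in 'vector'.
 */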
627 static int
628 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
629 				struct reassembly_vector *vector)
630 {
631 	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
632 	int j = 0, ret;
633 	/**
634 	 * IP reassembly offload is incomplete, and fragments are listed in
635 	 * dynfield which can be reassembled in SW.
636 	 */
637 	printf("\nHW IP reassembly is incomplete; attempting SW reassembly"
638 		"\nand matching against the original fragments.");
639 
640 	if (ip_reassembly_dynfield_offset < 0)
641 		return -1;
642 
643 	printf("\ncomparing frag: %d", j);
644 	/* Skip Ethernet header comparison */
645 	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
646 	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
647 				vector->frags[j]->len);
648 	if (ret)
649 		return ret;
650 	j++;
651 	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
652 					rte_eth_ip_reassembly_dynfield_t *);
653 	printf("\ncomparing frag: %d", j);
654 	/* Skip Ethernet header comparison */
655 	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
656 	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
657 			vector->frags[j]->len);
658 	if (ret)
659 		return ret;
660 
661 	while ((dynfield[j]->nb_frags > 1) &&
662 			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
663 		j++;
664 		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
665 					ip_reassembly_dynfield_offset,
666 					rte_eth_ip_reassembly_dynfield_t *);
667 		printf("\ncomparing frag: %d", j);
668 		/* Skip Ethernet header comparison */
669 		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
670 		ret = compare_pkt_data(dynfield[j]->next_frag,
671 				vector->frags[j]->data, vector->frags[j]->len);
672 		if (ret)
673 			return ret;
674 	}
675 	return ret;
676 }
677 
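/* Encrypt the fragments of 'vector' on outbound SAs, loop them back through
 * inbound SAs and verify that inline decryption plus IP reassembly (HW, or
 * SW fallback via the dynfield) rebuilds the full packet.
 */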
678 static int
679 test_ipsec_with_reassembly(struct reassembly_vector *vector,
680 		const struct ipsec_test_flags *flags)
681 {
682 	struct rte_security_session *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
683 	struct rte_security_session *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
684 	struct rte_eth_ip_reassembly_params reass_capa = {0};
685 	struct rte_security_session_conf sess_conf_out = {0};
686 	struct rte_security_session_conf sess_conf_in = {0};
687 	unsigned int nb_tx, burst_sz, nb_sent = 0;
688 	struct rte_crypto_sym_xform cipher_out = {0};
689 	struct rte_crypto_sym_xform auth_out = {0};
690 	struct rte_crypto_sym_xform aead_out = {0};
691 	struct rte_crypto_sym_xform cipher_in = {0};
692 	struct rte_crypto_sym_xform auth_in = {0};
693 	struct rte_crypto_sym_xform aead_in = {0};
694 	struct ipsec_test_data sa_data;
695 	struct rte_security_ctx *ctx;
696 	unsigned int i, nb_rx = 0, j;
697 	uint32_t ol_flags;
698 	int ret = 0;
699 
700 	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
701 	nb_tx = vector->nb_frags * burst_sz;
702 
703 	ret = rte_eth_dev_stop(port_id);
704 	if (ret != 0) {
705 		printf("rte_eth_dev_stop: err=%s, port=%u\n",
706 			       rte_strerror(-ret), port_id);
707 		return ret;
708 	}
709 	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
710 	if (reass_capa.max_frags < vector->nb_frags)
711 		return TEST_SKIPPED;
712 	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
713 		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
714 		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
715 	}
716 
717 	ret = rte_eth_dev_start(port_id);
718 	if (ret < 0) {
719 		printf("rte_eth_dev_start: err=%d, port=%d\n",
720 			ret, port_id);
721 		return ret;
722 	}
723 
724 	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
725 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
726 
727 	for (i = 0; i < nb_tx; i += vector->nb_frags) {
728 		for (j = 0; j < vector->nb_frags; j++) {
729 			tx_pkts_burst[i+j] = init_packet(mbufpool,
730 						vector->frags[j]->data,
731 						vector->frags[j]->len);
732 			if (tx_pkts_burst[i+j] == NULL) {
733 				ret = -1;
734 				printf("\npacket init failed\n");
735 				goto out;
736 			}
737 		}
738 	}
739 
740 	for (i = 0; i < burst_sz; i++) {
741 		memcpy(&sa_data, vector->sa_data,
742 				sizeof(struct ipsec_test_data));
743 		/* Update SPI for every new SA */
744 		sa_data.ipsec_xform.spi += i;
745 		sa_data.ipsec_xform.direction =
746 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
747 		if (sa_data.aead) {
748 			sess_conf_out.crypto_xform = &aead_out;
749 		} else {
750 			sess_conf_out.crypto_xform = &cipher_out;
751 			sess_conf_out.crypto_xform->next = &auth_out;
752 		}
753 
754 		/* Create Inline IPsec outbound session. */
755 		ret = create_inline_ipsec_session(&sa_data, port_id,
756 				&out_ses[i], &ctx, &ol_flags, flags,
757 				&sess_conf_out);
758 		if (ret) {
759 			printf("\nInline outbound session create failed\n");
760 			goto out;
761 		}
762 	}
763 
764 	j = 0;
765 	for (i = 0; i < nb_tx; i++) {
766 		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
767 			rte_security_set_pkt_metadata(ctx,
768 				out_ses[j], tx_pkts_burst[i], NULL);
769 		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
770 
771 		/* Move to next SA after nb_frags */
772 		if ((i + 1) % vector->nb_frags == 0)
773 			j++;
774 	}
775 
776 	for (i = 0; i < burst_sz; i++) {
777 		memcpy(&sa_data, vector->sa_data,
778 				sizeof(struct ipsec_test_data));
779 		/* Update SPI for every new SA */
780 		sa_data.ipsec_xform.spi += i;
781 		sa_data.ipsec_xform.direction =
782 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
783 
784 		if (sa_data.aead) {
785 			sess_conf_in.crypto_xform = &aead_in;
786 		} else {
787 			sess_conf_in.crypto_xform = &auth_in;
788 			sess_conf_in.crypto_xform->next = &cipher_in;
789 		}
790 		/* Create Inline IPsec inbound session. */
791 		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
792 				&ctx, &ol_flags, flags, &sess_conf_in);
793 		if (ret) {
794 			printf("\nInline inbound session create failed\n");
795 			goto out;
796 		}
797 	}
798 
799 	/* Retrieve reassembly dynfield offset if available */
800 	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
801 		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
802 				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
803 
804 
805 	ret = create_default_flow(port_id);
806 	if (ret)
807 		goto out;
808 
809 	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
810 	if (nb_sent != nb_tx) {
811 		ret = -1;
812 		printf("\nFailed to tx %u pkts", nb_tx);
813 		goto out;
814 	}
815 
816 	rte_delay_ms(1);
817 
818 	/* Retry a few times before giving up */
819 	nb_rx = 0;
820 	j = 0;
821 	do {
822 		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
823 					  nb_tx - nb_rx);
824 		j++;
825 		if (nb_rx >= nb_tx)
826 			break;
827 		rte_delay_ms(1);
828 	} while (j < 5 || !nb_rx);
829 
830 	/* Check for minimum number of Rx packets expected */
831 	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
832 	    (vector->nb_frags > 1 && nb_rx < burst_sz)) {
833 		printf("\nReceived fewer Rx pkts (%u) than expected\n", nb_rx);
834 		ret = TEST_FAILED;
835 		goto out;
836 	}
837 
838 	for (i = 0; i < nb_rx; i++) {
839 		if (vector->nb_frags > 1 &&
840 		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
841 			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
842 							      vector);
843 			if (ret != TEST_SUCCESS)
844 				break;
845 			continue;
846 		}
847 
848 		if (rx_pkts_burst[i]->ol_flags &
849 		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
850 		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
851 			printf("\nsecurity offload failed\n");
852 			ret = TEST_FAILED;
853 			break;
854 		}
855 
856 		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
857 				rx_pkts_burst[i]->pkt_len) {
858 			printf("\nreassembled/decrypted packet length mismatch\n");
859 			ret = TEST_FAILED;
860 			break;
861 		}
862 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
863 		ret = compare_pkt_data(rx_pkts_burst[i],
864 				       vector->full_pkt->data,
865 				       vector->full_pkt->len);
866 		if (ret != TEST_SUCCESS)
867 			break;
868 	}
869 
870 out:
871 	destroy_default_flow(port_id);
872 
873 	/* Clear session data. */
874 	for (i = 0; i < burst_sz; i++) {
875 		if (out_ses[i])
876 			rte_security_session_destroy(ctx, out_ses[i]);
877 		if (in_ses[i])
878 			rte_security_session_destroy(ctx, in_ses[i]);
879 	}
880 
881 	for (i = nb_sent; i < nb_tx; i++)
882 		free_mbuf(tx_pkts_burst[i]);
883 	for (i = 0; i < nb_rx; i++)
884 		free_mbuf(rx_pkts_burst[i]);
885 	return ret;
886 }
887 
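/* Core single-SA test: build 'nb_pkts' from 'td', push them through inline
 * IPsec on the loopback port and post-process the packets received back.
 */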
888 static int
889 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
890 		struct ipsec_test_data *res_d,
891 		int nb_pkts,
892 		bool silent,
893 		const struct ipsec_test_flags *flags)
894 {
895 	struct rte_security_session_conf sess_conf = {0};
896 	struct rte_crypto_sym_xform cipher = {0};
897 	struct rte_crypto_sym_xform auth = {0};
898 	struct rte_crypto_sym_xform aead = {0};
899 	struct rte_security_session *ses;
900 	struct rte_security_ctx *ctx;
901 	int nb_rx = 0, nb_sent;
902 	uint32_t ol_flags;
903 	int i, j = 0, ret;
904 
905 	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
906 
907 	if (td->aead) {
908 		sess_conf.crypto_xform = &aead;
909 	} else {
910 		if (td->ipsec_xform.direction ==
911 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
912 			sess_conf.crypto_xform = &cipher;
913 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
914 			sess_conf.crypto_xform->next = &auth;
915 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
916 		} else {
917 			sess_conf.crypto_xform = &auth;
918 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
919 			sess_conf.crypto_xform->next = &cipher;
920 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
921 		}
922 	}
923 
924 	/* Create Inline IPsec session. */
925 	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
926 					  &ol_flags, flags, &sess_conf);
927 	if (ret)
928 		return ret;
929 
930 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
931 		ret = create_default_flow(port_id);
932 		if (ret)
933 			goto out;
934 	}
935 	for (i = 0; i < nb_pkts; i++) {
936 		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
937 						td->input_text.len);
938 		if (tx_pkts_burst[i] == NULL) {
939 			while (i--)
940 				rte_pktmbuf_free(tx_pkts_burst[i]);
941 			ret = TEST_FAILED;
942 			goto out;
943 		}
944 
945 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
946 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
947 			while (i--)
948 				rte_pktmbuf_free(tx_pkts_burst[i]);
949 			ret = TEST_FAILED;
950 			goto out;
951 		}
952 
953 		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
954 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
955 				rte_security_set_pkt_metadata(ctx, ses,
956 						tx_pkts_burst[i], NULL);
957 			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
958 		}
959 	}
960 	/* Send packet to ethdev for inline IPsec processing. */
961 	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
962 	if (nb_sent != nb_pkts) {
963 		printf("\nUnable to TX %d packets", nb_pkts);
964 		for ( ; nb_sent < nb_pkts; nb_sent++)
965 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
966 		ret = TEST_FAILED;
967 		goto out;
968 	}
969 
970 	rte_pause();
971 
972 	/* Receive back packet on loopback interface. */
973 	do {
974 		rte_delay_ms(1);
975 		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
976 				nb_sent - nb_rx);
977 		if (nb_rx >= nb_sent)
978 			break;
979 	} while (j++ < 5 || nb_rx == 0);
980 
981 	if (nb_rx != nb_sent) {
982 		printf("\nUnable to RX all %d packets", nb_sent);
983 		while (nb_rx--)
984 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
985 		ret = TEST_FAILED;
986 		goto out;
987 	}
988 
989 	for (i = 0; i < nb_rx; i++) {
990 		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
991 
992 		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
993 					      res_d, silent, flags);
994 		if (ret != TEST_SUCCESS) {
995 			for ( ; i < nb_rx; i++)
996 				rte_pktmbuf_free(rx_pkts_burst[i]);
997 			goto out;
998 		}
999 
1000 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1001 					td->ipsec_xform.direction);
1002 		if (ret != TEST_SUCCESS) {
1003 			for ( ; i < nb_rx; i++)
1004 				rte_pktmbuf_free(rx_pkts_burst[i]);
1005 			goto out;
1006 		}
1007 
1008 		rte_pktmbuf_free(rx_pkts_burst[i]);
1009 		rx_pkts_burst[i] = NULL;
1010 	}
1011 
1012 out:
1013 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1014 		destroy_default_flow(port_id);
1015 
1016 	/* Destroy session so that other cases can create the session again */
1017 	rte_security_session_destroy(ctx, ses);
1018 	ses = NULL;
1019 
1020 	return ret;
1021 }
1022 
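/* Run outbound followed by inbound processing for every algorithm in
 * alg_list using the given test flags.
 */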
1023 static int
1024 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1025 {
1026 	struct ipsec_test_data td_outb;
1027 	struct ipsec_test_data td_inb;
1028 	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
1029 	int ret;
1030 
1031 	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1032 			flags->sa_expiry_pkts_hard)
1033 		nb_pkts = IPSEC_TEST_PACKETS_MAX;
1034 
1035 	for (i = 0; i < RTE_DIM(alg_list); i++) {
1036 		test_ipsec_td_prepare(alg_list[i].param1,
1037 				      alg_list[i].param2,
1038 				      flags, &td_outb, 1);
1039 
1040 		if (!td_outb.aead) {
1041 			enum rte_crypto_cipher_algorithm cipher_alg;
1042 			enum rte_crypto_auth_algorithm auth_alg;
1043 
1044 			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1045 			auth_alg = td_outb.xform.chain.auth.auth.algo;
1046 
1047 			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1048 				continue;
1049 
1050 			/* ICV is not applicable for NULL auth */
1051 			if (flags->icv_corrupt &&
1052 			    auth_alg == RTE_CRYPTO_AUTH_NULL)
1053 				continue;
1054 
1055 			/* IV is not applicable for NULL cipher */
1056 			if (flags->iv_gen &&
1057 			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1058 				continue;
1059 		}
1060 
1061 		if (flags->udp_encap)
1062 			td_outb.ipsec_xform.options.udp_encap = 1;
1063 
1064 		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1065 						false, flags);
1066 		if (ret == TEST_SKIPPED)
1067 			continue;
1068 
1069 		if (ret == TEST_FAILED) {
1070 			printf("\n TEST FAILED");
1071 			test_ipsec_display_alg(alg_list[i].param1,
1072 					       alg_list[i].param2);
1073 			fail_cnt++;
1074 			continue;
1075 		}
1076 
1077 		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1078 
1079 		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1080 						false, flags);
1081 		if (ret == TEST_SKIPPED)
1082 			continue;
1083 
1084 		if (ret == TEST_FAILED) {
1085 			printf("\n TEST FAILED");
1086 			test_ipsec_display_alg(alg_list[i].param1,
1087 					       alg_list[i].param2);
1088 			fail_cnt++;
1089 			continue;
1090 		}
1091 
1092 		if (flags->display_alg)
1093 			test_ipsec_display_alg(alg_list[i].param1,
1094 					       alg_list[i].param2);
1095 
1096 		pass_cnt++;
1097 	}
1098 
1099 	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1100 	if (fail_cnt > 0)
1101 		return TEST_FAILED;
1102 	if (pass_cnt > 0)
1103 		return TEST_SUCCESS;
1104 	else
1105 		return TEST_SKIPPED;
1106 }
1107 
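/* Variant of test_ipsec_inline_proto_process() that sends one packet at a
 * time so the session ESN can be updated between packets (anti-replay).
 */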
1108 static int
1109 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
1110 		struct ipsec_test_data res_d[],
1111 		int nb_pkts,
1112 		bool silent,
1113 		const struct ipsec_test_flags *flags)
1114 {
1115 	struct rte_security_session_conf sess_conf = {0};
1116 	struct ipsec_test_data *res_d_tmp = NULL;
1117 	struct rte_crypto_sym_xform cipher = {0};
1118 	struct rte_crypto_sym_xform auth = {0};
1119 	struct rte_crypto_sym_xform aead = {0};
1120 	struct rte_mbuf *rx_pkt = NULL;
1121 	struct rte_mbuf *tx_pkt = NULL;
1122 	int nb_rx, nb_sent;
1123 	struct rte_security_session *ses;
1124 	struct rte_security_ctx *ctx;
1125 	uint32_t ol_flags;
1126 	int i, ret;
1127 
1128 	if (td[0].aead) {
1129 		sess_conf.crypto_xform = &aead;
1130 	} else {
1131 		if (td[0].ipsec_xform.direction ==
1132 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1133 			sess_conf.crypto_xform = &cipher;
1134 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1135 			sess_conf.crypto_xform->next = &auth;
1136 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1137 		} else {
1138 			sess_conf.crypto_xform = &auth;
1139 			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1140 			sess_conf.crypto_xform->next = &cipher;
1141 			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1142 		}
1143 	}
1144 
1145 	/* Create Inline IPsec session. */
1146 	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
1147 					  &ol_flags, flags, &sess_conf);
1148 	if (ret)
1149 		return ret;
1150 
1151 	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1152 		ret = create_default_flow(port_id);
1153 		if (ret)
1154 			goto out;
1155 	}
1156 
1157 	for (i = 0; i < nb_pkts; i++) {
1158 		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
1159 					td[i].input_text.len);
1160 		if (tx_pkt == NULL) {
1161 			ret = TEST_FAILED;
1162 			goto out;
1163 		}
1164 
1165 		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
1166 					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1167 			ret = TEST_FAILED;
1168 			goto out;
1169 		}
1170 
1171 		if (td[i].ipsec_xform.direction ==
1172 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1173 			if (flags->antireplay) {
1174 				sess_conf.ipsec.esn.value =
1175 						td[i].ipsec_xform.esn.value;
1176 				ret = rte_security_session_update(ctx, ses,
1177 						&sess_conf);
1178 				if (ret) {
1179 					printf("Could not update ESN in session\n");
1180 					rte_pktmbuf_free(tx_pkt);
1181 					ret = TEST_SKIPPED;
1182 					goto out;
1183 				}
1184 			}
1185 			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1186 				rte_security_set_pkt_metadata(ctx, ses,
1187 						tx_pkt, NULL);
1188 			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1189 		}
1190 		/* Send packet to ethdev for inline IPsec processing. */
1191 		nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
1192 		if (nb_sent != 1) {
1193 			printf("\nUnable to TX packet");
1194 			rte_pktmbuf_free(tx_pkt);
1195 			ret = TEST_FAILED;
1196 			goto out;
1197 		}
1198 
1199 		rte_pause();
1200 
1201 		/* Receive back packet on loopback interface. */
1202 		do {
1203 			rte_delay_ms(1);
1204 			nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
1205 		} while (nb_rx == 0);
1206 
1207 		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);
1208 
1209 		if (res_d != NULL)
1210 			res_d_tmp = &res_d[i];
1211 
1212 		ret = test_ipsec_post_process(rx_pkt, &td[i],
1213 					      res_d_tmp, silent, flags);
1214 		if (ret != TEST_SUCCESS) {
1215 			rte_pktmbuf_free(rx_pkt);
1216 			goto out;
1217 		}
1218 
1219 		ret = test_ipsec_stats_verify(ctx, ses, flags,
1220 					td->ipsec_xform.direction);
1221 		if (ret != TEST_SUCCESS) {
1222 			rte_pktmbuf_free(rx_pkt);
1223 			goto out;
1224 		}
1225 
1226 		rte_pktmbuf_free(rx_pkt);
1227 		rx_pkt = NULL;
1228 		tx_pkt = NULL;
1229 	}
1230 
1231 out:
1232 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1233 		destroy_default_flow(port_id);
1234 
1235 	/* Destroy session so that other cases can create the session again */
1236 	rte_security_session_destroy(ctx, ses);
1237 	ses = NULL;
1238 
1239 	return ret;
1240 }
1241 
1242 static int
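/* Per-test setup: start the port, enable promiscuous mode and wait for
 * the link to come up.
 */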
1243 ut_setup_inline_ipsec(void)
1244 {
1245 	int ret;
1246 
1247 	/* Start device */
1248 	ret = rte_eth_dev_start(port_id);
1249 	if (ret < 0) {
1250 		printf("rte_eth_dev_start: err=%d, port=%d\n",
1251 			ret, port_id);
1252 		return ret;
1253 	}
1254 	/* always enable promiscuous */
1255 	ret = rte_eth_promiscuous_enable(port_id);
1256 	if (ret != 0) {
1257 		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1258 			rte_strerror(-ret), port_id);
1259 		return ret;
1260 	}
1261 
1262 	check_all_ports_link_status(1, RTE_PORT_ALL);
1263 
1264 	return 0;
1265 }
1266 
1267 static void
1268 ut_teardown_inline_ipsec(void)
1269 {
1270 	struct rte_eth_ip_reassembly_params reass_conf = {0};
1271 	uint16_t portid;
1272 	int ret;
1273 
1274 	/* port tear down */
1275 	RTE_ETH_FOREACH_DEV(portid) {
1276 		ret = rte_eth_dev_stop(portid);
1277 		if (ret != 0)
1278 			printf("rte_eth_dev_stop: err=%s, port=%u\n",
1279 			       rte_strerror(-ret), portid);
1280 
1281 		/* Clear reassembly configuration */
1282 		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
1283 	}
1284 }
1285 
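/* Testsuite setup: create mempools and burst arrays, configure port 0 with
 * one Rx and one Tx queue, and populate the algorithm list.
 */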
1286 static int
1287 inline_ipsec_testsuite_setup(void)
1288 {
1289 	uint16_t nb_rxd;
1290 	uint16_t nb_txd;
1291 	uint16_t nb_ports;
1292 	int ret;
1293 	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1294 
1295 	printf("Start inline IPsec test.\n");
1296 
1297 	nb_ports = rte_eth_dev_count_avail();
1298 	if (nb_ports < NB_ETHPORTS_USED) {
1299 		printf("At least %u port(s) needed for test\n",
1300 		       NB_ETHPORTS_USED);
1301 		return TEST_SKIPPED;
1302 	}
1303 
1304 	ret = init_mempools(NB_MBUF);
1305 	if (ret)
1306 		return ret;
1307 
1308 	if (tx_pkts_burst == NULL) {
1309 		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1310 					  MAX_TRAFFIC_BURST,
1311 					  sizeof(void *),
1312 					  RTE_CACHE_LINE_SIZE);
1313 		if (!tx_pkts_burst)
1314 			return TEST_FAILED;
1315 
1316 		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1317 					  MAX_TRAFFIC_BURST,
1318 					  sizeof(void *),
1319 					  RTE_CACHE_LINE_SIZE);
1320 		if (!rx_pkts_burst)
1321 			return TEST_FAILED;
1322 	}
1323 
1324 	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1325 
1326 	nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
1327 	nb_txd = RTE_TEST_TX_DESC_DEFAULT;
1328 
1329 	/* configuring port 0 for the test is enough */
1330 	port_id = 0;
1331 	/* port configure */
1332 	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1333 				    nb_tx_queue, &port_conf);
1334 	if (ret < 0) {
1335 		printf("Cannot configure device: err=%d, port=%d\n",
1336 			 ret, port_id);
1337 		return ret;
1338 	}
1339 	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1340 	if (ret < 0) {
1341 		printf("Cannot get mac address: err=%d, port=%d\n",
1342 			 ret, port_id);
1343 		return ret;
1344 	}
1345 	printf("Port %u ", port_id);
1346 	print_ethaddr("Address:", &ports_eth_addr[port_id]);
1347 	printf("\n");
1348 
1349 	/* tx queue setup */
1350 	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1351 				     SOCKET_ID_ANY, &tx_conf);
1352 	if (ret < 0) {
1353 		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1354 				ret, port_id);
1355 		return ret;
1356 	}
1357 	/* rx queue setup */
1358 	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1359 				     &rx_conf, mbufpool);
1360 	if (ret < 0) {
1361 		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1362 				ret, port_id);
1363 		return ret;
1364 	}
1365 	test_ipsec_alg_list_populate();
1366 
1367 	return 0;
1368 }
1369 
1370 static void
1371 inline_ipsec_testsuite_teardown(void)
1372 {
1373 	uint16_t portid;
1374 	int ret;
1375 
1376 	/* port tear down */
1377 	RTE_ETH_FOREACH_DEV(portid) {
1378 		ret = rte_eth_dev_reset(portid);
1379 		if (ret != 0)
1380 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
1381 			       rte_strerror(-ret), portid);
1382 	}
1383 }
1384 
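/* Build a local copy of the reassembly vector with populated payloads and
 * run the IPsec-with-reassembly test on it.
 */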
1385 static int
1386 test_inline_ip_reassembly(const void *testdata)
1387 {
1388 	struct reassembly_vector reassembly_td = {0};
1389 	const struct reassembly_vector *td = testdata;
1390 	struct ip_reassembly_test_packet full_pkt;
1391 	struct ip_reassembly_test_packet frags[MAX_FRAGS];
1392 	struct ipsec_test_flags flags = {0};
1393 	int i = 0;
1394 
1395 	reassembly_td.sa_data = td->sa_data;
1396 	reassembly_td.nb_frags = td->nb_frags;
1397 	reassembly_td.burst = td->burst;
1398 
1399 	memcpy(&full_pkt, td->full_pkt,
1400 			sizeof(struct ip_reassembly_test_packet));
1401 	reassembly_td.full_pkt = &full_pkt;
1402 
1403 	test_vector_payload_populate(reassembly_td.full_pkt, true);
1404 	for (; i < reassembly_td.nb_frags; i++) {
1405 		memcpy(&frags[i], td->frags[i],
1406 			sizeof(struct ip_reassembly_test_packet));
1407 		reassembly_td.frags[i] = &frags[i];
1408 		test_vector_payload_populate(reassembly_td.frags[i],
1409 				(i == 0) ? true : false);
1410 	}
1411 
1412 	return test_ipsec_with_reassembly(&reassembly_td, &flags);
1413 }
1414 
1415 static int
1416 test_ipsec_inline_proto_known_vec(const void *test_data)
1417 {
1418 	struct ipsec_test_data td_outb;
1419 	struct ipsec_test_flags flags;
1420 
1421 	memset(&flags, 0, sizeof(flags));
1422 
1423 	memcpy(&td_outb, test_data, sizeof(td_outb));
1424 
1425 	if (td_outb.aead ||
1426 	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
1427 		/* Disable IV gen to be able to test with known vectors */
1428 		td_outb.ipsec_xform.options.iv_gen_disable = 1;
1429 	}
1430 
1431 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
1432 				false, &flags);
1433 }
1434 
1435 static int
1436 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1437 {
1438 	const struct ipsec_test_data *td = test_data;
1439 	struct ipsec_test_flags flags;
1440 	struct ipsec_test_data td_inb;
1441 
1442 	memset(&flags, 0, sizeof(flags));
1443 
1444 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1445 		test_ipsec_td_in_from_out(td, &td_inb);
1446 	else
1447 		memcpy(&td_inb, td, sizeof(td_inb));
1448 
1449 	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
1450 }
1451 
1452 static int
1453 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1454 {
1455 	struct ipsec_test_flags flags;
1456 
1457 	memset(&flags, 0, sizeof(flags));
1458 
1459 	flags.display_alg = true;
1460 
1461 	return test_ipsec_inline_proto_all(&flags);
1462 }
1463 
1464 static int
1465 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
1466 {
1467 	struct ipsec_test_flags flags;
1468 
1469 	memset(&flags, 0, sizeof(flags));
1470 
1471 	flags.udp_encap = true;
1472 
1473 	return test_ipsec_inline_proto_all(&flags);
1474 }
1475 
1476 static int
1477 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
1478 {
1479 	struct ipsec_test_flags flags;
1480 
1481 	memset(&flags, 0, sizeof(flags));
1482 
1483 	flags.udp_encap = true;
1484 	flags.udp_ports_verify = true;
1485 
1486 	return test_ipsec_inline_proto_all(&flags);
1487 }
1488 
1489 static int
1490 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
1491 {
1492 	struct ipsec_test_flags flags;
1493 
1494 	memset(&flags, 0, sizeof(flags));
1495 
1496 	flags.icv_corrupt = true;
1497 
1498 	return test_ipsec_inline_proto_all(&flags);
1499 }
1500 
1501 static int
1502 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
1503 {
1504 	struct ipsec_test_flags flags;
1505 
1506 	memset(&flags, 0, sizeof(flags));
1507 
1508 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
1509 
1510 	return test_ipsec_inline_proto_all(&flags);
1511 }
1512 
1513 static int
1514 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
1515 {
1516 	struct ipsec_test_flags flags;
1517 
1518 	memset(&flags, 0, sizeof(flags));
1519 
1520 	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
1521 
1522 	return test_ipsec_inline_proto_all(&flags);
1523 }
1524 
1525 static int
1526 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
1527 {
1528 	struct ipsec_test_flags flags;
1529 
1530 	memset(&flags, 0, sizeof(flags));
1531 
1532 	flags.ip_csum = true;
1533 
1534 	return test_ipsec_inline_proto_all(&flags);
1535 }
1536 
1537 static int
1538 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
1539 {
1540 	struct ipsec_test_flags flags;
1541 
1542 	memset(&flags, 0, sizeof(flags));
1543 
1544 	flags.l4_csum = true;
1545 
1546 	return test_ipsec_inline_proto_all(&flags);
1547 }
1548 
1549 static int
1550 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
1551 {
1552 	struct ipsec_test_flags flags;
1553 
1554 	memset(&flags, 0, sizeof(flags));
1555 
1556 	flags.ipv6 = false;
1557 	flags.tunnel_ipv6 = false;
1558 
1559 	return test_ipsec_inline_proto_all(&flags);
1560 }
1561 
1562 static int
1563 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
1564 {
1565 	struct ipsec_test_flags flags;
1566 
1567 	memset(&flags, 0, sizeof(flags));
1568 
1569 	flags.ipv6 = true;
1570 	flags.tunnel_ipv6 = true;
1571 
1572 	return test_ipsec_inline_proto_all(&flags);
1573 }
1574 
1575 static int
1576 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
1577 {
1578 	struct ipsec_test_flags flags;
1579 
1580 	memset(&flags, 0, sizeof(flags));
1581 
1582 	flags.ipv6 = false;
1583 	flags.tunnel_ipv6 = true;
1584 
1585 	return test_ipsec_inline_proto_all(&flags);
1586 }
1587 
1588 static int
1589 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
1590 {
1591 	struct ipsec_test_flags flags;
1592 
1593 	memset(&flags, 0, sizeof(flags));
1594 
1595 	flags.ipv6 = true;
1596 	flags.tunnel_ipv6 = false;
1597 
1598 	return test_ipsec_inline_proto_all(&flags);
1599 }
1600 
1601 static int
1602 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
1603 {
1604 	struct ipsec_test_flags flags;
1605 
1606 	memset(&flags, 0, sizeof(flags));
1607 
1608 	flags.ipv6 = false;
1609 	flags.transport = true;
1610 
1611 	return test_ipsec_inline_proto_all(&flags);
1612 }
1613 
1614 static int
1615 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
1616 {
1617 	struct ipsec_test_flags flags = {
1618 		.l4_csum = true,
1619 		.transport = true,
1620 	};
1621 
1622 	return test_ipsec_inline_proto_all(&flags);
1623 }
1624 
1625 static int
1626 test_ipsec_inline_proto_stats(const void *data __rte_unused)
1627 {
1628 	struct ipsec_test_flags flags;
1629 
1630 	memset(&flags, 0, sizeof(flags));
1631 
1632 	flags.stats_success = true;
1633 
1634 	return test_ipsec_inline_proto_all(&flags);
1635 }
1636 
1637 static int
1638 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
1639 {
1640 	struct ipsec_test_flags flags;
1641 
1642 	memset(&flags, 0, sizeof(flags));
1643 
1644 	flags.fragment = true;
1645 
1646 	return test_ipsec_inline_proto_all(&flags);
1647 
1648 }
1649 
1650 static int
1651 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
1652 {
1653 	struct ipsec_test_flags flags;
1654 
1655 	memset(&flags, 0, sizeof(flags));
1656 
1657 	flags.df = TEST_IPSEC_COPY_DF_INNER_0;
1658 
1659 	return test_ipsec_inline_proto_all(&flags);
1660 }
1661 
1662 static int
1663 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
1664 {
1665 	struct ipsec_test_flags flags;
1666 
1667 	memset(&flags, 0, sizeof(flags));
1668 
1669 	flags.df = TEST_IPSEC_COPY_DF_INNER_1;
1670 
1671 	return test_ipsec_inline_proto_all(&flags);
1672 }
1673 
1674 static int
1675 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
1676 {
1677 	struct ipsec_test_flags flags;
1678 
1679 	memset(&flags, 0, sizeof(flags));
1680 
1681 	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
1682 
1683 	return test_ipsec_inline_proto_all(&flags);
1684 }
1685 
1686 static int
1687 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
1688 {
1689 	struct ipsec_test_flags flags;
1690 
1691 	memset(&flags, 0, sizeof(flags));
1692 
1693 	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
1694 
1695 	return test_ipsec_inline_proto_all(&flags);
1696 }
1697 
1698 static int
1699 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
1700 {
1701 	struct ipsec_test_flags flags;
1702 
1703 	memset(&flags, 0, sizeof(flags));
1704 
1705 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1706 
1707 	return test_ipsec_inline_proto_all(&flags);
1708 }
1709 
1710 static int
1711 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
1712 {
1713 	struct ipsec_test_flags flags;
1714 
1715 	memset(&flags, 0, sizeof(flags));
1716 
1717 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1718 
1719 	return test_ipsec_inline_proto_all(&flags);
1720 }
1721 
1722 static int
1723 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
1724 {
1725 	struct ipsec_test_flags flags;
1726 
1727 	memset(&flags, 0, sizeof(flags));
1728 
1729 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1730 
1731 	return test_ipsec_inline_proto_all(&flags);
1732 }
1733 
1734 static int
1735 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
1736 {
1737 	struct ipsec_test_flags flags;
1738 
1739 	memset(&flags, 0, sizeof(flags));
1740 
1741 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1742 
1743 	return test_ipsec_inline_proto_all(&flags);
1744 }
1745 
1746 static int
1747 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
1748 {
1749 	struct ipsec_test_flags flags;
1750 
1751 	memset(&flags, 0, sizeof(flags));
1752 
1753 	flags.ipv6 = true;
1754 	flags.tunnel_ipv6 = true;
1755 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1756 
1757 	return test_ipsec_inline_proto_all(&flags);
1758 }
1759 
1760 static int
1761 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
1762 {
1763 	struct ipsec_test_flags flags;
1764 
1765 	memset(&flags, 0, sizeof(flags));
1766 
1767 	flags.ipv6 = true;
1768 	flags.tunnel_ipv6 = true;
1769 	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1770 
1771 	return test_ipsec_inline_proto_all(&flags);
1772 }
1773 
1774 static int
1775 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
1776 {
1777 	struct ipsec_test_flags flags;
1778 
1779 	memset(&flags, 0, sizeof(flags));
1780 
1781 	flags.ipv6 = true;
1782 	flags.tunnel_ipv6 = true;
1783 	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1784 
1785 	return test_ipsec_inline_proto_all(&flags);
1786 }
1787 
1788 static int
1789 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
1790 {
1791 	struct ipsec_test_flags flags;
1792 
1793 	memset(&flags, 0, sizeof(flags));
1794 
1795 	flags.ipv6 = true;
1796 	flags.tunnel_ipv6 = true;
1797 	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1798 
1799 	return test_ipsec_inline_proto_all(&flags);
1800 }
1801 
1802 static int
1803 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
1804 {
1805 	struct ipsec_test_flags flags;
1806 
1807 	memset(&flags, 0, sizeof(flags));
1808 
1809 	flags.ipv6 = true;
1810 	flags.tunnel_ipv6 = true;
1811 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
1812 
1813 	return test_ipsec_inline_proto_all(&flags);
1814 }
1815 
1816 static int
1817 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
1818 {
1819 	struct ipsec_test_flags flags;
1820 
1821 	memset(&flags, 0, sizeof(flags));
1822 
1823 	flags.ipv6 = true;
1824 	flags.tunnel_ipv6 = true;
1825 	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
1826 
1827 	return test_ipsec_inline_proto_all(&flags);
1828 }
1829 
1830 static int
1831 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
1832 {
1833 	struct ipsec_test_flags flags;
1834 
1835 	memset(&flags, 0, sizeof(flags));
1836 
1837 	flags.ipv6 = true;
1838 	flags.tunnel_ipv6 = true;
1839 	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
1840 
1841 	return test_ipsec_inline_proto_all(&flags);
1842 }
1843 
1844 static int
1845 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
1846 {
1847 	struct ipsec_test_flags flags;
1848 
1849 	memset(&flags, 0, sizeof(flags));
1850 
1851 	flags.ipv6 = true;
1852 	flags.tunnel_ipv6 = true;
1853 	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
1854 
1855 	return test_ipsec_inline_proto_all(&flags);
1856 }
1857 
1858 static int
1859 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
1860 {
1861 	struct ipsec_test_flags flags = {
1862 		.dec_ttl_or_hop_limit = true
1863 	};
1864 
1865 	return test_ipsec_inline_proto_all(&flags);
1866 }
1867 
1868 static int
1869 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
1870 {
1871 	struct ipsec_test_flags flags = {
1872 		.ipv6 = true,
1873 		.dec_ttl_or_hop_limit = true
1874 	};
1875 
1876 	return test_ipsec_inline_proto_all(&flags);
1877 }
1878 
1879 static int
1880 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
1881 {
1882 	struct ipsec_test_flags flags;
1883 
1884 	memset(&flags, 0, sizeof(flags));
1885 
1886 	flags.iv_gen = true;
1887 
1888 	return test_ipsec_inline_proto_all(&flags);
1889 }
1890 
1891 static int
1892 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
1893 {
1894 	struct ipsec_test_data td_outb;
1895 	struct ipsec_test_flags flags;
1896 
1897 	memset(&flags, 0, sizeof(flags));
1898 	flags.fragment = true;
1899 
1900 	memcpy(&td_outb, test_data, sizeof(td_outb));
1901 
1902 	/* Disable IV gen to be able to test with known vectors */
1903 	td_outb.ipsec_xform.options.iv_gen_disable = 1;
1904 
1905 	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
1906 						&flags);
1907 }
1908 
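/* Send 'nb_pkts' outbound with the given ESN values, then replay them
 * inbound; packets marked in replayed_pkt[] are expected to be dropped by
 * anti-replay.
 */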
1909 static int
1910 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
1911 		      bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
1912 		      uint64_t winsz)
1913 {
1914 	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
1915 	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
1916 	struct ipsec_test_flags flags;
1917 	uint32_t i, ret = 0;
1918 
1919 	memset(&flags, 0, sizeof(flags));
1920 	flags.antireplay = true;
1921 
1922 	for (i = 0; i < nb_pkts; i++) {
1923 		memcpy(&td_outb[i], test_data, sizeof(td_outb[i]));
1924 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
1925 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
1926 		td_outb[i].ipsec_xform.options.esn = esn_en;
1927 	}
1928 
1929 	for (i = 0; i < nb_pkts; i++)
1930 		td_outb[i].ipsec_xform.esn.value = esn[i];
1931 
1932 	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
1933 				nb_pkts, true, &flags);
1934 	if (ret != TEST_SUCCESS)
1935 		return ret;
1936 
1937 	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
1938 
1939 	for (i = 0; i < nb_pkts; i++) {
1940 		td_inb[i].ipsec_xform.options.esn = esn_en;
1941 		/* Set antireplay flag for packets to be dropped */
1942 		td_inb[i].ar_packet = replayed_pkt[i];
1943 	}
1944 
1945 	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
1946 				true, &flags);
1947 
1948 	return ret;
1949 }
1950 
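/*
 * Basic (non-ESN) anti-replay scenario: the first packet advances the top of
 * the window to 2 * WS, so valid sequence numbers then lie in (WS, 2 * WS].
 * Packet 3 (seq == WS) falls below the window bottom and packet 5 repeats the
 * sequence number of packet 4; both are expected to be dropped.
 */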
1951 static int
1952 test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
1953 {
1954 
1955 	uint32_t nb_pkts = 5;
1956 	bool replayed_pkt[5];
1957 	uint64_t esn[5];
1958 
1959 	/* 1. Advance the TOP of the window to WS * 2 */
1960 	esn[0] = winsz * 2;
1961 	/* 2. Test sequence number within the new window (WS + 1) */
1962 	esn[1] = winsz + 1;
1963 	/* 3. Test sequence number less than the window BOTTOM */
1964 	esn[2] = winsz;
1965 	/* 4. Test sequence number in the middle of the window */
1966 	esn[3] = winsz + (winsz / 2);
1967 	/* 5. Test replay of the packet in the middle of the window */
1968 	esn[4] = winsz + (winsz / 2);
1969 
1970 	replayed_pkt[0] = false;
1971 	replayed_pkt[1] = false;
1972 	replayed_pkt[2] = true;
1973 	replayed_pkt[3] = false;
1974 	replayed_pkt[4] = true;
1975 
1976 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
1977 			nb_pkts, false, winsz);
1978 }
1979 
1980 static int
1981 test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
1982 {
1983 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024);
1984 }
1985 
1986 static int
1987 test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
1988 {
1989 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048);
1990 }
1991 
1992 static int
1993 test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
1994 {
1995 	return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096);
1996 }
1997 
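/*
 * ESN anti-replay scenario: the sequence numbers straddle the 2^32 boundary,
 * so the inbound side has to reconstruct the high 32 bits of the extended
 * sequence number (RFC 4303 style) when sliding the window. The last two
 * packets reuse sequence numbers already accepted near the boundary and must
 * be flagged as replays.
 */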
1998 static int
1999 test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
2000 {
2001 
2002 	uint32_t nb_pkts = 7;
2003 	bool replayed_pkt[7];
2004 	uint64_t esn[7];
2005 
2006 	/* Set the initial sequence number */
2007 	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
2008 	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
2009 	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
2010 	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
2011 	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
2012 	/* 3. Test with sequence number within window (1<<32 - 1) */
2013 	esn[3] = (uint64_t)((1ULL << 32) - 1);
2014 	/* 4. Test with sequence number within window (1<<32) */
2015 	esn[4] = (uint64_t)(1ULL << 32);
2016 	/* 5. Test with duplicate sequence number within
2017 	 * new window (1<<32 - 1)
2018 	 */
2019 	esn[5] = (uint64_t)((1ULL << 32) - 1);
2020 	/* 6. Test with duplicate sequence number within new window (1<<32) */
2021 	esn[6] = (uint64_t)(1ULL << 32);
2022 
2023 	replayed_pkt[0] = false;
2024 	replayed_pkt[1] = false;
2025 	replayed_pkt[2] = false;
2026 	replayed_pkt[3] = false;
2027 	replayed_pkt[4] = false;
2028 	replayed_pkt[5] = true;
2029 	replayed_pkt[6] = true;
2030 
2031 	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
2032 				     true, winsz);
2033 }
2034 
2035 static int
2036 test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
2037 {
2038 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024);
2039 }
2040 
2041 static int
2042 test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
2043 {
2044 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048);
2045 }
2046 
2047 static int
2048 test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
2049 {
2050 	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096);
2051 }
2052 
2053 
2054 
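/*
 * Suite layout: known-vector outbound/inbound cases, a combined algorithm
 * sweep, tunnel header field handling (DF, DSCP, flow label, TTL/hop limit),
 * UDP encapsulation, anti-replay with and without ESN, and inline IP
 * reassembly vectors.
 */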
2055 static struct unit_test_suite inline_ipsec_testsuite = {
2056 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
2057 	.setup = inline_ipsec_testsuite_setup,
2058 	.teardown = inline_ipsec_testsuite_teardown,
2059 	.unit_test_cases = {
2060 		TEST_CASE_NAMED_WITH_DATA(
2061 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2062 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2063 			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
2064 		TEST_CASE_NAMED_WITH_DATA(
2065 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2066 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2067 			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
2068 		TEST_CASE_NAMED_WITH_DATA(
2069 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2070 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2071 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
2072 		TEST_CASE_NAMED_WITH_DATA(
2073 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2074 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2075 			test_ipsec_inline_proto_known_vec,
2076 			&pkt_aes_128_cbc_hmac_sha256),
2077 		TEST_CASE_NAMED_WITH_DATA(
2078 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2079 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2080 			test_ipsec_inline_proto_known_vec,
2081 			&pkt_aes_128_cbc_hmac_sha384),
2082 		TEST_CASE_NAMED_WITH_DATA(
2083 			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2084 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2085 			test_ipsec_inline_proto_known_vec,
2086 			&pkt_aes_128_cbc_hmac_sha512),
2087 		TEST_CASE_NAMED_WITH_DATA(
2088 			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
2089 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2090 			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
2091 		TEST_CASE_NAMED_WITH_DATA(
2092 			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2093 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2094 			test_ipsec_inline_proto_known_vec,
2095 			&pkt_aes_128_cbc_hmac_sha256_v6),
2096 		TEST_CASE_NAMED_WITH_DATA(
2097 			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2098 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2099 			test_ipsec_inline_proto_known_vec,
2100 			&pkt_null_aes_xcbc),
2101 
2102 		TEST_CASE_NAMED_WITH_DATA(
2103 			"Outbound fragmented packet",
2104 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2105 			test_ipsec_inline_proto_known_vec_fragmented,
2106 			&pkt_aes_128_gcm_frag),
2107 
2108 		TEST_CASE_NAMED_WITH_DATA(
2109 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
2110 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2111 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
2112 		TEST_CASE_NAMED_WITH_DATA(
2113 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
2114 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2115 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
2116 		TEST_CASE_NAMED_WITH_DATA(
2117 			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
2118 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2119 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
2120 		TEST_CASE_NAMED_WITH_DATA(
2121 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
2122 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2123 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
2124 		TEST_CASE_NAMED_WITH_DATA(
2125 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2126 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2127 			test_ipsec_inline_proto_known_vec_inb,
2128 			&pkt_aes_128_cbc_hmac_sha256),
2129 		TEST_CASE_NAMED_WITH_DATA(
2130 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
2131 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2132 			test_ipsec_inline_proto_known_vec_inb,
2133 			&pkt_aes_128_cbc_hmac_sha384),
2134 		TEST_CASE_NAMED_WITH_DATA(
2135 			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
2136 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2137 			test_ipsec_inline_proto_known_vec_inb,
2138 			&pkt_aes_128_cbc_hmac_sha512),
2139 		TEST_CASE_NAMED_WITH_DATA(
2140 			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
2141 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2142 			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
2143 		TEST_CASE_NAMED_WITH_DATA(
2144 			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
2145 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2146 			test_ipsec_inline_proto_known_vec_inb,
2147 			&pkt_aes_128_cbc_hmac_sha256_v6),
2148 		TEST_CASE_NAMED_WITH_DATA(
2149 			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
2150 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2151 			test_ipsec_inline_proto_known_vec_inb,
2152 			&pkt_null_aes_xcbc),
2153 
2154 		TEST_CASE_NAMED_ST(
2155 			"Combined test alg list",
2156 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2157 			test_ipsec_inline_proto_display_list),
2158 
2159 		TEST_CASE_NAMED_ST(
2160 			"UDP encapsulation",
2161 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2162 			test_ipsec_inline_proto_udp_encap),
2163 		TEST_CASE_NAMED_ST(
2164 			"UDP encapsulation ports verification test",
2165 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2166 			test_ipsec_inline_proto_udp_ports_verify),
2167 		TEST_CASE_NAMED_ST(
2168 			"Negative test: ICV corruption",
2169 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2170 			test_ipsec_inline_proto_err_icv_corrupt),
2171 		TEST_CASE_NAMED_ST(
2172 			"Tunnel dst addr verification",
2173 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2174 			test_ipsec_inline_proto_tunnel_dst_addr_verify),
2175 		TEST_CASE_NAMED_ST(
2176 			"Tunnel src and dst addr verification",
2177 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2178 			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
2179 		TEST_CASE_NAMED_ST(
2180 			"Inner IP checksum",
2181 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2182 			test_ipsec_inline_proto_inner_ip_csum),
2183 		TEST_CASE_NAMED_ST(
2184 			"Inner L4 checksum",
2185 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2186 			test_ipsec_inline_proto_inner_l4_csum),
2187 		TEST_CASE_NAMED_ST(
2188 			"Tunnel IPv4 in IPv4",
2189 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2190 			test_ipsec_inline_proto_tunnel_v4_in_v4),
2191 		TEST_CASE_NAMED_ST(
2192 			"Tunnel IPv6 in IPv6",
2193 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2194 			test_ipsec_inline_proto_tunnel_v6_in_v6),
2195 		TEST_CASE_NAMED_ST(
2196 			"Tunnel IPv4 in IPv6",
2197 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2198 			test_ipsec_inline_proto_tunnel_v4_in_v6),
2199 		TEST_CASE_NAMED_ST(
2200 			"Tunnel IPv6 in IPv4",
2201 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2202 			test_ipsec_inline_proto_tunnel_v6_in_v4),
2203 		TEST_CASE_NAMED_ST(
2204 			"Transport IPv4",
2205 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2206 			test_ipsec_inline_proto_transport_v4),
2207 		TEST_CASE_NAMED_ST(
2208 			"Transport L4 checksum",
2209 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2210 			test_ipsec_inline_proto_transport_l4_csum),
2211 		TEST_CASE_NAMED_ST(
2212 			"Statistics: success",
2213 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2214 			test_ipsec_inline_proto_stats),
2215 		TEST_CASE_NAMED_ST(
2216 			"Fragmented packet",
2217 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2218 			test_ipsec_inline_proto_pkt_fragment),
2219 		TEST_CASE_NAMED_ST(
2220 			"Tunnel header copy DF (inner 0)",
2221 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2222 			test_ipsec_inline_proto_copy_df_inner_0),
2223 		TEST_CASE_NAMED_ST(
2224 			"Tunnel header copy DF (inner 1)",
2225 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2226 			test_ipsec_inline_proto_copy_df_inner_1),
2227 		TEST_CASE_NAMED_ST(
2228 			"Tunnel header set DF 0 (inner 1)",
2229 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2230 			test_ipsec_inline_proto_set_df_0_inner_1),
2231 		TEST_CASE_NAMED_ST(
2232 			"Tunnel header set DF 1 (inner 0)",
2233 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2234 			test_ipsec_inline_proto_set_df_1_inner_0),
2235 		TEST_CASE_NAMED_ST(
2236 			"Tunnel header IPv4 copy DSCP (inner 0)",
2237 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2238 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
2239 		TEST_CASE_NAMED_ST(
2240 			"Tunnel header IPv4 copy DSCP (inner 1)",
2241 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2242 			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
2243 		TEST_CASE_NAMED_ST(
2244 			"Tunnel header IPv4 set DSCP 0 (inner 1)",
2245 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2246 			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
2247 		TEST_CASE_NAMED_ST(
2248 			"Tunnel header IPv4 set DSCP 1 (inner 0)",
2249 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2250 			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
2251 		TEST_CASE_NAMED_ST(
2252 			"Tunnel header IPv6 copy DSCP (inner 0)",
2253 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2254 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
2255 		TEST_CASE_NAMED_ST(
2256 			"Tunnel header IPv6 copy DSCP (inner 1)",
2257 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2258 			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
2259 		TEST_CASE_NAMED_ST(
2260 			"Tunnel header IPv6 set DSCP 0 (inner 1)",
2261 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2262 			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
2263 		TEST_CASE_NAMED_ST(
2264 			"Tunnel header IPv6 set DSCP 1 (inner 0)",
2265 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2266 			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
2267 		TEST_CASE_NAMED_ST(
2268 			"Tunnel header IPv6 copy FLABEL (inner 0)",
2269 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2270 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
2271 		TEST_CASE_NAMED_ST(
2272 			"Tunnel header IPv6 copy FLABEL (inner 1)",
2273 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2274 			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
2275 		TEST_CASE_NAMED_ST(
2276 			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
2277 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2278 			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
2279 		TEST_CASE_NAMED_ST(
2280 			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
2281 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2282 			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
2283 		TEST_CASE_NAMED_ST(
2284 			"Tunnel header IPv4 decrement inner TTL",
2285 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2286 			test_ipsec_inline_proto_ipv4_ttl_decrement),
2287 		TEST_CASE_NAMED_ST(
2288 			"Tunnel header IPv6 decrement inner hop limit",
2289 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2290 			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
2291 		TEST_CASE_NAMED_ST(
2292 			"IV generation",
2293 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2294 			test_ipsec_inline_proto_iv_gen),
2295 
2296 
2297 		TEST_CASE_NAMED_WITH_DATA(
2298 			"Antireplay with window size 1024",
2299 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2300 			test_ipsec_inline_proto_pkt_antireplay1024,
2301 			&pkt_aes_128_gcm),
2302 		TEST_CASE_NAMED_WITH_DATA(
2303 			"Antireplay with window size 2048",
2304 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2305 			test_ipsec_inline_proto_pkt_antireplay2048,
2306 			&pkt_aes_128_gcm),
2307 		TEST_CASE_NAMED_WITH_DATA(
2308 			"Antireplay with window size 4096",
2309 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2310 			test_ipsec_inline_proto_pkt_antireplay4096,
2311 			&pkt_aes_128_gcm),
2312 		TEST_CASE_NAMED_WITH_DATA(
2313 			"ESN and Antireplay with window size 1024",
2314 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2315 			test_ipsec_inline_proto_pkt_esn_antireplay1024,
2316 			&pkt_aes_128_gcm),
2317 		TEST_CASE_NAMED_WITH_DATA(
2318 			"ESN and Antireplay with window size 2048",
2319 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2320 			test_ipsec_inline_proto_pkt_esn_antireplay2048,
2321 			&pkt_aes_128_gcm),
2322 		TEST_CASE_NAMED_WITH_DATA(
2323 			"ESN and Antireplay with window size 4096",
2324 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2325 			test_ipsec_inline_proto_pkt_esn_antireplay4096,
2326 			&pkt_aes_128_gcm),
2327 
2328 		TEST_CASE_NAMED_WITH_DATA(
2329 			"IPv4 Reassembly with 2 fragments",
2330 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2331 			test_inline_ip_reassembly, &ipv4_2frag_vector),
2332 		TEST_CASE_NAMED_WITH_DATA(
2333 			"IPv6 Reassembly with 2 fragments",
2334 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2335 			test_inline_ip_reassembly, &ipv6_2frag_vector),
2336 		TEST_CASE_NAMED_WITH_DATA(
2337 			"IPv4 Reassembly with 4 fragments",
2338 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2339 			test_inline_ip_reassembly, &ipv4_4frag_vector),
2340 		TEST_CASE_NAMED_WITH_DATA(
2341 			"IPv6 Reassembly with 4 fragments",
2342 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2343 			test_inline_ip_reassembly, &ipv6_4frag_vector),
2344 		TEST_CASE_NAMED_WITH_DATA(
2345 			"IPv4 Reassembly with 5 fragments",
2346 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2347 			test_inline_ip_reassembly, &ipv4_5frag_vector),
2348 		TEST_CASE_NAMED_WITH_DATA(
2349 			"IPv6 Reassembly with 5 fragments",
2350 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2351 			test_inline_ip_reassembly, &ipv6_5frag_vector),
2352 		TEST_CASE_NAMED_WITH_DATA(
2353 			"IPv4 Reassembly with incomplete fragments",
2354 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2355 			test_inline_ip_reassembly, &ipv4_incomplete_vector),
2356 		TEST_CASE_NAMED_WITH_DATA(
2357 			"IPv4 Reassembly with overlapping fragments",
2358 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2359 			test_inline_ip_reassembly, &ipv4_overlap_vector),
2360 		TEST_CASE_NAMED_WITH_DATA(
2361 			"IPv4 Reassembly with out of order fragments",
2362 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2363 			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
2364 		TEST_CASE_NAMED_WITH_DATA(
2365 			"IPv4 Reassembly with burst of 4 fragments",
2366 			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
2367 			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
2368 
2369 		TEST_CASES_END() /**< NULL terminate unit test array */
2370 	},
2371 };
2372 
2373 
2374 static int
2375 test_inline_ipsec(void)
2376 {
2377 	return unit_test_suite_runner(&inline_ipsec_testsuite);
2378 }
2379 
2380 #endif /* !RTE_EXEC_ENV_WINDOWS */
2381 
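/*
 * Registered as "inline_ipsec_autotest"; on non-Windows targets the suite can
 * be launched from the dpdk-test interactive prompt, e.g.:
 *   RTE>> inline_ipsec_autotest
 */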
2382 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
2383