xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.c (revision 88948ff31f57618a74c8985c59e332676995b438)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Intel Corporation
3  */
4 
5 #include <stdbool.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <sys/types.h>
11 #include <netinet/in.h>
12 #include <netinet/ip.h>
13 #include <netinet/ip6.h>
14 #include <string.h>
15 #include <sys/queue.h>
16 #include <stdarg.h>
17 #include <errno.h>
18 #include <signal.h>
19 #include <getopt.h>
20 
21 #include <rte_common.h>
22 #include <rte_bitmap.h>
23 #include <rte_byteorder.h>
24 #include <rte_log.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
38 #include <rte_mbuf.h>
39 #include <rte_acl.h>
40 #include <rte_lpm.h>
41 #include <rte_lpm6.h>
42 #include <rte_hash.h>
43 #include <rte_jhash.h>
44 #include <rte_cryptodev.h>
45 #include <rte_security.h>
46 #include <rte_eventdev.h>
47 #include <rte_event_crypto_adapter.h>
48 #include <rte_ip.h>
49 #include <rte_ip_frag.h>
50 #include <rte_alarm.h>
51 #include <rte_telemetry.h>
52 
53 #include "event_helper.h"
54 #include "flow.h"
55 #include "ipsec.h"
56 #include "ipsec_worker.h"
57 #include "parser.h"
58 #include "sad.h"
59 
60 #if defined(__ARM_NEON)
61 #include "ipsec_lpm_neon.h"
62 #endif
63 
64 volatile bool force_quit;
65 
66 #define MAX_JUMBO_PKT_LEN  9600
67 
68 #define MEMPOOL_CACHE_SIZE 256
69 
70 #define CDEV_MAP_ENTRIES 16384
71 #define CDEV_MP_CACHE_SZ 64
72 #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
73 #define MAX_QUEUE_PAIRS 1
74 
75 #define MAX_LCORE_PARAMS 1024
76 
77 /*
78  * Configurable number of RX/TX ring descriptors
79  */
80 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
81 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
82 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
83 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
84 
85 /*
86  * Configurable number of descriptors per queue pair
87  */
88 uint32_t qp_desc_nb = 2048;
89 
90 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
91 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
92 		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
93 		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
94 		0, 0)
95 
96 #define	FRAG_TBL_BUCKET_ENTRIES	4
97 #define	MAX_FRAG_TTL_NS		(10LL * NS_PER_S)
98 
99 #define MTU_TO_FRAMELEN(x)	((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
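/*
 * Worked example (illustrative, not part of the original source): with the
 * default mtu_size of RTE_ETHER_MTU (1500), the 14-byte Ethernet header and
 * the 4-byte CRC give MTU_TO_FRAMELEN(1500) = 1500 + 14 + 4 = 1518 bytes.
 */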
100 
101 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
102 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a}} },
103 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9}} },
104 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x08, 0x69, 0x26}} },
105 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd}} }
106 };
107 
108 struct offloads tx_offloads;
109 
110 /*
111  * Per-port Ethernet header that will be applied
112  * to outgoing packets.
113  */
114 xmm_t val_eth[RTE_MAX_ETHPORTS];
115 
116 struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
117 
118 #define CMD_LINE_OPT_CONFIG		"config"
119 #define CMD_LINE_OPT_SINGLE_SA		"single-sa"
120 #define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
121 #define CMD_LINE_OPT_TRANSFER_MODE	"transfer-mode"
122 #define CMD_LINE_OPT_SCHEDULE_TYPE	"event-schedule-type"
123 #define CMD_LINE_OPT_RX_OFFLOAD		"rxoffload"
124 #define CMD_LINE_OPT_TX_OFFLOAD		"txoffload"
125 #define CMD_LINE_OPT_REASSEMBLE		"reassemble"
126 #define CMD_LINE_OPT_MTU		"mtu"
127 #define CMD_LINE_OPT_FRAG_TTL		"frag-ttl"
128 #define CMD_LINE_OPT_EVENT_VECTOR	"event-vector"
129 #define CMD_LINE_OPT_VECTOR_SIZE	"vector-size"
130 #define CMD_LINE_OPT_VECTOR_TIMEOUT	"vector-tmo"
131 #define CMD_LINE_OPT_VECTOR_POOL_SZ	"vector-pool-sz"
132 #define CMD_LINE_OPT_PER_PORT_POOL	"per-port-pool"
133 #define CMD_LINE_OPT_QP_DESC_NB		"desc-nb"
134 
135 #define CMD_LINE_ARG_EVENT	"event"
136 #define CMD_LINE_ARG_POLL	"poll"
137 #define CMD_LINE_ARG_ORDERED	"ordered"
138 #define CMD_LINE_ARG_ATOMIC	"atomic"
139 #define CMD_LINE_ARG_PARALLEL	"parallel"
140 
141 enum {
142 	/* long options mapped to a short option */
143 
144 	/* first long only option value must be >= 256, so that we won't
145 	 * conflict with short options
146 	 */
147 	CMD_LINE_OPT_MIN_NUM = 256,
148 	CMD_LINE_OPT_CONFIG_NUM,
149 	CMD_LINE_OPT_SINGLE_SA_NUM,
150 	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
151 	CMD_LINE_OPT_TRANSFER_MODE_NUM,
152 	CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
153 	CMD_LINE_OPT_RX_OFFLOAD_NUM,
154 	CMD_LINE_OPT_TX_OFFLOAD_NUM,
155 	CMD_LINE_OPT_REASSEMBLE_NUM,
156 	CMD_LINE_OPT_MTU_NUM,
157 	CMD_LINE_OPT_FRAG_TTL_NUM,
158 	CMD_LINE_OPT_EVENT_VECTOR_NUM,
159 	CMD_LINE_OPT_VECTOR_SIZE_NUM,
160 	CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
161 	CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
162 	CMD_LINE_OPT_PER_PORT_POOL_NUM,
163 	CMD_LINE_OPT_QP_DESC_NB_NUM,
164 };
165 
166 static const struct option lgopts[] = {
167 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
168 	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
169 	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
170 	{CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
171 	{CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
172 	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
173 	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
174 	{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
175 	{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
176 	{CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
177 	{CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
178 	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
179 	{CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
180 	{CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
181 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
182 	{CMD_LINE_OPT_QP_DESC_NB, 1, 0, CMD_LINE_OPT_QP_DESC_NB_NUM},
183 	{NULL, 0, 0, 0}
184 };
185 
186 uint32_t unprotected_port_mask;
187 uint32_t single_sa_idx;
188 /* mask of enabled ports */
189 static uint32_t enabled_port_mask;
190 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
191 static int32_t promiscuous_on;
192 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
193 static uint32_t nb_lcores;
194 uint32_t single_sa;
195 uint32_t nb_bufs_in_pool;
196 
197 /*
198  * RX/TX HW offload capabilities to enable/use on ethernet ports.
199  * By default all capabilities are enabled.
200  */
201 static uint64_t dev_rx_offload = UINT64_MAX;
202 static uint64_t dev_tx_offload = UINT64_MAX;
203 
204 /*
205  * global values that determine multi-seg policy
206  */
207 uint32_t frag_tbl_sz;
208 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
209 uint32_t mtu_size = RTE_ETHER_MTU;
210 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
211 static uint32_t stats_interval;
212 
213 /* application wide librte_ipsec/SA parameters */
214 struct app_sa_prm app_sa_prm = {
215 			.enable = 0,
216 			.cache_sz = SA_CACHE_SZ,
217 			.udp_encap = 0
218 		};
219 static const char *cfgfile;
220 
221 struct __rte_cache_aligned lcore_params {
222 	uint16_t port_id;
223 	uint16_t queue_id;
224 	uint32_t lcore_id;
225 };
226 
227 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
228 
229 static struct lcore_params *lcore_params;
230 static uint16_t nb_lcore_params;
231 
232 static struct rte_hash *cdev_map_in;
233 static struct rte_hash *cdev_map_out;
234 
235 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
236 
237 static struct rte_eth_conf port_conf = {
238 	.rxmode = {
239 		.mq_mode	= RTE_ETH_MQ_RX_RSS,
240 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
241 	},
242 	.rx_adv_conf = {
243 		.rss_conf = {
244 			.rss_key = NULL,
245 			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
246 				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
247 		},
248 	},
249 	.txmode = {
250 		.mq_mode = RTE_ETH_MQ_TX_NONE,
251 	},
252 };
253 
254 struct socket_ctx socket_ctx[NB_SOCKETS];
255 
256 bool per_port_pool;
257 
258 uint16_t wrkr_flags;
259 /*
260  * Determine if multi-segment support is required:
261  *  - either the frame buffer size is smaller than the mtu
262  *  - or reassembly support is requested
263  */
264 static int
265 multi_seg_required(void)
266 {
267 	return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
268 		frame_buf_size || frag_tbl_sz != 0);
269 }
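/*
 * Illustrative note, assuming common build defaults: MTU_TO_FRAMELEN(1500)
 * is 1518 bytes, and adding RTE_PKTMBUF_HEADROOM (typically 128) gives 1646,
 * which fits within the default frame_buf_size of RTE_MBUF_DEFAULT_BUF_SIZE
 * (typically 2176). Multi-segment support is therefore only required when a
 * larger MTU is configured or reassembly is enabled (frag_tbl_sz != 0).
 */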
270 
271 
272 struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
273 
274 /* Print out statistics on packet distribution */
275 static void
276 print_stats_cb(__rte_unused void *param)
277 {
278 	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
279 	uint64_t total_frag_packets_dropped = 0;
280 	float burst_percent, rx_per_call, tx_per_call;
281 	unsigned int coreid;
282 
283 	total_packets_dropped = 0;
284 	total_packets_tx = 0;
285 	total_packets_rx = 0;
286 
287 	const char clr[] = { 27, '[', '2', 'J', '\0' };
288 	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
289 
290 	/* Clear screen and move to top left */
291 	printf("%s%s", clr, topLeft);
292 
293 	printf("\nCore statistics ====================================");
294 
295 	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
296 		/* skip disabled cores */
297 		if (rte_lcore_is_enabled(coreid) == 0)
298 			continue;
299 		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
300 					core_statistics[coreid].rx;
301 		rx_per_call =  (float)(core_statistics[coreid].rx)/
302 				       core_statistics[coreid].rx_call;
303 		tx_per_call =  (float)(core_statistics[coreid].tx)/
304 				       core_statistics[coreid].tx_call;
305 		printf("\nStatistics for core %u ------------------------------"
306 			   "\nPackets received: %20"PRIu64
307 			   "\nPackets sent: %24"PRIu64
308 			   "\nPackets dropped: %21"PRIu64
309 			   "\nFrag Packets dropped: %16"PRIu64
310 			   "\nBurst percent: %23.2f"
311 			   "\nPackets per Rx call: %17.2f"
312 			   "\nPackets per Tx call: %17.2f",
313 			   coreid,
314 			   core_statistics[coreid].rx,
315 			   core_statistics[coreid].tx,
316 			   core_statistics[coreid].dropped,
317 			   core_statistics[coreid].frag_dropped,
318 			   burst_percent,
319 			   rx_per_call,
320 			   tx_per_call);
321 
322 		total_packets_dropped += core_statistics[coreid].dropped;
323 		total_frag_packets_dropped += core_statistics[coreid].frag_dropped;
324 		total_packets_tx += core_statistics[coreid].tx;
325 		total_packets_rx += core_statistics[coreid].rx;
326 	}
327 	printf("\nAggregate statistics ==============================="
328 		   "\nTotal packets received: %14"PRIu64
329 		   "\nTotal packets sent: %18"PRIu64
330 		   "\nTotal packets dropped: %15"PRIu64
331 		   "\nTotal frag packets dropped: %10"PRIu64,
332 		   total_packets_rx,
333 		   total_packets_tx,
334 		   total_packets_dropped,
335 		   total_frag_packets_dropped);
336 	printf("\n====================================================\n");
337 
338 	rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
339 }
340 
341 static void
342 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
343 {
344 	uint32_t i, n4, n6;
345 	struct ip *ip;
346 	struct rte_mbuf *m;
347 
348 	n4 = trf->ip4.num;
349 	n6 = trf->ip6.num;
350 
351 	for (i = 0; i < num; i++) {
352 
353 		m = mb[i];
354 		ip = rte_pktmbuf_mtod(m, struct ip *);
355 
356 		if (ip->ip_v == IPVERSION) {
357 			trf->ip4.pkts[n4] = m;
358 			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
359 					uint8_t *, offsetof(struct ip, ip_p));
360 			n4++;
361 		} else if (ip->ip_v == IP6_VERSION) {
362 			trf->ip6.pkts[n6] = m;
363 			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
364 					uint8_t *,
365 					offsetof(struct ip6_hdr, ip6_nxt));
366 			n6++;
367 		} else
368 			free_pkts(&m, 1);
369 	}
370 
371 	trf->ip4.num = n4;
372 	trf->ip6.num = n6;
373 }
374 
375 
376 static inline void
377 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
378 		struct ipsec_traffic *traffic)
379 {
380 	unsigned int lcoreid = rte_lcore_id();
381 	uint16_t nb_pkts_in, n_ip4, n_ip6;
382 
383 	n_ip4 = traffic->ip4.num;
384 	n_ip6 = traffic->ip6.num;
385 
386 	if (app_sa_prm.enable == 0) {
387 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
388 				traffic->ipsec.num, MAX_PKT_BURST);
389 		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
390 	} else {
391 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
392 			traffic->ipsec.saptr, traffic->ipsec.num);
393 		ipsec_process(ipsec_ctx, traffic);
394 	}
395 
396 	inbound_sp_sa(ipsec_ctx->sp4_ctx,
397 		ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
398 		&core_statistics[lcoreid].inbound.spd4);
399 
400 	inbound_sp_sa(ipsec_ctx->sp6_ctx,
401 		ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
402 		&core_statistics[lcoreid].inbound.spd6);
403 }
404 
405 static inline void
406 outbound_spd_lookup(struct sp_ctx *sp,
407 		struct traffic_type *ip,
408 		struct traffic_type *ipsec,
409 		struct ipsec_spd_stats *stats)
410 {
411 	struct rte_mbuf *m;
412 	uint32_t i, j, sa_idx;
413 
414 	if (ip->num == 0 || sp == NULL)
415 		return;
416 
417 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
418 			ip->num, DEFAULT_MAX_CATEGORIES);
419 
420 	for (i = 0, j = 0; i < ip->num; i++) {
421 		m = ip->pkts[i];
422 		sa_idx = ip->res[i] - 1;
423 
424 		if (unlikely(ip->res[i] == DISCARD)) {
425 			free_pkts(&m, 1);
426 
427 			stats->discard++;
428 		} else if (unlikely(ip->res[i] == BYPASS)) {
429 			ip->pkts[j++] = m;
430 
431 			stats->bypass++;
432 		} else {
433 			ipsec->res[ipsec->num] = sa_idx;
434 			ipsec->pkts[ipsec->num++] = m;
435 
436 			stats->protect++;
437 		}
438 	}
439 	ip->num = j;
440 }
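/*
 * Note on the SPD lookup result convention used above: an ACL result of
 * DISCARD drops the packet, BYPASS forwards it in clear, and any other
 * result is treated as (SA index + 1), hence the "sa_idx = ip->res[i] - 1"
 * adjustment before the packet is queued for IPsec processing.
 */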
441 
442 static inline void
443 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
444 		struct ipsec_traffic *traffic)
445 {
446 	struct rte_mbuf *m;
447 	uint16_t idx, nb_pkts_out, i;
448 	unsigned int lcoreid = rte_lcore_id();
449 
450 	/* Drop any IPsec traffic from protected ports */
451 	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
452 
453 	traffic->ipsec.num = 0;
454 
455 	outbound_spd_lookup(ipsec_ctx->sp4_ctx,
456 		&traffic->ip4, &traffic->ipsec,
457 		&core_statistics[lcoreid].outbound.spd4);
458 
459 	outbound_spd_lookup(ipsec_ctx->sp6_ctx,
460 		&traffic->ip6, &traffic->ipsec,
461 		&core_statistics[lcoreid].outbound.spd6);
462 
463 	if (app_sa_prm.enable == 0) {
464 
465 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
466 				traffic->ipsec.res, traffic->ipsec.num,
467 				MAX_PKT_BURST);
468 
469 		for (i = 0; i < nb_pkts_out; i++) {
470 			m = traffic->ipsec.pkts[i];
471 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
472 			if (ip->ip_v == IPVERSION) {
473 				idx = traffic->ip4.num++;
474 				traffic->ip4.pkts[idx] = m;
475 			} else {
476 				idx = traffic->ip6.num++;
477 				traffic->ip6.pkts[idx] = m;
478 			}
479 		}
480 	} else {
481 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
482 			traffic->ipsec.saptr, traffic->ipsec.num);
483 		ipsec_process(ipsec_ctx, traffic);
484 	}
485 }
486 
487 static inline void
488 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
489 		struct ipsec_traffic *traffic)
490 {
491 	struct rte_mbuf *m;
492 	uint32_t nb_pkts_in, i, idx;
493 
494 	if (app_sa_prm.enable == 0) {
495 
496 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
497 				traffic->ipsec.num, MAX_PKT_BURST);
498 
499 		for (i = 0; i < nb_pkts_in; i++) {
500 			m = traffic->ipsec.pkts[i];
501 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
502 			if (ip->ip_v == IPVERSION) {
503 				idx = traffic->ip4.num++;
504 				traffic->ip4.pkts[idx] = m;
505 			} else {
506 				idx = traffic->ip6.num++;
507 				traffic->ip6.pkts[idx] = m;
508 			}
509 		}
510 	} else {
511 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
512 			traffic->ipsec.saptr, traffic->ipsec.num);
513 		ipsec_process(ipsec_ctx, traffic);
514 	}
515 }
516 
517 static inline void
518 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
519 		struct ipsec_traffic *traffic)
520 {
521 	struct rte_mbuf *m;
522 	uint32_t nb_pkts_out, i, n;
523 	struct ip *ip;
524 
525 	/* Drop any IPsec traffic from protected ports */
526 	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
527 
528 	n = 0;
529 
530 	for (i = 0; i < traffic->ip4.num; i++) {
531 		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
532 		traffic->ipsec.res[n++] = single_sa_idx;
533 	}
534 
535 	for (i = 0; i < traffic->ip6.num; i++) {
536 		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
537 		traffic->ipsec.res[n++] = single_sa_idx;
538 	}
539 
540 	traffic->ip4.num = 0;
541 	traffic->ip6.num = 0;
542 	traffic->ipsec.num = n;
543 
544 	if (app_sa_prm.enable == 0) {
545 
546 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
547 				traffic->ipsec.res, traffic->ipsec.num,
548 				MAX_PKT_BURST);
549 
550 		/* They all use the same SA (ip4 or ip6 tunnel) */
551 		m = traffic->ipsec.pkts[0];
552 		ip = rte_pktmbuf_mtod(m, struct ip *);
553 		if (ip->ip_v == IPVERSION) {
554 			traffic->ip4.num = nb_pkts_out;
555 			for (i = 0; i < nb_pkts_out; i++)
556 				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
557 		} else {
558 			traffic->ip6.num = nb_pkts_out;
559 			for (i = 0; i < nb_pkts_out; i++)
560 				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
561 		}
562 	} else {
563 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
564 			traffic->ipsec.saptr, traffic->ipsec.num);
565 		ipsec_process(ipsec_ctx, traffic);
566 	}
567 }
568 
569 static inline void
570 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
571 	     uint16_t nb_pkts, uint16_t portid, void *ctx)
572 {
573 	struct ipsec_traffic traffic;
574 
575 	prepare_traffic(ctx, pkts, &traffic, nb_pkts);
576 
577 	if (unlikely(single_sa)) {
578 		if (is_unprotected_port(portid))
579 			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
580 		else
581 			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
582 	} else {
583 		if (is_unprotected_port(portid))
584 			process_pkts_inbound(&qconf->inbound, &traffic);
585 		else
586 			process_pkts_outbound(&qconf->outbound, &traffic);
587 	}
588 
589 #if defined __ARM_NEON
590 	/* Neon optimized packet routing */
591 	route4_pkts_neon(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
592 			 qconf->outbound.ipv4_offloads, true);
593 	route6_pkts_neon(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
594 #else
595 	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
596 		    qconf->outbound.ipv4_offloads, true);
597 	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
598 #endif
599 }
600 
601 static inline void
602 drain_crypto_buffers(struct lcore_conf *qconf)
603 {
604 	uint32_t i;
605 	struct ipsec_ctx *ctx;
606 
607 	/* drain inbound buffers */
608 	ctx = &qconf->inbound;
609 	for (i = 0; i != ctx->nb_qps; i++) {
610 		if (ctx->tbl[i].len != 0)
611 			enqueue_cop_burst(ctx->tbl  + i);
612 	}
613 
614 	/* drain outbound buffers */
615 	ctx = &qconf->outbound;
616 	for (i = 0; i != ctx->nb_qps; i++) {
617 		if (ctx->tbl[i].len != 0)
618 			enqueue_cop_burst(ctx->tbl  + i);
619 	}
620 }
621 
622 static void
623 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
624 		struct ipsec_ctx *ctx)
625 {
626 	uint32_t n;
627 	struct ipsec_traffic trf;
628 	unsigned int lcoreid = rte_lcore_id();
629 	const int nb_pkts = RTE_DIM(trf.ipsec.pkts);
630 
631 	if (app_sa_prm.enable == 0) {
632 
633 		/* dequeue packets from crypto-queue */
634 		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
635 			RTE_MIN(MAX_PKT_BURST, nb_pkts));
636 
637 		trf.ip4.num = 0;
638 		trf.ip6.num = 0;
639 
640 		/* split traffic by ipv4-ipv6 */
641 		split46_traffic(&trf, trf.ipsec.pkts, n);
642 	} else
643 		ipsec_cqp_process(ctx, &trf);
644 
645 	/* process ipv4 packets */
646 	if (trf.ip4.num != 0) {
647 		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
648 			&core_statistics[lcoreid].inbound.spd4);
649 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
650 			    qconf->outbound.ipv4_offloads, true);
651 	}
652 
653 	/* process ipv6 packets */
654 	if (trf.ip6.num != 0) {
655 		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
656 			&core_statistics[lcoreid].inbound.spd6);
657 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
658 	}
659 }
660 
661 static void
662 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
663 		struct ipsec_ctx *ctx)
664 {
665 	uint32_t n;
666 	struct ipsec_traffic trf;
667 	const int nb_pkts = RTE_DIM(trf.ipsec.pkts);
668 
669 	if (app_sa_prm.enable == 0) {
670 
671 		/* dequeue packets from crypto-queue */
672 		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
673 			RTE_MIN(MAX_PKT_BURST, nb_pkts));
674 
675 		trf.ip4.num = 0;
676 		trf.ip6.num = 0;
677 
678 		/* split traffic by ipv4-ipv6 */
679 		split46_traffic(&trf, trf.ipsec.pkts, n);
680 	} else
681 		ipsec_cqp_process(ctx, &trf);
682 
683 	/* process ipv4 packets */
684 	if (trf.ip4.num != 0)
685 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
686 			    qconf->outbound.ipv4_offloads, true);
687 
688 	/* process ipv6 packets */
689 	if (trf.ip6.num != 0)
690 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
691 }
692 
693 /* main processing loop */
694 void
695 ipsec_poll_mode_worker(void)
696 {
697 	struct rte_mbuf *pkts[MAX_PKT_BURST];
698 	uint32_t lcore_id;
699 	uint64_t prev_tsc, diff_tsc, cur_tsc;
700 	uint16_t i, nb_rx, portid, queueid;
701 	struct lcore_conf *qconf;
702 	int32_t rc, socket_id;
703 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
704 			/ US_PER_S * BURST_TX_DRAIN_US;
705 	struct lcore_rx_queue *rxql;
706 
707 	prev_tsc = 0;
708 	lcore_id = rte_lcore_id();
709 	qconf = &lcore_conf[lcore_id];
710 	rxql = qconf->rx_queue_list;
711 	socket_id = rte_lcore_to_socket_id(lcore_id);
712 
713 	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
714 	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
715 	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
716 	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
717 	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
718 	qconf->inbound.cdev_map = cdev_map_in;
719 	qconf->inbound.lcore_id = lcore_id;
720 	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
721 	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
722 	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
723 	qconf->outbound.cdev_map = cdev_map_out;
724 	qconf->outbound.lcore_id = lcore_id;
725 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
726 
727 	rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
728 	if (rc != 0) {
729 		RTE_LOG(ERR, IPSEC,
730 			"SAD cache init on lcore %u failed with code: %d\n",
731 			lcore_id, rc);
732 		return;
733 	}
734 
735 	if (qconf->nb_rx_queue == 0) {
736 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
737 			lcore_id);
738 		return;
739 	}
740 
741 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
742 
743 	for (i = 0; i < qconf->nb_rx_queue; i++) {
744 		portid = rxql[i].port_id;
745 		queueid = rxql[i].queue_id;
746 		RTE_LOG(INFO, IPSEC,
747 			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
748 			lcore_id, portid, queueid);
749 	}
750 
751 	while (!force_quit) {
752 		cur_tsc = rte_rdtsc();
753 
754 		/* TX queue buffer drain */
755 		diff_tsc = cur_tsc - prev_tsc;
756 
757 		if (unlikely(diff_tsc > drain_tsc)) {
758 			drain_tx_buffers(qconf);
759 			drain_crypto_buffers(qconf);
760 			prev_tsc = cur_tsc;
761 		}
762 
763 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
764 
765 			/* Read packets from RX queues */
766 			portid = rxql[i].port_id;
767 			queueid = rxql[i].queue_id;
768 			nb_rx = rte_eth_rx_burst(portid, queueid,
769 					pkts, MAX_PKT_BURST);
770 
771 			if (nb_rx > 0) {
772 				core_stats_update_rx(nb_rx);
773 				process_pkts(qconf, pkts, nb_rx, portid,
774 					     rxql->sec_ctx);
775 			}
776 
777 			/* dequeue and process completed crypto-ops */
778 			if (is_unprotected_port(portid))
779 				drain_inbound_crypto_queues(qconf,
780 					&qconf->inbound);
781 			else
782 				drain_outbound_crypto_queues(qconf,
783 					&qconf->outbound);
784 		}
785 	}
786 }
787 
788 int
789 check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
790 {
791 	uint16_t i;
792 	uint16_t portid, queueid;
793 
794 	for (i = 0; i < nb_lcore_params; ++i) {
795 		portid = lcore_params_array[i].port_id;
796 		if (portid == fdir_portid) {
797 			queueid = lcore_params_array[i].queue_id;
798 			if (queueid == fdir_qid)
799 				break;
800 		}
801 
802 		if (i == nb_lcore_params - 1)
803 			return -1;
804 	}
805 
806 	return 1;
807 }
808 
809 static int32_t
810 check_poll_mode_params(struct eh_conf *eh_conf)
811 {
812 	uint32_t lcore;
813 	uint16_t portid;
814 	uint16_t i;
815 	int32_t socket_id;
816 
817 	if (!eh_conf)
818 		return -EINVAL;
819 
820 	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
821 		return 0;
822 
823 	if (lcore_params == NULL) {
824 		printf("Error: No port/queue/core mappings\n");
825 		return -1;
826 	}
827 
828 	for (i = 0; i < nb_lcore_params; ++i) {
829 		lcore = lcore_params[i].lcore_id;
830 		if (!rte_lcore_is_enabled(lcore)) {
831 			printf("error: lcore %u is not enabled in "
832 				"lcore mask\n", lcore);
833 			return -1;
834 		}
835 		socket_id = rte_lcore_to_socket_id(lcore);
836 		if (socket_id != 0 && numa_on == 0) {
837 			printf("warning: lcore %u is on socket %d "
838 				"with numa off\n",
839 				lcore, socket_id);
840 		}
841 		portid = lcore_params[i].port_id;
842 		if ((enabled_port_mask & (1 << portid)) == 0) {
843 			printf("port %u is not enabled in port mask\n", portid);
844 			return -1;
845 		}
846 		if (!rte_eth_dev_is_valid_port(portid)) {
847 			printf("port %u is not present on the board\n", portid);
848 			return -1;
849 		}
850 	}
851 	return 0;
852 }
853 
854 static uint16_t
855 get_port_nb_rx_queues(const uint16_t port)
856 {
857 	int32_t queue = -1;
858 	uint16_t i;
859 
860 	for (i = 0; i < nb_lcore_params; ++i) {
861 		if (lcore_params[i].port_id == port &&
862 				lcore_params[i].queue_id > queue)
863 			queue = lcore_params[i].queue_id;
864 	}
865 	return (uint16_t)(++queue);
866 }
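/*
 * Illustrative example: with --config entries "(0,0,1),(0,1,2)" port 0 has
 * lcore mappings for queue ids 0 and 1, so get_port_nb_rx_queues(0) returns
 * 2, i.e. the highest configured queue id plus one.
 */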
867 
868 static int32_t
869 init_lcore_rx_queues(void)
870 {
871 	uint16_t i, nb_rx_queue;
872 	uint32_t lcore;
873 
874 	for (i = 0; i < nb_lcore_params; ++i) {
875 		lcore = lcore_params[i].lcore_id;
876 		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
877 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
878 			printf("error: too many queues (%u) for lcore: %u\n",
879 					nb_rx_queue + 1, lcore);
880 			return -1;
881 		}
882 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
883 			lcore_params[i].port_id;
884 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
885 			lcore_params[i].queue_id;
886 		lcore_conf[lcore].nb_rx_queue++;
887 	}
888 	return 0;
889 }
890 
891 /* display usage */
892 static void
893 print_usage(const char *prgname)
894 {
895 	fprintf(stderr, "%s [EAL options] --"
896 		" -p PORTMASK"
897 		" [-P]"
898 		" [-u PORTMASK]"
899 		" [-j FRAMESIZE]"
900 		" [-l]"
901 		" [-w REPLAY_WINDOW_SIZE]"
902 		" [-e]"
903 		" [-a]"
904 		" [-c]"
905 		" [-t STATS_INTERVAL]"
906 		" [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
907 		" -f CONFIG_FILE"
908 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
909 		" [--single-sa SAIDX]"
910 		" [--cryptodev_mask MASK]"
911 		" [--transfer-mode MODE]"
912 		" [--event-schedule-type TYPE]"
913 		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
914 		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
915 		" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
916 		" [--" CMD_LINE_OPT_MTU " MTU]"
917 		" [--event-vector]"
918 		" [--vector-size SIZE]"
919 		" [--vector-tmo TIMEOUT in ns]"
920 		" [--" CMD_LINE_OPT_QP_DESC_NB " NUMBER_OF_DESC]"
921 		"\n\n"
922 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
923 		"  -P : Enable promiscuous mode\n"
924 		"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
925 		"  -j FRAMESIZE: Data buffer size, minimum (and default)\n"
926 		"     value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
927 		"  -l enables code-path that uses librte_ipsec\n"
928 		"  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
929 		"     size for each SA\n"
930 		"  -e enables ESN\n"
931 		"  -a enables SA SQN atomic behaviour\n"
932 		"  -c specifies inbound SAD cache size,\n"
933 		"     zero value disables the cache (default value: 128)\n"
934 		"  -t specifies statistics screen update interval,\n"
935 		"     zero disables statistics screen (default value: 0)\n"
936 		"  -s number of mbufs in packet pool, if not specified number\n"
937 		"     of mbufs will be calculated based on number of cores,\n"
938 		"     ports and crypto queues\n"
939 		"  -f CONFIG_FILE: Configuration file\n"
940 		"  --config (port,queue,lcore): Rx queue configuration. In poll\n"
941 		"                               mode determines which queues from\n"
942 		"                               which ports are mapped to which cores.\n"
943 		"                               In event mode this option is not used\n"
944 		"                               as packets are dynamically scheduled\n"
945 		"                               to cores by HW.\n"
946 		"  --single-sa SAIDX: In poll mode use single SA index for\n"
947 		"                     outbound traffic, bypassing the SP\n"
948 		"                     In event mode selects driver submode,\n"
949 		"                     SA index value is ignored\n"
950 		"  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
951 		"                         devices to configure\n"
952 		"  --transfer-mode MODE\n"
953 		"               \"poll\"  : Packet transfer via polling (default)\n"
954 		"               \"event\" : Packet transfer via event device\n"
955 		"  --event-schedule-type TYPE queue schedule type, used only when\n"
956 		"                             transfer mode is set to event\n"
957 		"               \"ordered\"  : Ordered (default)\n"
958 		"               \"atomic\"   : Atomic\n"
959 		"               \"parallel\" : Parallel\n"
960 		"  --" CMD_LINE_OPT_RX_OFFLOAD
961 		": bitmask of the RX HW offload capabilities to enable/use\n"
962 		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
963 		"  --" CMD_LINE_OPT_TX_OFFLOAD
964 		": bitmask of the TX HW offload capabilities to enable/use\n"
965 		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
966 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
967 		": max number of entries in reassemble(fragment) table\n"
968 		"    (zero (default value) disables reassembly)\n"
969 		"  --" CMD_LINE_OPT_MTU " MTU"
970 		": MTU value on all ports (default value: 1500)\n"
971 		"    outgoing packets with bigger size will be fragmented\n"
972 		"    incoming packets with bigger size will be discarded\n"
973 		"  --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
974 		": fragments lifetime in nanoseconds, default\n"
975 		"    and maximum value is 10.000.000.000 ns (10 s)\n"
976 		"  --event-vector enables event vectorization\n"
977 		"  --vector-size Max vector size (default value: 16)\n"
978 		"  --vector-tmo Max vector timeout in nanoseconds"
979 		"    (default value: 102400)\n"
980 		"  --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
981 		"  --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
982 		"                    (default value is based on mbuf count)\n"
983 		"  --" CMD_LINE_OPT_QP_DESC_NB " DESC_NB"
984 		": Number of descriptors per queue pair (default value: 2048)\n"
985 		"\n",
986 		prgname);
987 }
988 
989 static int
990 parse_mask(const char *str, uint64_t *val)
991 {
992 	char *end;
993 	unsigned long t;
994 
995 	errno = 0;
996 	t = strtoul(str, &end, 0);
997 	if (errno != 0 || end[0] != 0)
998 		return -EINVAL;
999 
1000 	*val = t;
1001 	return 0;
1002 }
1003 
1004 static int32_t
1005 parse_portmask(const char *portmask)
1006 {
1007 	char *end = NULL;
1008 	unsigned long pm;
1009 
1010 	errno = 0;
1011 
1012 	/* parse hexadecimal string */
1013 	pm = strtoul(portmask, &end, 16);
1014 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1015 		return -1;
1016 
1017 	if ((pm == 0) && errno)
1018 		return -1;
1019 
1020 	return pm;
1021 }
1022 
1023 static int64_t
1024 parse_decimal(const char *str)
1025 {
1026 	char *end = NULL;
1027 	uint64_t num;
1028 
1029 	num = strtoull(str, &end, 10);
1030 	if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1031 		|| num > INT64_MAX)
1032 		return -1;
1033 
1034 	return num;
1035 }
1036 
1037 static int32_t
1038 parse_config(const char *q_arg)
1039 {
1040 	char s[256];
1041 	const char *p, *p0 = q_arg;
1042 	char *end;
1043 	enum fieldnames {
1044 		FLD_PORT = 0,
1045 		FLD_QUEUE,
1046 		FLD_LCORE,
1047 		_NUM_FLD
1048 	};
1049 	unsigned long int_fld[_NUM_FLD];
1050 	char *str_fld[_NUM_FLD];
1051 	int32_t i;
1052 	uint32_t size;
1053 	uint32_t max_fld[_NUM_FLD] = {
1054 		RTE_MAX_ETHPORTS,
1055 		RTE_MAX_QUEUES_PER_PORT,
1056 		RTE_MAX_LCORE
1057 	};
1058 
1059 	nb_lcore_params = 0;
1060 
1061 	while ((p = strchr(p0, '(')) != NULL) {
1062 		++p;
1063 		p0 = strchr(p, ')');
1064 		if (p0 == NULL)
1065 			return -1;
1066 
1067 		size = p0 - p;
1068 		if (size >= sizeof(s))
1069 			return -1;
1070 
1071 		snprintf(s, sizeof(s), "%.*s", size, p);
1072 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1073 				_NUM_FLD)
1074 			return -1;
1075 		for (i = 0; i < _NUM_FLD; i++) {
1076 			errno = 0;
1077 			int_fld[i] = strtoul(str_fld[i], &end, 0);
1078 			if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
1079 				return -1;
1080 		}
1081 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1082 			printf("exceeded max number of lcore params: %hu\n",
1083 				nb_lcore_params);
1084 			return -1;
1085 		}
1086 		lcore_params_array[nb_lcore_params].port_id =
1087 			(uint16_t)int_fld[FLD_PORT];
1088 		lcore_params_array[nb_lcore_params].queue_id =
1089 			(uint16_t)int_fld[FLD_QUEUE];
1090 		lcore_params_array[nb_lcore_params].lcore_id =
1091 			(uint32_t)int_fld[FLD_LCORE];
1092 		++nb_lcore_params;
1093 	}
1094 	lcore_params = lcore_params_array;
1095 	return 0;
1096 }
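/*
 * Illustrative example of the option parsed above: --config="(0,0,1),(1,0,2)"
 * fills lcore_params_array with two entries, mapping port 0 / queue 0 to
 * lcore 1 and port 1 / queue 0 to lcore 2.
 */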
1097 
1098 static void
1099 print_app_sa_prm(const struct app_sa_prm *prm)
1100 {
1101 	printf("librte_ipsec usage: %s\n",
1102 		(prm->enable == 0) ? "disabled" : "enabled");
1103 
1104 	printf("replay window size: %u\n", prm->window_size);
1105 	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1106 	printf("SA flags: %#" PRIx64 "\n", prm->flags);
1107 	printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1108 }
1109 
1110 static int
1111 parse_transfer_mode(struct eh_conf *conf, const char *optarg)
1112 {
1113 	if (!strcmp(CMD_LINE_ARG_POLL, optarg))
1114 		conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1115 	else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
1116 		conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
1117 	else {
1118 		printf("Unsupported packet transfer mode\n");
1119 		return -EINVAL;
1120 	}
1121 
1122 	return 0;
1123 }
1124 
1125 static int
1126 parse_schedule_type(struct eh_conf *conf, const char *optarg)
1127 {
1128 	struct eventmode_conf *em_conf = NULL;
1129 
1130 	/* Get eventmode conf */
1131 	em_conf = conf->mode_params;
1132 
1133 	if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
1134 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
1135 	else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
1136 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
1137 	else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
1138 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
1139 	else {
1140 		printf("Unsupported queue schedule type\n");
1141 		return -EINVAL;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int32_t
1148 parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
1149 {
1150 	int opt;
1151 	int64_t ret;
1152 	char **argvopt;
1153 	int32_t option_index;
1154 	char *prgname = argv[0];
1155 	int32_t f_present = 0;
1156 	struct eventmode_conf *em_conf = NULL;
1157 
1158 	argvopt = argv;
1159 
1160 	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
1161 				lgopts, &option_index)) != EOF) {
1162 
1163 		switch (opt) {
1164 		case 'p':
1165 			enabled_port_mask = parse_portmask(optarg);
1166 			if (enabled_port_mask == 0) {
1167 				printf("invalid portmask\n");
1168 				print_usage(prgname);
1169 				return -1;
1170 			}
1171 			break;
1172 		case 'P':
1173 			printf("Promiscuous mode selected\n");
1174 			promiscuous_on = 1;
1175 			break;
1176 		case 'u':
1177 			unprotected_port_mask = parse_portmask(optarg);
1178 			if (unprotected_port_mask == 0) {
1179 				printf("invalid unprotected portmask\n");
1180 				print_usage(prgname);
1181 				return -1;
1182 			}
1183 			break;
1184 		case 'f':
1185 			if (f_present == 1) {
1186 				printf("\"-f\" option present more than "
1187 					"once!\n");
1188 				print_usage(prgname);
1189 				return -1;
1190 			}
1191 			cfgfile = optarg;
1192 			f_present = 1;
1193 			break;
1194 
1195 		case 's':
1196 			ret = parse_decimal(optarg);
1197 			if (ret < 0) {
1198 				printf("Invalid number of buffers in a pool: "
1199 					"%s\n", optarg);
1200 				print_usage(prgname);
1201 				return -1;
1202 			}
1203 
1204 			nb_bufs_in_pool = ret;
1205 			break;
1206 
1207 		case 'j':
1208 			ret = parse_decimal(optarg);
1209 			if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1210 					ret > UINT16_MAX) {
1211 				printf("Invalid frame buffer size value: %s\n",
1212 					optarg);
1213 				print_usage(prgname);
1214 				return -1;
1215 			}
1216 			frame_buf_size = ret;
1217 			printf("Custom frame buffer size %u\n", frame_buf_size);
1218 			break;
1219 		case 'l':
1220 			app_sa_prm.enable = 1;
1221 			break;
1222 		case 'w':
1223 			app_sa_prm.window_size = parse_decimal(optarg);
1224 			break;
1225 		case 'e':
1226 			app_sa_prm.enable_esn = 1;
1227 			break;
1228 		case 'a':
1229 			app_sa_prm.enable = 1;
1230 			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1231 			break;
1232 		case 'c':
1233 			ret = parse_decimal(optarg);
1234 			if (ret < 0) {
1235 				printf("Invalid SA cache size: %s\n", optarg);
1236 				print_usage(prgname);
1237 				return -1;
1238 			}
1239 			app_sa_prm.cache_sz = ret;
1240 			break;
1241 		case 't':
1242 			ret = parse_decimal(optarg);
1243 			if (ret < 0) {
1244 				printf("Invalid interval value: %s\n", optarg);
1245 				print_usage(prgname);
1246 				return -1;
1247 			}
1248 			stats_interval = ret;
1249 			break;
1250 		case CMD_LINE_OPT_CONFIG_NUM:
1251 			ret = parse_config(optarg);
1252 			if (ret) {
1253 				printf("Invalid config\n");
1254 				print_usage(prgname);
1255 				return -1;
1256 			}
1257 			break;
1258 		case CMD_LINE_OPT_SINGLE_SA_NUM:
1259 			ret = parse_decimal(optarg);
1260 			if (ret == -1 || ret > UINT32_MAX) {
1261 				printf("Invalid argument[sa_idx]\n");
1262 				print_usage(prgname);
1263 				return -1;
1264 			}
1265 
1266 			/* else */
1267 			single_sa = 1;
1268 			single_sa_idx = ret;
1269 			eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1270 			wrkr_flags |= SS_F;
1271 			printf("Configured with single SA index %u\n",
1272 					single_sa_idx);
1273 			break;
1274 		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1275 			ret = parse_portmask(optarg);
1276 			if (ret == -1) {
1277 				printf("Invalid argument[portmask]\n");
1278 				print_usage(prgname);
1279 				return -1;
1280 			}
1281 
1282 			/* else */
1283 			enabled_cryptodev_mask = ret;
1284 			break;
1285 
1286 		case CMD_LINE_OPT_TRANSFER_MODE_NUM:
1287 			ret = parse_transfer_mode(eh_conf, optarg);
1288 			if (ret < 0) {
1289 				printf("Invalid packet transfer mode\n");
1290 				print_usage(prgname);
1291 				return -1;
1292 			}
1293 			break;
1294 
1295 		case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
1296 			ret = parse_schedule_type(eh_conf, optarg);
1297 			if (ret < 0) {
1298 				printf("Invalid queue schedule type\n");
1299 				print_usage(prgname);
1300 				return -1;
1301 			}
1302 			break;
1303 
1304 		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1305 			ret = parse_mask(optarg, &dev_rx_offload);
1306 			if (ret != 0) {
1307 				printf("Invalid argument for \'%s\': %s\n",
1308 					CMD_LINE_OPT_RX_OFFLOAD, optarg);
1309 				print_usage(prgname);
1310 				return -1;
1311 			}
1312 			break;
1313 		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1314 			ret = parse_mask(optarg, &dev_tx_offload);
1315 			if (ret != 0) {
1316 				printf("Invalid argument for \'%s\': %s\n",
1317 					CMD_LINE_OPT_TX_OFFLOAD, optarg);
1318 				print_usage(prgname);
1319 				return -1;
1320 			}
1321 			break;
1322 		case CMD_LINE_OPT_REASSEMBLE_NUM:
1323 			ret = parse_decimal(optarg);
1324 			if (ret < 0 || ret > UINT32_MAX) {
1325 				printf("Invalid argument for \'%s\': %s\n",
1326 					CMD_LINE_OPT_REASSEMBLE, optarg);
1327 				print_usage(prgname);
1328 				return -1;
1329 			}
1330 			frag_tbl_sz = ret;
1331 			break;
1332 		case CMD_LINE_OPT_MTU_NUM:
1333 			ret = parse_decimal(optarg);
1334 			if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1335 				printf("Invalid argument for \'%s\': %s\n",
1336 					CMD_LINE_OPT_MTU, optarg);
1337 				print_usage(prgname);
1338 				return -1;
1339 			}
1340 			mtu_size = ret;
1341 			break;
1342 		case CMD_LINE_OPT_FRAG_TTL_NUM:
1343 			ret = parse_decimal(optarg);
1344 			if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1345 				printf("Invalid argument for \'%s\': %s\n",
1346 					CMD_LINE_OPT_MTU, optarg);
1347 				print_usage(prgname);
1348 				return -1;
1349 			}
1350 			frag_ttl_ns = ret;
1351 			break;
1352 		case CMD_LINE_OPT_EVENT_VECTOR_NUM:
1353 			em_conf = eh_conf->mode_params;
1354 			em_conf->ext_params.event_vector = 1;
1355 			break;
1356 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
1357 			ret = parse_decimal(optarg);
1358 
1359 			if (ret > MAX_PKT_BURST_VEC) {
1360 				printf("Invalid argument for \'%s\': %s\n",
1361 					CMD_LINE_OPT_VECTOR_SIZE, optarg);
1362 				print_usage(prgname);
1363 				return -1;
1364 			}
1365 			em_conf = eh_conf->mode_params;
1366 			em_conf->ext_params.vector_size = ret;
1367 			break;
1368 		case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
1369 			ret = parse_decimal(optarg);
1370 
1371 			em_conf = eh_conf->mode_params;
1372 			em_conf->vector_tmo_ns = ret;
1373 			break;
1374 		case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
1375 			ret = parse_decimal(optarg);
1376 
1377 			em_conf = eh_conf->mode_params;
1378 			em_conf->vector_pool_sz = ret;
1379 			break;
1380 		case CMD_LINE_OPT_PER_PORT_POOL_NUM:
1381 			per_port_pool = 1;
1382 			break;
1383 		case CMD_LINE_OPT_QP_DESC_NB_NUM:
1384 			qp_desc_nb = parse_decimal(optarg);
1385 			break;
1386 		default:
1387 			print_usage(prgname);
1388 			return -1;
1389 		}
1390 	}
1391 
1392 	if (f_present == 0) {
1393 		printf("Mandatory option \"-f\" not present\n");
1394 		return -1;
1395 	}
1396 
1397 	/* check if we need to enable multi-seg support */
1398 	if (multi_seg_required()) {
1399 		/* legacy mode doesn't support multi-seg */
1400 		app_sa_prm.enable = 1;
1401 		printf("frame buf size: %u, mtu: %u, "
1402 			"number of reassemble entries: %u\n"
1403 			"multi-segment support is required\n",
1404 			frame_buf_size, mtu_size, frag_tbl_sz);
1405 	}
1406 
1407 	print_app_sa_prm(&app_sa_prm);
1408 
1409 	if (optind >= 0)
1410 		argv[optind-1] = prgname;
1411 
1412 	ret = optind-1;
1413 	optind = 1; /* reset getopt lib */
1414 	return ret;
1415 }
1416 
1417 static void
1418 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1419 {
1420 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1421 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1422 	printf("%s%s", name, buf);
1423 }
1424 
1425 /*
1426  * Update destination ethaddr for the port.
1427  */
1428 int
1429 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1430 {
1431 	if (port >= RTE_DIM(ethaddr_tbl))
1432 		return -EINVAL;
1433 
1434 	rte_ether_addr_copy(addr, &ethaddr_tbl[port].dst);
1435 	rte_ether_addr_copy(addr, (struct rte_ether_addr *)(val_eth + port));
1436 	return 0;
1437 }
1438 
1439 /* Check the link status of all ports for up to 9s, then print the final status */
1440 static void
1441 check_all_ports_link_status(uint32_t port_mask)
1442 {
1443 #define CHECK_INTERVAL 100 /* 100ms */
1444 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1445 	uint16_t portid;
1446 	uint8_t count, all_ports_up, print_flag = 0;
1447 	struct rte_eth_link link;
1448 	int ret;
1449 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1450 
1451 	printf("\nChecking link status");
1452 	fflush(stdout);
1453 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1454 		all_ports_up = 1;
1455 		RTE_ETH_FOREACH_DEV(portid) {
1456 			if ((port_mask & (1 << portid)) == 0)
1457 				continue;
1458 			memset(&link, 0, sizeof(link));
1459 			ret = rte_eth_link_get_nowait(portid, &link);
1460 			if (ret < 0) {
1461 				all_ports_up = 0;
1462 				if (print_flag == 1)
1463 					printf("Port %u link get failed: %s\n",
1464 						portid, rte_strerror(-ret));
1465 				continue;
1466 			}
1467 			/* print link status if flag set */
1468 			if (print_flag == 1) {
1469 				rte_eth_link_to_str(link_status_text,
1470 					sizeof(link_status_text), &link);
1471 				printf("Port %d %s\n", portid,
1472 				       link_status_text);
1473 				continue;
1474 			}
1475 			/* clear all_ports_up flag if any link down */
1476 			if (link.link_status == RTE_ETH_LINK_DOWN) {
1477 				all_ports_up = 0;
1478 				break;
1479 			}
1480 		}
1481 		/* after finally printing all link status, get out */
1482 		if (print_flag == 1)
1483 			break;
1484 
1485 		if (all_ports_up == 0) {
1486 			printf(".");
1487 			fflush(stdout);
1488 			rte_delay_ms(CHECK_INTERVAL);
1489 		}
1490 
1491 		/* set the print_flag if all ports up or timeout */
1492 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1493 			print_flag = 1;
1494 			printf("done\n");
1495 		}
1496 	}
1497 }
1498 
1499 static int32_t
1500 add_mapping(const char *str, uint16_t cdev_id,
1501 		uint16_t qp, struct lcore_params *params,
1502 		struct ipsec_ctx *ipsec_ctx,
1503 		const struct rte_cryptodev_capabilities *cipher,
1504 		const struct rte_cryptodev_capabilities *auth,
1505 		const struct rte_cryptodev_capabilities *aead)
1506 {
1507 	int32_t ret = 0;
1508 	unsigned long i;
1509 	struct cdev_key key = { 0 };
1510 
1511 	key.lcore_id = params->lcore_id;
1512 	if (cipher)
1513 		key.cipher_algo = cipher->sym.cipher.algo;
1514 	if (auth)
1515 		key.auth_algo = auth->sym.auth.algo;
1516 	if (aead)
1517 		key.aead_algo = aead->sym.aead.algo;
1518 
1519 	ret = rte_hash_lookup(ipsec_ctx->cdev_map, &key);
1520 	if (ret != -ENOENT)
1521 		return 0;
1522 
1523 	for (i = 0; i < ipsec_ctx->nb_qps; i++)
1524 		if (ipsec_ctx->tbl[i].id == cdev_id)
1525 			break;
1526 
1527 	if (i == ipsec_ctx->nb_qps) {
1528 		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1529 			printf("Maximum number of crypto devices assigned to "
1530 				"a core, increase MAX_QP_PER_LCORE value\n");
1531 			return 0;
1532 		}
1533 		ipsec_ctx->tbl[i].id = cdev_id;
1534 		ipsec_ctx->tbl[i].qp = qp;
1535 		ipsec_ctx->nb_qps++;
1536 		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1537 				"(cdev_id_qp %lu)\n", str, key.lcore_id,
1538 				cdev_id, qp, i);
1539 	}
1540 
1541 	ret = rte_hash_add_key_data(ipsec_ctx->cdev_map, &key, (void *)i);
1542 	if (ret < 0) {
1543 		printf("Failed to insert cdev mapping for (lcore %u, "
1544 				"cdev %u, qp %u), errno %d\n",
1545 				key.lcore_id, ipsec_ctx->tbl[i].id,
1546 				ipsec_ctx->tbl[i].qp, ret);
1547 		return 0;
1548 	}
1549 
1550 	return 1;
1551 }
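/*
 * Note: add_mapping() returns 1 only when a new (lcore, algorithms) -> qp
 * entry is inserted into the cdev map hash; it returns 0 when the key is
 * already present, the per-lcore table is full, or the hash insert fails.
 * The caller ORs these results per capability to decide whether the queue
 * pair is actually used.
 */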
1552 
1553 static int32_t
1554 add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1555 		uint16_t qp, struct lcore_params *params)
1556 {
1557 	int32_t ret = 0;
1558 	const struct rte_cryptodev_capabilities *i, *j;
1559 	struct lcore_conf *qconf;
1560 	struct ipsec_ctx *ipsec_ctx;
1561 	const char *str;
1562 	void *sec_ctx;
1563 	const struct rte_security_capability *sec_cap;
1564 
1565 	qconf = &lcore_conf[params->lcore_id];
1566 
1567 	if (!is_unprotected_port(params->port_id)) {
1568 		ipsec_ctx = &qconf->outbound;
1569 		ipsec_ctx->cdev_map = cdev_map_out;
1570 		str = "Outbound";
1571 	} else {
1572 		ipsec_ctx = &qconf->inbound;
1573 		ipsec_ctx->cdev_map = cdev_map_in;
1574 		str = "Inbound";
1575 	}
1576 
1577 	/* Require cryptodevs with symmetric operation chaining or rte_security support */
1578 	if (!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING) &&
1579 			!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SECURITY))
1580 		return ret;
1581 
1582 	for (i = dev_info->capabilities;
1583 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1584 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1585 			continue;
1586 
1587 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1588 			ret |= add_mapping(str, cdev_id, qp, params,
1589 					ipsec_ctx, NULL, NULL, i);
1590 			continue;
1591 		}
1592 
1593 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1594 			continue;
1595 
1596 		for (j = dev_info->capabilities;
1597 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1598 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1599 				continue;
1600 
1601 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1602 				continue;
1603 
1604 			ret |= add_mapping(str, cdev_id, qp, params,
1605 						ipsec_ctx, i, j, NULL);
1606 		}
1607 	}
1608 
1609 	sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1610 	if (sec_ctx == NULL)
1611 		return ret;
1612 
1613 	sec_cap = rte_security_capabilities_get(sec_ctx);
1614 	if (sec_cap == NULL)
1615 		return ret;
1616 
1617 	for (i = sec_cap->crypto_capabilities;
1618 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1619 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1620 			continue;
1621 
1622 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1623 			ret |= add_mapping(str, cdev_id, qp, params,
1624 					ipsec_ctx, NULL, NULL, i);
1625 			continue;
1626 		}
1627 
1628 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1629 			continue;
1630 
1631 		for (j = sec_cap->crypto_capabilities;
1632 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1633 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1634 				continue;
1635 
1636 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1637 				continue;
1638 
1639 			ret |= add_mapping(str, cdev_id, qp, params,
1640 						ipsec_ctx, i, j, NULL);
1641 		}
1642 	}
1643 
1644 	return ret;
1645 }
1646 
1647 static uint16_t
1648 map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id,
1649 		const struct rte_cryptodev_info *cdev_info,
1650 		uint16_t *last_used_lcore_id)
1651 {
1652 	uint16_t nb_qp = 0, i = 0, max_nb_qps;
1653 
1654 	/* For event lookaside mode all sessions are bound to a single qp.
1655 	 * It's enough to bind one core, since all cores will share the same qp.
1656 	 * Event inline mode does not use this functionality.
1657 	 */
1658 	if (mode == EH_PKT_TRANSFER_MODE_EVENT) {
1659 		add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]);
1660 		return 1;
1661 	}
1662 
1663 	/* Check if there are enough queue pairs for all configured cores */
1664 	max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs);
1665 
1666 	while (nb_qp < max_nb_qps && i < nb_lcore_params) {
1667 		if (add_cdev_mapping(cdev_info, cdev_id, nb_qp,
1668 					&lcore_params[*last_used_lcore_id]))
1669 			nb_qp++;
1670 		(*last_used_lcore_id)++;
1671 		*last_used_lcore_id %= nb_lcore_params;
1672 		i++;
1673 	}
1674 
1675 	return nb_qp;
1676 }
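/*
 * Illustrative example: with four --config entries and a cryptodev exposing
 * only two queue pairs, max_nb_qps is 2, so at most two of the configured
 * lcores get a queue pair on this device; the round-robin *last_used_lcore_id
 * index keeps the assignment spread across subsequent devices.
 */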
1677 
1678 /* Check if the device is enabled by cryptodev_mask */
1679 static int
1680 check_cryptodev_mask(uint8_t cdev_id)
1681 {
1682 	if (enabled_cryptodev_mask & (1 << cdev_id))
1683 		return 0;
1684 
1685 	return -1;
1686 }
1687 
1688 static uint16_t
1689 cryptodevs_init(enum eh_pkt_transfer_mode mode)
1690 {
1691 	struct rte_hash_parameters params = { 0 };
1692 	struct rte_cryptodev_config dev_conf;
1693 	struct rte_cryptodev_qp_conf qp_conf;
1694 	uint16_t idx, qp, total_nb_qps;
1695 	int16_t cdev_id;
1696 
1697 	const uint64_t mseg_flag = multi_seg_required() ?
1698 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1699 
1700 	params.entries = CDEV_MAP_ENTRIES;
1701 	params.key_len = sizeof(struct cdev_key);
1702 	params.hash_func = rte_jhash;
1703 	params.hash_func_init_val = 0;
1704 	params.socket_id = rte_socket_id();
1705 
1706 	params.name = "cdev_map_in";
1707 	cdev_map_in = rte_hash_create(&params);
1708 	if (cdev_map_in == NULL)
1709 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1710 				rte_errno);
1711 
1712 	params.name = "cdev_map_out";
1713 	cdev_map_out = rte_hash_create(&params);
1714 	if (cdev_map_out == NULL)
1715 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1716 				rte_errno);
1717 
1718 	printf("lcore/cryptodev/qp mappings:\n");
1719 
1720 	idx = 0;
1721 	total_nb_qps = 0;
1722 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1723 		struct rte_cryptodev_info cdev_info;
1724 
1725 		if (check_cryptodev_mask((uint8_t)cdev_id))
1726 			continue;
1727 
1728 		rte_cryptodev_info_get(cdev_id, &cdev_info);
1729 
1730 		if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1731 			rte_exit(EXIT_FAILURE,
1732 				"Device %hd does not support \'%s\' feature\n",
1733 				cdev_id,
1734 				rte_cryptodev_get_feature_name(mseg_flag));
1735 
1736 
1737 		qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx);
1738 		if (qp == 0)
1739 			continue;
1740 
1741 		total_nb_qps += qp;
1742 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1743 		/* Use the first socket if SOCKET_ID_ANY is returned. */
1744 		if (dev_conf.socket_id == SOCKET_ID_ANY)
1745 			dev_conf.socket_id = 0;
1746 		dev_conf.nb_queue_pairs = qp;
1747 		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1748 
1749 		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1750 		if (dev_max_sess != 0 &&
1751 				dev_max_sess < get_nb_crypto_sessions())
1752 			rte_exit(EXIT_FAILURE,
1753 				"Device does not support at least %u "
1754 				"sessions", get_nb_crypto_sessions());
1755 
1756 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
1757 			rte_panic("Failed to initialize cryptodev %u\n",
1758 					cdev_id);
1759 
1760 		qp_conf.nb_descriptors = qp_desc_nb;
1761 		qp_conf.mp_session =
1762 			socket_ctx[dev_conf.socket_id].session_pool;
1763 		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1764 			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1765 					&qp_conf, dev_conf.socket_id))
1766 				rte_panic("Failed to setup queue %u for "
1767 						"cdev_id %u\n",	qp, cdev_id);
1768 
1769 		if (rte_cryptodev_start(cdev_id))
1770 			rte_panic("Failed to start cryptodev %u\n",
1771 					cdev_id);
1772 	}
1773 
1774 	printf("\n");
1775 
1776 	return total_nb_qps;
1777 }
1778 
1779 static int
1780 check_ptype(int portid)
1781 {
1782 	int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
1783 	int i, nb_ptypes;
1784 	uint32_t mask;
1785 
1786 	mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
1787 		      RTE_PTYPE_TUNNEL_MASK);
1788 
1789 	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
1790 	if (nb_ptypes <= 0)
1791 		return 0;
1792 
1793 	uint32_t ptypes[nb_ptypes];
1794 
1795 	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
1796 	for (i = 0; i < nb_ptypes; ++i) {
1797 		if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
1798 			l3_ipv4 = 1;
1799 		if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
1800 			l3_ipv6 = 1;
1801 		if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
1802 			tunnel_esp = 1;
1803 		if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
1804 			l4_udp = 1;
1805 	}
1806 
1807 	if (l3_ipv4 == 0)
1808 		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
1809 
1810 	if (l3_ipv6 == 0)
1811 		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
1812 
1813 	if (l4_udp == 0)
1814 		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
1815 
1816 	if (tunnel_esp == 0)
1817 		printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
1818 
1819 	if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
1820 		return 1;
1821 
1822 	return 0;
1823 
1824 }
1825 
1826 static inline void
1827 parse_ptype(struct rte_mbuf *m)
1828 {
1829 	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1830 	const struct rte_ipv4_hdr *iph4;
1831 	const struct rte_ipv6_hdr *iph6;
1832 	const struct rte_ether_hdr *eth;
1833 	const struct rte_udp_hdr *udp;
1834 	uint16_t nat_port, ether_type;
1835 	int next_proto = 0;
1836 	size_t ext_len = 0;
1837 	const uint8_t *p;
1838 	uint32_t l3len;
1839 
1840 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1841 	ether_type = eth->ether_type;
1842 
1843 	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
1844 		iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
1845 		l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
1846 			       RTE_IPV4_IHL_MULTIPLIER);
1847 
1848 		if (l3len == sizeof(struct rte_ipv4_hdr))
1849 			packet_type |= RTE_PTYPE_L3_IPV4;
1850 		else
1851 			packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
1852 
1853 		next_proto = iph4->next_proto_id;
1854 		p = (const uint8_t *)iph4;
1855 	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
1856 		iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
1857 		l3len = sizeof(struct ip6_hdr);
1858 
1859 		/* determine l3 header size up to ESP extension */
1860 		next_proto = iph6->proto;
1861 		p = (const uint8_t *)iph6;
1862 		while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
1863 			(next_proto = rte_ipv6_get_next_ext(p + l3len,
1864 						next_proto, &ext_len)) >= 0)
1865 			l3len += ext_len;
1866 
1867 		/* Skip packets whose IPv6 header exceeds the first segment length */
1868 		if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
1869 			goto exit;
1870 
1871 		if (l3len == sizeof(struct ip6_hdr))
1872 			packet_type |= RTE_PTYPE_L3_IPV6;
1873 		else
1874 			packet_type |= RTE_PTYPE_L3_IPV6_EXT;
1875 	}
1876 
1877 	switch (next_proto) {
1878 	case IPPROTO_ESP:
1879 		packet_type |= RTE_PTYPE_TUNNEL_ESP;
1880 		break;
1881 	case IPPROTO_UDP:
1882 		if (app_sa_prm.udp_encap == 1) {
1883 			udp = (const struct rte_udp_hdr *)(p + l3len);
1884 			nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
1885 			if (udp->src_port == nat_port ||
1886 			    udp->dst_port == nat_port)
1887 				packet_type |=
1888 					MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
1889 		}
1890 		break;
1891 	default:
1892 		break;
1893 	}
1894 exit:
1895 	m->packet_type |= packet_type;
1896 }
1897 
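/*
 * RX callback used when the PMD cannot classify the required packet types;
 * prefetches the next mbuf header while parsing the current one in software.
 */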
1898 static uint16_t
1899 parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
1900 	       struct rte_mbuf *pkts[], uint16_t nb_pkts,
1901 	       uint16_t max_pkts __rte_unused,
1902 	       void *user_param __rte_unused)
1903 {
1904 	uint32_t i;
1905 
1906 	if (unlikely(nb_pkts == 0))
1907 		return nb_pkts;
1908 
1909 	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
1910 	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
1911 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
1912 			struct rte_ether_hdr *));
1913 		parse_ptype(pkts[i]);
1914 	}
1915 	parse_ptype(pkts[i]);
1916 
1917 	return nb_pkts;
1918 }
1919 
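/*
 * Configure an ethdev port: apply the requested RX/TX offloads, set up one
 * TX queue per enabled lcore and the RX queues assigned to this port by the
 * lcore configuration, and optionally enable hardware IP reassembly.
 */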
1920 static void
1921 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
1922 	  uint8_t hw_reassembly)
1923 {
1924 	struct rte_eth_dev_info dev_info;
1925 	struct rte_eth_txconf *txconf;
1926 	uint16_t nb_tx_queue, nb_rx_queue;
1927 	uint16_t tx_queueid, rx_queueid, queue;
1928 	uint32_t lcore_id;
1929 	int32_t ret, socket_id;
1930 	struct lcore_conf *qconf;
1931 	struct rte_ether_addr ethaddr;
1932 	struct rte_eth_conf local_port_conf = port_conf;
1933 	struct rte_eth_ip_reassembly_params reass_capa = {0};
1934 	int ptype_supported;
1935 
1936 	ret = rte_eth_dev_info_get(portid, &dev_info);
1937 	if (ret != 0)
1938 		rte_exit(EXIT_FAILURE,
1939 			"Error during getting device (port %u) info: %s\n",
1940 			portid, strerror(-ret));
1941 
1942 	/* limit allowed HW offloads, as user requested */
1943 	dev_info.rx_offload_capa &= dev_rx_offload;
1944 	dev_info.tx_offload_capa &= dev_tx_offload;
1945 
1946 	printf("Configuring device port %u:\n", portid);
1947 
1948 	ret = rte_eth_macaddr_get(portid, &ethaddr);
1949 	if (ret != 0)
1950 		rte_exit(EXIT_FAILURE,
1951 			"Error getting MAC address (port %u): %s\n",
1952 			portid, rte_strerror(-ret));
1953 
1954 	rte_ether_addr_copy(&ethaddr, &ethaddr_tbl[portid].src);
1955 
1956 	rte_ether_addr_copy(&ethaddr_tbl[portid].dst,
1957 			    (struct rte_ether_addr *)(val_eth + portid));
1958 
1959 	rte_ether_addr_copy(&ethaddr_tbl[portid].src,
1960 			    (struct rte_ether_addr *)(val_eth + portid) + 1);
1961 
1962 	print_ethaddr("Address: ", &ethaddr);
1963 	printf("\n");
1964 
1965 	nb_rx_queue = get_port_nb_rx_queues(portid);
1966 	nb_tx_queue = nb_lcores;
1967 
1968 	if (nb_rx_queue > dev_info.max_rx_queues)
1969 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1970 				"(max rx queue is %u)\n",
1971 				nb_rx_queue, dev_info.max_rx_queues);
1972 
1973 	if (nb_tx_queue > dev_info.max_tx_queues)
1974 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1975 				"(max tx queue is %u)\n",
1976 				nb_tx_queue, dev_info.max_tx_queues);
1977 
1978 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1979 			nb_rx_queue, nb_tx_queue);
1980 
1981 	local_port_conf.rxmode.mtu = mtu_size;
1982 
1983 	if (multi_seg_required()) {
1984 		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1985 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1986 	}
1987 
1988 	local_port_conf.rxmode.offloads |= req_rx_offloads;
1989 	local_port_conf.txmode.offloads |= req_tx_offloads;
1990 
1991 	/* Check that all required capabilities are supported */
1992 	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1993 			local_port_conf.rxmode.offloads)
1994 		rte_exit(EXIT_FAILURE,
1995 			"Error: port %u required RX offloads: 0x%" PRIx64
1996 			", available RX offloads: 0x%" PRIx64 "\n",
1997 			portid, local_port_conf.rxmode.offloads,
1998 			dev_info.rx_offload_capa);
1999 
2000 	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
2001 			local_port_conf.txmode.offloads)
2002 		rte_exit(EXIT_FAILURE,
2003 			"Error: port %u required TX offloads: 0x%" PRIx64
2004 			", available TX offloads: 0x%" PRIx64 "\n",
2005 			portid, local_port_conf.txmode.offloads,
2006 			dev_info.tx_offload_capa);
2007 
2008 	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
2009 		local_port_conf.txmode.offloads |=
2010 			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2011 
2012 	printf("port %u configuring rx_offloads=0x%" PRIx64
2013 		", tx_offloads=0x%" PRIx64 "\n",
2014 		portid, local_port_conf.rxmode.offloads,
2015 		local_port_conf.txmode.offloads);
2016 
2017 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2018 		dev_info.flow_type_rss_offloads;
2019 	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2020 			port_conf.rx_adv_conf.rss_conf.rss_hf) {
2021 		printf("Port %u modified RSS hash function based on hardware support, "
2022 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
2023 			portid,
2024 			port_conf.rx_adv_conf.rss_conf.rss_hf,
2025 			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2026 	}
2027 
2028 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2029 			&local_port_conf);
2030 	if (ret < 0)
2031 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
2032 				"err=%d, port=%d\n", ret, portid);
2033 
2034 	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2035 	if (ret < 0)
2036 		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2037 				"err=%d, port=%d\n", ret, portid);
2038 
2039 	/* Check if required ptypes are supported */
2040 	ptype_supported = check_ptype(portid);
2041 	if (!ptype_supported)
2042 		printf("Port %d: packet type info will be parsed in software\n", portid);
2043 
2044 	/* init one TX queue per lcore */
2045 	tx_queueid = 0;
2046 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2047 		if (rte_lcore_is_enabled(lcore_id) == 0)
2048 			continue;
2049 
2050 		if (numa_on)
2051 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2052 		else
2053 			socket_id = 0;
2054 
2055 		/* init TX queue */
2056 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2057 
2058 		txconf = &dev_info.default_txconf;
2059 		txconf->offloads = local_port_conf.txmode.offloads;
2060 
2061 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2062 				socket_id, txconf);
2063 		if (ret < 0)
2064 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2065 					"err=%d, port=%d\n", ret, portid);
2066 
2067 		qconf = &lcore_conf[lcore_id];
2068 		qconf->tx_queue_id[portid] = tx_queueid;
2069 
2070 		tx_queueid++;
2071 
2072 		/* init RX queues */
2073 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2074 			struct rte_eth_rxconf rxq_conf;
2075 			struct rte_mempool *pool;
2076 
2077 			if (portid != qconf->rx_queue_list[queue].port_id)
2078 				continue;
2079 
2080 			rx_queueid = qconf->rx_queue_list[queue].queue_id;
2081 
2082 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2083 					socket_id);
2084 
2085 			rxq_conf = dev_info.default_rxconf;
2086 			rxq_conf.offloads = local_port_conf.rxmode.offloads;
2087 
2088 			if (per_port_pool)
2089 				pool = socket_ctx[socket_id].mbuf_pool[portid];
2090 			else
2091 				pool = socket_ctx[socket_id].mbuf_pool[0];
2092 
2093 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2094 					nb_rxd,	socket_id, &rxq_conf, pool);
2095 			if (ret < 0)
2096 				rte_exit(EXIT_FAILURE,
2097 					"rte_eth_rx_queue_setup: err=%d, "
2098 					"port=%d\n", ret, portid);
2099 
2100 			/* Register Rx callback if ptypes are not supported */
2101 			if (!ptype_supported &&
2102 			    !rte_eth_add_rx_callback(portid, rx_queueid,
2103 						     parse_ptype_cb, NULL)) {
2104 				printf("Failed to add rx callback: port=%d, "
2105 				       "rx_queueid=%d\n", portid, rx_queueid);
2106 			}
2107 
2108 
2109 		}
2110 	}
2111 
2112 	if (hw_reassembly) {
2113 		rte_eth_ip_reassembly_capability_get(portid, &reass_capa);
2114 		reass_capa.timeout_ms = frag_ttl_ns;
2115 		rte_eth_ip_reassembly_conf_set(portid, &reass_capa);
2116 	}
2117 	printf("\n");
2118 }
2119 
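/*
 * Return the largest session private data size across all crypto devices
 * and security contexts (cryptodev and enabled ethdev ports), so a single
 * session mempool element fits any session the application may create.
 */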
2120 static size_t
2121 max_session_size(void)
2122 {
2123 	size_t max_sz, sz;
2124 	void *sec_ctx;
2125 	int16_t cdev_id, port_id, n;
2126 
2127 	max_sz = 0;
2128 	n = rte_cryptodev_count();
2129 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
2130 		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2131 		if (sz > max_sz)
2132 			max_sz = sz;
2133 		/*
2134 		 * If crypto device is security capable, need to check the
2135 		 * size of security session as well.
2136 		 */
2137 
2138 		/* Get security context of the crypto device */
2139 		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2140 		if (sec_ctx == NULL)
2141 			continue;
2142 
2143 		/* Get size of security session */
2144 		sz = rte_security_session_get_size(sec_ctx);
2145 		if (sz > max_sz)
2146 			max_sz = sz;
2147 	}
2148 
2149 	RTE_ETH_FOREACH_DEV(port_id) {
2150 		if ((enabled_port_mask & (1 << port_id)) == 0)
2151 			continue;
2152 
2153 		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2154 		if (sec_ctx == NULL)
2155 			continue;
2156 
2157 		sz = rte_security_session_get_size(sec_ctx);
2158 		if (sz > max_sz)
2159 			max_sz = sz;
2160 	}
2161 
2162 	return max_sz;
2163 }
2164 
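/*
 * Create the per-socket symmetric session mempool, sized from the number of
 * crypto sessions required by the SA configuration plus per-lcore cache
 * headroom.
 */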
2165 static void
2166 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2167 {
2168 	char mp_name[RTE_MEMPOOL_NAMESIZE];
2169 	struct rte_mempool *sess_mp;
2170 	uint32_t nb_sess;
2171 
2172 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2173 			"sess_mp_%u", socket_id);
2174 	nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2175 		rte_lcore_count());
2176 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2177 			CDEV_MP_CACHE_MULTIPLIER);
2178 	sess_mp = rte_cryptodev_sym_session_pool_create(
2179 			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
2180 			0, socket_id);
2181 	ctx->session_pool = sess_mp;
2182 
2183 	if (ctx->session_pool == NULL)
2184 		rte_exit(EXIT_FAILURE,
2185 			"Cannot init session pool on socket %d\n", socket_id);
2186 	else
2187 		printf("Allocated session pool on socket %d\n",	socket_id);
2188 }
2189 
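/*
 * Create the packet mbuf pool for a socket (one pool per port when per-port
 * pools are enabled) and, if multi-segment support is required, a shared
 * pool of indirect mbufs for fragmentation.
 */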
2190 static void
2191 pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
2192 	  uint32_t nb_mbuf)
2193 {
2194 	char s[64];
2195 	int32_t ms;
2196 
2197 
2198 	/* return early if the mbuf pool was already initialised by an earlier call */
2199 	if (socket_ctx[socket_id].mbuf_pool[portid])
2200 		return;
2201 
2202 	snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
2203 	ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
2204 							 MEMPOOL_CACHE_SIZE,
2205 							 ipsec_metadata_size(),
2206 							 frame_buf_size,
2207 							 socket_id);
2208 
2209 	/*
2210 	 * if multi-segment support is enabled, then create a pool
2211 	 * for indirect mbufs. This is not per-port but global.
2212 	 */
2213 	ms = multi_seg_required();
2214 	if (ms != 0 && !ctx->mbuf_pool_indir) {
2215 		snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2216 		ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2217 			MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2218 	}
2219 
2220 	if (ctx->mbuf_pool[portid] == NULL ||
2221 	    (ms != 0 && ctx->mbuf_pool_indir == NULL))
2222 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2223 				socket_id);
2224 	else
2225 		printf("Allocated mbuf pool on socket %d\n", socket_id);
2226 }
2227 
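/*
 * Callback for RTE_ETH_EVENT_IPSEC events raised by inline IPsec capable
 * ports; validates the event descriptor and its subtype (e.g. ESN soft
 * limit overflow) without taking further action.
 */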
2228 static int
2229 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2230 		 void *param, void *ret_param)
2231 {
2232 	uint64_t md;
2233 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
2234 
2235 	RTE_SET_USED(param);
2236 	RTE_SET_USED(port_id);
2237 
2238 	if (type != RTE_ETH_EVENT_IPSEC)
2239 		return -1;
2240 
2241 	event_desc = ret_param;
2242 	if (event_desc == NULL) {
2243 		printf("Event descriptor not set\n");
2244 		return -1;
2245 	}
2246 
2247 	md = event_desc->metadata;
2248 
2249 	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW) {
2250 		if (md == 0)
2251 			return -1;
2252 	} else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2254 		printf("Invalid IPsec event reported\n");
2255 		return -1;
2256 	}
2257 
2258 	return -1;
2259 }
2260 
2261 static int
2262 ethdev_reset_event_callback(uint16_t port_id,
2263 		enum rte_eth_event_type type,
2264 		 void *param __rte_unused, void *ret_param __rte_unused)
2265 {
2266 	printf("Reset Event on port id %d type %d\n", port_id, type);
2267 	printf("Force quit application\n");
2268 	force_quit = true;
2269 	return 0;
2270 }
2271 
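/*
 * RX callback performing IPv4/IPv6 fragment reassembly. Completed packets
 * are compacted to the front of the burst; fragments still waiting for
 * reassembly are held in the fragment table and removed from the burst,
 * and the death row is drained whenever fragments were seen.
 */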
2272 static uint16_t
2273 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2274 	struct rte_mbuf *pkt[], uint16_t nb_pkts,
2275 	__rte_unused uint16_t max_pkts, void *user_param)
2276 {
2277 	uint64_t tm;
2278 	uint32_t i, k;
2279 	struct lcore_conf *lc;
2280 	struct rte_mbuf *mb;
2281 	struct rte_ether_hdr *eth;
2282 
2283 	lc = user_param;
2284 	k = 0;
2285 	tm = 0;
2286 
2287 	for (i = 0; i != nb_pkts; i++) {
2288 
2289 		mb = pkt[i];
2290 		eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2291 		if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2292 
2293 			struct rte_ipv4_hdr *iph;
2294 
2295 			iph = (struct rte_ipv4_hdr *)(eth + 1);
2296 			if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2297 
2298 				mb->l2_len = sizeof(*eth);
2299 				mb->l3_len = sizeof(*iph);
2300 				tm = (tm != 0) ? tm : rte_rdtsc();
2301 				mb = rte_ipv4_frag_reassemble_packet(
2302 					lc->frag.tbl, &lc->frag.dr,
2303 					mb, tm, iph);
2304 
2305 				if (mb != NULL) {
2306 					/* fix ip cksum after reassemble. */
2307 					iph = rte_pktmbuf_mtod_offset(mb,
2308 						struct rte_ipv4_hdr *,
2309 						mb->l2_len);
2310 					iph->hdr_checksum = 0;
2311 					iph->hdr_checksum = rte_ipv4_cksum(iph);
2312 				}
2313 			}
2314 		} else if (eth->ether_type ==
2315 				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2316 
2317 			struct rte_ipv6_hdr *iph;
2318 			struct rte_ipv6_fragment_ext *fh;
2319 
2320 			iph = (struct rte_ipv6_hdr *)(eth + 1);
2321 			fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2322 			if (fh != NULL) {
2323 				mb->l2_len = sizeof(*eth);
2324 				mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2325 					sizeof(*fh);
2326 				tm = (tm != 0) ? tm : rte_rdtsc();
2327 				mb = rte_ipv6_frag_reassemble_packet(
2328 					lc->frag.tbl, &lc->frag.dr,
2329 					mb, tm, iph, fh);
2330 				if (mb != NULL)
2331 					/* fix l3_len after reassemble. */
2332 					mb->l3_len = mb->l3_len - sizeof(*fh);
2333 			}
2334 		}
2335 
2336 		pkt[k] = mb;
2337 		k += (mb != NULL);
2338 	}
2339 
2340 	/* some fragments were encountered, drain death row */
2341 	if (tm != 0)
2342 		rte_ip_frag_free_death_row(&lc->frag.dr, 0);
2343 
2344 	return k;
2345 }
2346 
2347 
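/*
 * Per-lcore reassembly setup: create the fragment table on the lcore's
 * socket and install the reassembly RX callback on each of its RX queues.
 */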
2348 static int
2349 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2350 {
2351 	int32_t sid;
2352 	uint32_t i;
2353 	uint64_t frag_cycles;
2354 	const struct lcore_rx_queue *rxq;
2355 	const struct rte_eth_rxtx_callback *cb;
2356 
2357 	/* create fragment table */
2358 	sid = rte_lcore_to_socket_id(cid);
2359 	frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2360 		NS_PER_S * frag_ttl_ns;
2361 
2362 	lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2363 		FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2364 	if (lc->frag.tbl == NULL) {
2365 		printf("%s(%u): failed to create fragment table of size: %u, "
2366 			"error code: %d\n",
2367 			__func__, cid, frag_tbl_sz, rte_errno);
2368 		return -ENOMEM;
2369 	}
2370 
2371 	/* setup reassemble RX callbacks for all queues */
2372 	for (i = 0; i != lc->nb_rx_queue; i++) {
2373 
2374 		rxq = lc->rx_queue_list + i;
2375 		cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2376 			rx_callback, lc);
2377 		if (cb == NULL) {
2378 			printf("%s(%u): failed to install RX callback for "
2379 				"portid=%u, queueid=%u, error code: %d\n",
2380 				__func__, cid,
2381 				rxq->port_id, rxq->queue_id, rte_errno);
2382 			return -ENOMEM;
2383 		}
2384 	}
2385 
2386 	return 0;
2387 }
2388 
2389 static int
2390 reassemble_init(void)
2391 {
2392 	int32_t rc;
2393 	uint32_t i, lc;
2394 
2395 	rc = 0;
2396 	for (i = 0; i != nb_lcore_params; i++) {
2397 		lc = lcore_params[i].lcore_id;
2398 		rc = reassemble_lcore_init(lcore_conf + lc, lc);
2399 		if (rc != 0)
2400 			break;
2401 	}
2402 
2403 	return rc;
2404 }
2405 
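/*
 * When RX security offload is enabled on the port, install a catch-all
 * rte_flow rule steering every ESP packet to the SECURITY action.
 */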
2406 static void
2407 create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
2408 {
2409 	struct rte_flow_action action[2];
2410 	struct rte_flow_item pattern[2];
2411 	struct rte_flow_attr attr = {0};
2412 	struct rte_flow_error err;
2413 	struct rte_flow *flow;
2414 	int ret;
2415 
2416 	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
2417 		return;
2418 
2419 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
2420 
2421 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
2422 	pattern[0].spec = NULL;
2423 	pattern[0].mask = NULL;
2424 	pattern[0].last = NULL;
2425 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
2426 
2427 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
2428 	action[0].conf = NULL;
2429 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
2430 	action[1].conf = NULL;
2431 
2432 	attr.ingress = 1;
2433 
2434 	ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
2435 	if (ret)
2436 		return;
2437 
2438 	flow = rte_flow_create(port_id, &attr, pattern, action, &err);
2439 	if (flow == NULL)
2440 		return;
2441 
2442 	flow_info_tbl[port_id].rx_def_flow = flow;
2443 	RTE_LOG(INFO, IPSEC,
2444 		"Created default flow enabling SECURITY for all ESP traffic on port %d\n",
2445 		port_id);
2446 }
2447 
2448 static void
2449 signal_handler(int signum)
2450 {
2451 	if (signum == SIGINT || signum == SIGTERM) {
2452 		printf("\n\nSignal %d received, preparing to exit...\n",
2453 				signum);
2454 		force_quit = true;
2455 	}
2456 }
2457 
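/*
 * Verify that every configured SA uses a session type supported by event
 * mode (inline protocol or lookaside protocol) and enable the event crypto
 * adapter when lookaside protocol sessions are present.
 */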
2458 static void
2459 ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
2460 		struct eventmode_conf *em_conf)
2461 {
2462 	struct rte_ipsec_session *ips;
2463 	int32_t i;
2464 
2465 	if (!sa || !nb_sa)
2466 		return;
2467 
2468 	for (i = 0; i < nb_sa; i++) {
2469 		ips = ipsec_get_primary_session(&sa[i]);
2470 		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
2471 			em_conf->enable_event_crypto_adapter = true;
2472 		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
2473 			rte_exit(EXIT_FAILURE, "Event mode supports only inline "
2474 				 "and lookaside protocol sessions\n");
2475 	}
2476 
2477 }
2478 
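/*
 * Validate event/poll mode options and, for event mode, synthesise a
 * one-RX-queue-per-port lcore configuration so port_init() can be shared
 * with poll mode.
 */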
2479 static int32_t
2480 check_event_mode_params(struct eh_conf *eh_conf)
2481 {
2482 	struct eventmode_conf *em_conf = NULL;
2483 	struct lcore_params *params;
2484 	uint16_t portid;
2485 
2486 	if (!eh_conf || !eh_conf->mode_params)
2487 		return -EINVAL;
2488 
2489 	/* Get eventmode conf */
2490 	em_conf = eh_conf->mode_params;
2491 
2492 	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
2493 	    em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
2494 		printf("error: option --event-schedule-type applies only to "
2495 		       "event mode\n");
2496 		return -EINVAL;
2497 	}
2498 
2499 	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
2500 		return 0;
2501 
2502 	/* Set schedule type to ORDERED if it wasn't explicitly set by user */
2503 	if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
2504 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
2505 
2506 	/*
2507 	 * Event mode currently supports inline and lookaside protocol
2508 	 * sessions. If there are other types of sessions configured then exit
2509 	 * with error.
2510 	 */
2511 	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
2512 	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
2513 
2514 	/* Option --config does not apply to event mode */
2515 	if (nb_lcore_params > 0) {
2516 		printf("error: option --config applies only to poll mode\n");
2517 		return -EINVAL;
2518 	}
2519 
2520 	/*
2521 	 * In order to use the same port_init routine for both poll and event
2522 	 * modes initialize lcore_params with one queue for each eth port
2523 	 */
2524 	lcore_params = lcore_params_array;
2525 	RTE_ETH_FOREACH_DEV(portid) {
2526 		if ((enabled_port_mask & (1 << portid)) == 0)
2527 			continue;
2528 
2529 		params = &lcore_params[nb_lcore_params++];
2530 		params->port_id = portid;
2531 		params->queue_id = 0;
2532 		params->lcore_id = rte_get_next_lcore(0, 0, 1);
2533 	}
2534 
2535 	return 0;
2536 }
2537 
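/*
 * Free a single IPsec session: via the cryptodev API for NONE and
 * CPU_CRYPTO action types, via rte_security otherwise.
 */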
2538 static int
2539 one_session_free(struct rte_ipsec_session *ips)
2540 {
2541 	int32_t ret = 0;
2542 
2543 	if (ips->type == RTE_SECURITY_ACTION_TYPE_NONE ||
2544 		ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
2545 		/* Session has not been created */
2546 		if (ips->crypto.ses == NULL)
2547 			return 0;
2548 
2549 		ret = rte_cryptodev_sym_session_free(ips->crypto.dev_id,
2550 				ips->crypto.ses);
2551 	} else {
2552 		/* Session has not been created */
2553 		if (ips->security.ctx == NULL || ips->security.ses == NULL)
2554 			return 0;
2555 
2556 		ret = rte_security_session_destroy(ips->security.ctx,
2557 						   ips->security.ses);
2558 	}
2559 
2560 	return ret;
2561 }
2562 
2563 static void
2564 sessions_free(struct sa_ctx *sa_ctx)
2565 {
2566 	struct rte_ipsec_session *ips;
2567 	struct ipsec_sa *sa;
2568 	int32_t ret;
2569 	uint32_t i;
2570 
2571 	if (!sa_ctx)
2572 		return;
2573 
2574 	for (i = 0; i < sa_ctx->nb_sa; i++) {
2575 
2576 		sa = &sa_ctx->sa[i];
2577 		if (!sa->spi)
2578 			continue;
2579 
2580 		ips = ipsec_get_primary_session(sa);
2581 		ret = one_session_free(ips);
2582 		if (ret)
2583 			RTE_LOG(ERR, IPSEC, "Failed to destroy security "
2584 					    "session type %d, spi %d\n",
2585 					    ips->type, sa->spi);
2586 	}
2587 }
2588 
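/*
 * Estimate the mbuf pool size from RX/TX descriptor counts, per-lcore
 * bursts and mempool caches, crypto queue depth and fragment table
 * capacity, with a floor of 8192 mbufs.
 */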
2589 static uint32_t
2590 calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
2591 		uint32_t nb_txq)
2592 {
2593 	return RTE_MAX((nb_rxq * nb_rxd +
2594 			nb_ports * nb_lcores * MAX_PKT_BURST +
2595 			nb_ports * nb_txq * nb_txd +
2596 			nb_lcores * MEMPOOL_CACHE_SIZE +
2597 			nb_crypto_qp * qp_desc_nb +
2598 			nb_lcores * frag_tbl_sz *
2599 			FRAG_TBL_BUCKET_ENTRIES),
2600 		       8192U);
2601 }
2602 
2603 
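/*
 * Telemetry handler reporting RX/TX/drop counters, either for the lcore
 * given as parameter or aggregated over all enabled lcores.
 */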
2604 static int
2605 handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
2606 		const char *params, struct rte_tel_data *data)
2607 {
2608 	uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
2609 	unsigned int coreid;
2610 
2611 	rte_tel_data_start_dict(data);
2612 
2613 	if (params) {
2614 		coreid = (uint32_t)atoi(params);
2615 		if (rte_lcore_is_enabled(coreid) == 0)
2616 			return -EINVAL;
2617 
2618 		total_pkts_dropped = core_statistics[coreid].dropped;
2619 		total_pkts_tx = core_statistics[coreid].tx;
2620 		total_pkts_rx = core_statistics[coreid].rx;
2621 
2622 	} else {
2623 		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
2624 
2625 			/* skip disabled cores */
2626 			if (rte_lcore_is_enabled(coreid) == 0)
2627 				continue;
2628 
2629 			total_pkts_dropped += core_statistics[coreid].dropped;
2630 			total_pkts_tx += core_statistics[coreid].tx;
2631 			total_pkts_rx += core_statistics[coreid].rx;
2632 		}
2633 	}
2634 
2635 	/* add telemetry key/value pairs */
2636 	rte_tel_data_add_dict_uint(data, "packets received", total_pkts_rx);
2637 
2638 	rte_tel_data_add_dict_uint(data, "packets transmitted", total_pkts_tx);
2639 
2640 	rte_tel_data_add_dict_uint(data, "packets dropped",
2641 				   total_pkts_dropped);
2642 
2643 
2644 	return 0;
2645 }
2646 
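/* Accumulate one enabled lcore's counters into the running totals. */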
2647 static void
2648 update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2649 {
2650 	struct ipsec_core_statistics *lcore_stats;
2651 
2652 	/* skip disabled cores */
2653 	if (rte_lcore_is_enabled(coreid) == 0)
2654 		return;
2655 
2656 	lcore_stats = &core_statistics[coreid];
2657 
2658 	total->rx += lcore_stats->rx;
2659 	total->dropped += lcore_stats->dropped;
2660 	total->frag_dropped += lcore_stats->frag_dropped;
2661 	total->tx += lcore_stats->tx;
2662 
2663 	/* outbound stats */
2664 	total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
2665 	total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
2666 	total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
2667 
2668 	total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
2669 	total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
2670 	total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
2671 
2672 	total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
2673 
2674 	/* inbound stats */
2675 	total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
2676 	total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
2677 	total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
2678 
2679 	total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
2680 	total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
2681 	total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
2682 
2683 	total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
2684 
2685 
2686 	/* routing stats */
2687 	total->lpm4.miss += lcore_stats->lpm4.miss;
2688 	total->lpm6.miss += lcore_stats->lpm6.miss;
2689 }
2690 
2691 static void
2692 update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2693 {
2694 	memset(total, 0, sizeof(*total));
2695 
2696 	if (coreid != UINT32_MAX) {
2697 		update_lcore_statistics(total, coreid);
2698 	} else {
2699 		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
2700 			update_lcore_statistics(total, coreid);
2701 	}
2702 }
2703 
2704 static int
2705 handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
2706 		const char *params, struct rte_tel_data *data)
2707 {
2708 	struct ipsec_core_statistics total_stats;
2709 
2710 	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2711 	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2712 	struct rte_tel_data *sad_data = rte_tel_data_alloc();
2713 	unsigned int coreid = UINT32_MAX;
2714 	int rc = 0;
2715 
2716 	/* verify allocated telemetry data structures */
2717 	if (!spd4_data || !spd6_data || !sad_data) {
2718 		rc = -ENOMEM;
2719 		goto exit;
2720 	}
2721 
2722 	/* initialize telemetry data structs as dicts */
2723 	rte_tel_data_start_dict(data);
2724 
2725 	rte_tel_data_start_dict(spd4_data);
2726 	rte_tel_data_start_dict(spd6_data);
2727 	rte_tel_data_start_dict(sad_data);
2728 
2729 	if (params) {
2730 		coreid = (uint32_t)atoi(params);
2731 		if (rte_lcore_is_enabled(coreid) == 0) {
2732 			rc = -EINVAL;
2733 			goto exit;
2734 		}
2735 	}
2736 
2737 	update_statistics(&total_stats, coreid);
2738 
2739 	/* add spd 4 telemetry key/value pairs */
2740 
2741 	rte_tel_data_add_dict_uint(spd4_data, "protect",
2742 				   total_stats.outbound.spd4.protect);
2743 	rte_tel_data_add_dict_uint(spd4_data, "bypass",
2744 				   total_stats.outbound.spd4.bypass);
2745 	rte_tel_data_add_dict_uint(spd4_data, "discard",
2746 				   total_stats.outbound.spd4.discard);
2747 
2748 	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2749 
2750 	/* add spd 6 telemetry key/value pairs */
2751 
2752 	rte_tel_data_add_dict_uint(spd6_data, "protect",
2753 				   total_stats.outbound.spd6.protect);
2754 	rte_tel_data_add_dict_uint(spd6_data, "bypass",
2755 				   total_stats.outbound.spd6.bypass);
2756 	rte_tel_data_add_dict_uint(spd6_data, "discard",
2757 				   total_stats.outbound.spd6.discard);
2758 
2759 	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2760 
2761 	/* add sad telemetry key/value pairs */
2762 
2763 	rte_tel_data_add_dict_uint(sad_data, "miss",
2764 				   total_stats.outbound.sad.miss);
2765 
2766 	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2767 
2768 exit:
2769 	if (rc) {
2770 		rte_tel_data_free(spd4_data);
2771 		rte_tel_data_free(spd6_data);
2772 		rte_tel_data_free(sad_data);
2773 	}
2774 	return rc;
2775 }
2776 
2777 static int
2778 handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
2779 		const char *params, struct rte_tel_data *data)
2780 {
2781 	struct ipsec_core_statistics total_stats;
2782 
2783 	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2784 	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2785 	struct rte_tel_data *sad_data = rte_tel_data_alloc();
2786 	unsigned int coreid = UINT32_MAX;
2787 	int rc = 0;
2788 
2789 	/* verify allocated telemetry data structures */
2790 	if (!spd4_data || !spd6_data || !sad_data) {
2791 		rc = -ENOMEM;
2792 		goto exit;
2793 	}
2794 
2795 	/* initialize telemetry data structs as dicts */
2796 	rte_tel_data_start_dict(data);
2797 	rte_tel_data_start_dict(spd4_data);
2798 	rte_tel_data_start_dict(spd6_data);
2799 	rte_tel_data_start_dict(sad_data);
2800 
2801 	/* add children dicts to parent dict */
2802 
2803 	if (params) {
2804 		coreid = (uint32_t)atoi(params);
2805 		if (rte_lcore_is_enabled(coreid) == 0) {
2806 			rc = -EINVAL;
2807 			goto exit;
2808 		}
2809 	}
2810 
2811 	update_statistics(&total_stats, coreid);
2812 
2813 	/* add sad telemetry key/value pairs */
2814 
2815 	rte_tel_data_add_dict_uint(sad_data, "miss",
2816 				   total_stats.inbound.sad.miss);
2817 
2818 	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2819 
2820 	/* add spd 4 telemetry key/value pairs */
2821 
2822 	rte_tel_data_add_dict_uint(spd4_data, "protect",
2823 				   total_stats.inbound.spd4.protect);
2824 	rte_tel_data_add_dict_uint(spd4_data, "bypass",
2825 				   total_stats.inbound.spd4.bypass);
2826 	rte_tel_data_add_dict_uint(spd4_data, "discard",
2827 				   total_stats.inbound.spd4.discard);
2828 
2829 	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2830 
2831 	/* add spd 6 telemetry key/value pairs */
2832 
2833 	rte_tel_data_add_dict_uint(spd6_data, "protect",
2834 				   total_stats.inbound.spd6.protect);
2835 	rte_tel_data_add_dict_uint(spd6_data, "bypass",
2836 				   total_stats.inbound.spd6.bypass);
2837 	rte_tel_data_add_dict_uint(spd6_data, "discard",
2838 				   total_stats.inbound.spd6.discard);
2839 
2840 	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2841 
2842 exit:
2843 	if (rc) {
2844 		rte_tel_data_free(spd4_data);
2845 		rte_tel_data_free(spd6_data);
2846 		rte_tel_data_free(sad_data);
2847 	}
2848 	return rc;
2849 }
2850 
2851 static int
2852 handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
2853 		const char *params, struct rte_tel_data *data)
2854 {
2855 	struct ipsec_core_statistics total_stats;
2856 
2857 	struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
2858 	struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
2859 	unsigned int coreid = UINT32_MAX;
2860 	int rc = 0;
2861 
2862 	/* verify allocated telemetry data structures */
2863 	if (!lpm4_data || !lpm6_data) {
2864 		rc = -ENOMEM;
2865 		goto exit;
2866 	}
2867 
2868 	/* initialize telemetry data structs as dicts */
2869 	rte_tel_data_start_dict(data);
2870 	rte_tel_data_start_dict(lpm4_data);
2871 	rte_tel_data_start_dict(lpm6_data);
2872 
2873 
2874 	if (params) {
2875 		coreid = (uint32_t)atoi(params);
2876 		if (rte_lcore_is_enabled(coreid) == 0) {
2877 			rc = -EINVAL;
2878 			goto exit;
2879 		}
2880 	}
2881 
2882 	update_statistics(&total_stats, coreid);
2883 
2884 	/* add lpm 4 telemetry key/value pairs */
2885 	rte_tel_data_add_dict_uint(lpm4_data, "miss", total_stats.lpm4.miss);
2886 
2887 	rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
2888 
2889 	/* add lpm 6 telemetry key/value pairs */
2890 	rte_tel_data_add_dict_uint(lpm6_data, "miss", total_stats.lpm6.miss);
2891 
2892 	rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
2893 
2894 exit:
2895 	if (rc) {
2896 		rte_tel_data_free(lpm4_data);
2897 		rte_tel_data_free(lpm6_data);
2898 	}
2899 	return rc;
2900 }
2901 
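/*
 * Register the application's telemetry endpoints. Assuming the standard
 * dpdk-telemetry.py client, the aggregated stats could be queried with
 * "/examples/ipsec-secgw/stats" and a single lcore (e.g. lcore 1) with
 * "/examples/ipsec-secgw/stats,1".
 */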
2902 static void
2903 ipsec_secgw_telemetry_init(void)
2904 {
2905 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
2906 		handle_telemetry_cmd_ipsec_secgw_stats,
2907 		"Returns global stats. "
2908 		"Optional Parameters: int <logical core id>");
2909 
2910 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
2911 		handle_telemetry_cmd_ipsec_secgw_stats_outbound,
2912 		"Returns outbound global stats. "
2913 		"Optional Parameters: int <logical core id>");
2914 
2915 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
2916 		handle_telemetry_cmd_ipsec_secgw_stats_inbound,
2917 		"Returns inbound global stats. "
2918 		"Optional Parameters: int <logical core id>");
2919 
2920 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
2921 		handle_telemetry_cmd_ipsec_secgw_stats_routing,
2922 		"Returns routing stats. "
2923 		"Optional Parameters: int <logical core id>");
2924 }
2925 
2926 int32_t
2927 main(int32_t argc, char **argv)
2928 {
2929 	int32_t ret;
2930 	uint32_t lcore_id, nb_txq, nb_rxq = 0;
2931 	uint32_t cdev_id;
2932 	uint32_t i;
2933 	uint8_t socket_id;
2934 	uint16_t portid, nb_crypto_qp, nb_ports = 0;
2935 	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
2936 	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
2937 	uint8_t req_hw_reassembly[RTE_MAX_ETHPORTS];
2938 	struct eh_conf *eh_conf = NULL;
2939 	uint32_t ipv4_cksum_port_mask = 0;
2940 	size_t sess_sz;
2941 
2942 	nb_bufs_in_pool = 0;
2943 
2944 	/* init EAL */
2945 	ret = rte_eal_init(argc, argv);
2946 	if (ret < 0)
2947 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2948 	argc -= ret;
2949 	argv += ret;
2950 
2951 	force_quit = false;
2952 	signal(SIGINT, signal_handler);
2953 	signal(SIGTERM, signal_handler);
2954 
2955 	/* initialize event helper configuration */
2956 	eh_conf = eh_conf_init();
2957 	if (eh_conf == NULL)
2958 		rte_exit(EXIT_FAILURE, "Failed to init event helper config\n");
2959 
2960 	/* parse application arguments (after the EAL ones) */
2961 	ret = parse_args(argc, argv, eh_conf);
2962 	if (ret < 0)
2963 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2964 
2965 	ipsec_secgw_telemetry_init();
2966 
2967 	/* parse configuration file */
2968 	if (parse_cfg_file(cfgfile) < 0) {
2969 		printf("parsing file \"%s\" failed\n",
2970 			cfgfile);
2971 		print_usage(argv[0]);
2972 		return -1;
2973 	}
2974 
2975 	if ((unprotected_port_mask & enabled_port_mask) !=
2976 			unprotected_port_mask)
2977 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2978 				unprotected_port_mask);
2979 
2980 	if (unprotected_port_mask && !nb_sa_in)
2981 		rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
2982 
2983 	if (check_poll_mode_params(eh_conf) < 0)
2984 		rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
2985 
2986 	if (check_event_mode_params(eh_conf) < 0)
2987 		rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
2988 
2989 	ret = init_lcore_rx_queues();
2990 	if (ret < 0)
2991 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2992 
2993 	nb_lcores = rte_lcore_count();
2994 
2995 	sess_sz = max_session_size();
2996 
2997 	/*
2998 	 * In event mode request minimum number of crypto queues
2999 	 * to be reserved equal to number of ports.
3000 	 */
3001 	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
3002 		nb_crypto_qp = rte_eth_dev_count_avail();
3003 	else
3004 		nb_crypto_qp = 0;
3005 
3006 	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
3007 
3008 	if (nb_bufs_in_pool == 0) {
3009 		RTE_ETH_FOREACH_DEV(portid) {
3010 			if ((enabled_port_mask & (1 << portid)) == 0)
3011 				continue;
3012 			nb_ports++;
3013 			nb_rxq += get_port_nb_rx_queues(portid);
3014 		}
3015 
3016 		nb_txq = nb_lcores;
3017 
3018 		nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
3019 						nb_rxq, nb_txq);
3020 	}
3021 
3022 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
3023 		if (rte_lcore_is_enabled(lcore_id) == 0)
3024 			continue;
3025 
3026 		if (numa_on)
3027 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
3028 		else
3029 			socket_id = 0;
3030 
3031 		if (per_port_pool) {
3032 			RTE_ETH_FOREACH_DEV(portid) {
3033 				if ((enabled_port_mask & (1 << portid)) == 0)
3034 					continue;
3035 
3036 				pool_init(&socket_ctx[socket_id], socket_id,
3037 					  portid, nb_bufs_in_pool);
3038 			}
3039 		} else {
3040 			pool_init(&socket_ctx[socket_id], socket_id, 0,
3041 				  nb_bufs_in_pool);
3042 		}
3043 
3044 		if (socket_ctx[socket_id].session_pool)
3045 			continue;
3046 
3047 		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
3048 	}
3049 	printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
3050 
3051 	RTE_ETH_FOREACH_DEV(portid) {
3052 		if ((enabled_port_mask & (1 << portid)) == 0)
3053 			continue;
3054 
3055 		sa_check_offloads(portid, &req_rx_offloads[portid], &req_tx_offloads[portid],
3056 				  &req_hw_reassembly[portid]);
3057 		port_init(portid, req_rx_offloads[portid], req_tx_offloads[portid],
3058 			  req_hw_reassembly[portid]);
3059 		if ((req_tx_offloads[portid] & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
3060 			ipv4_cksum_port_mask |= 1U << portid;
3061 	}
3062 
3063 	tx_offloads.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
3064 	tx_offloads.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
3065 	/* Update per lcore checksum offload support only if all ports support it */
3066 	if (ipv4_cksum_port_mask == enabled_port_mask)
3067 		tx_offloads.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
3068 
3069 	lcore_id = 0;
3070 	RTE_LCORE_FOREACH(lcore_id) {
3071 		/* Pre-populate pkt offloads based on capabilities */
3072 		lcore_conf[lcore_id].outbound.ipv4_offloads = tx_offloads.ipv4_offloads;
3073 		lcore_conf[lcore_id].outbound.ipv6_offloads = tx_offloads.ipv6_offloads;
3074 	}
3075 
3076 	/*
3077 	 * Set the enabled port mask in helper config for use by helper
3078 	 * sub-system. This will be used while initializing devices using
3079 	 * helper sub-system.
3080 	 */
3081 	eh_conf->eth_portmask = enabled_port_mask;
3082 
3083 	/* Initialize eventmode components */
3084 	ret = eh_devs_init(eh_conf);
3085 	if (ret < 0)
3086 		rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
3087 
3088 	/* start ports */
3089 	RTE_ETH_FOREACH_DEV(portid) {
3090 		if ((enabled_port_mask & (1 << portid)) == 0)
3091 			continue;
3092 
3093 		ret = rte_eth_dev_start(portid);
3094 		if (ret < 0)
3095 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
3096 					"err=%d, port=%d\n", ret, portid);
3097 
3098 		/* Create flow after starting the device */
3099 		create_default_ipsec_flow(portid, req_rx_offloads[portid]);
3100 
3101 		/*
3102 		 * If enabled, put device in promiscuous mode.
3103 		 * This allows IO forwarding mode to forward packets
3104 		 * to itself through 2 cross-connected ports of the
3105 		 * target machine.
3106 		 */
3107 		if (promiscuous_on) {
3108 			ret = rte_eth_promiscuous_enable(portid);
3109 			if (ret != 0)
3110 				rte_exit(EXIT_FAILURE,
3111 					"rte_eth_promiscuous_enable: err=%s, port=%d\n",
3112 					rte_strerror(-ret), portid);
3113 		}
3114 
3115 		rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
3116 			ethdev_reset_event_callback, NULL);
3117 
3118 		rte_eth_dev_callback_register(portid,
3119 			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
3120 	}
3121 
3122 	/* fragment reassembly is enabled */
3123 	if (frag_tbl_sz != 0) {
3124 		ret = reassemble_init();
3125 		if (ret != 0)
3126 			rte_exit(EXIT_FAILURE, "failed at reassemble init\n");
3127 	}
3128 
3129 	/* Replicate each context per socket */
3130 	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3131 		socket_id = rte_socket_id_by_idx(i);
3132 		if ((socket_ctx[socket_id].session_pool != NULL) &&
3133 			(socket_ctx[socket_id].sa_in == NULL) &&
3134 			(socket_ctx[socket_id].sa_out == NULL)) {
3135 			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
3136 				eh_conf->mode_params);
3137 			sp4_init(&socket_ctx[socket_id], socket_id);
3138 			sp6_init(&socket_ctx[socket_id], socket_id);
3139 			rt_init(&socket_ctx[socket_id], socket_id);
3140 		}
3141 	}
3142 
3143 	flow_init();
3144 
3145 	/* Get security context if available and only if dynamic field is
3146 	 * registered for fast path access.
3147 	 */
3148 	if (!rte_security_dynfield_is_registered())
3149 		goto skip_sec_ctx;
3150 
3151 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
3152 		for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
3153 			portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
3154 			lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
3155 				rte_eth_dev_get_sec_ctx(portid);
3156 		}
3157 	}
3158 skip_sec_ctx:
3159 
3160 	check_all_ports_link_status(enabled_port_mask);
3161 
3162 	if (stats_interval > 0)
3163 		rte_eal_alarm_set(stats_interval * US_PER_S,
3164 				print_stats_cb, NULL);
3165 	else
3166 		RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
3167 
3168 	/* launch per-lcore init on every lcore */
3169 	rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
3170 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
3171 		if (rte_eal_wait_lcore(lcore_id) < 0)
3172 			return -1;
3173 	}
3174 
3175 	/* Uninitialize eventmode components */
3176 	ret = eh_devs_uninit(eh_conf);
3177 	if (ret < 0)
3178 		rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
3179 
3180 	/* Free eventmode configuration memory */
3181 	eh_conf_uninit(eh_conf);
3182 
3183 	/* Destroy inbound and outbound sessions */
3184 	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3185 		socket_id = rte_socket_id_by_idx(i);
3186 		sessions_free(socket_ctx[socket_id].sa_in);
3187 		sessions_free(socket_ctx[socket_id].sa_out);
3188 	}
3189 
3190 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
3191 		printf("Closing cryptodev %d...", cdev_id);
3192 		rte_cryptodev_stop(cdev_id);
3193 		rte_cryptodev_close(cdev_id);
3194 		printf(" Done\n");
3195 	}
3196 
3197 	flow_print_counters();
3198 
3199 	RTE_ETH_FOREACH_DEV(portid) {
3200 		if ((enabled_port_mask & (1 << portid)) == 0)
3201 			continue;
3202 
3203 		printf("Closing port %d...", portid);
3204 		if (flow_info_tbl[portid].rx_def_flow) {
3205 			struct rte_flow_error err;
3206 
3207 			ret = rte_flow_destroy(portid,
3208 				flow_info_tbl[portid].rx_def_flow, &err);
3209 			if (ret)
3210 				RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
3211 					"for port %u, err msg: %s\n", portid,
3212 					err.message);
3213 		}
3214 		ret = rte_eth_dev_stop(portid);
3215 		if (ret != 0)
3216 			RTE_LOG(ERR, IPSEC,
3217 				"rte_eth_dev_stop: err=%s, port=%u\n",
3218 				rte_strerror(-ret), portid);
3219 
3220 		rte_eth_dev_close(portid);
3221 		printf(" Done\n");
3222 	}
3223 
3224 	/* clean up the EAL */
3225 	rte_eal_cleanup();
3226 	printf("Bye...\n");
3227 
3228 	return 0;
3229 }
3230