xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Intel Corporation
3  */
4 
5 #include <stdbool.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <sys/types.h>
11 #include <netinet/in.h>
12 #include <netinet/ip.h>
13 #include <netinet/ip6.h>
14 #include <string.h>
15 #include <sys/queue.h>
16 #include <stdarg.h>
17 #include <errno.h>
18 #include <signal.h>
19 #include <getopt.h>
20 
21 #include <rte_common.h>
22 #include <rte_bitmap.h>
23 #include <rte_byteorder.h>
24 #include <rte_log.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
38 #include <rte_mbuf.h>
39 #include <rte_acl.h>
40 #include <rte_lpm.h>
41 #include <rte_lpm6.h>
42 #include <rte_hash.h>
43 #include <rte_jhash.h>
44 #include <rte_cryptodev.h>
45 #include <rte_security.h>
46 #include <rte_eventdev.h>
47 #include <rte_event_crypto_adapter.h>
48 #include <rte_ip.h>
49 #include <rte_ip_frag.h>
50 #include <rte_alarm.h>
51 #include <rte_telemetry.h>
52 
53 #include "event_helper.h"
54 #include "flow.h"
55 #include "ipsec.h"
56 #include "ipsec_worker.h"
57 #include "parser.h"
58 #include "sad.h"
59 
60 #if defined(__ARM_NEON)
61 #include "ipsec_lpm_neon.h"
62 #endif
63 
64 volatile bool force_quit;
65 
66 #define MAX_JUMBO_PKT_LEN  9600
67 
68 #define MEMPOOL_CACHE_SIZE 256
69 
70 #define CDEV_MAP_ENTRIES 16384
71 #define CDEV_MP_CACHE_SZ 64
72 #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
73 #define MAX_QUEUE_PAIRS 1
74 
75 #define MAX_LCORE_PARAMS 1024
76 
77 /*
78  * Configurable number of RX/TX ring descriptors
79  */
80 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
81 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
82 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
83 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
84 
85 /*
86  * Configurable number of descriptors per queue pair
87  */
88 uint32_t qp_desc_nb = 2048;
89 
90 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
91 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
92 		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
93 		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
94 		0, 0)
95 
96 #define	FRAG_TBL_BUCKET_ENTRIES	4
97 #define	MAX_FRAG_TTL_NS		(10LL * NS_PER_S)
98 
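/*
 * Frame length for a given MTU: e.g. the default MTU of 1500 bytes gives
 * 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes of frame data.
 */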
99 #define MTU_TO_FRAMELEN(x)	((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
100 
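/*
 * Per-port source/destination MAC addresses. The source address is filled in
 * from the port MAC during port_init(); the default destination addresses
 * below can be overridden at run time via add_dst_ethaddr().
 */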
101 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
102 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a}} },
103 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9}} },
104 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x08, 0x69, 0x26}} },
105 	{ {{0}}, {{0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd}} }
106 };
107 
108 struct offloads tx_offloads;
109 
110 /*
111  * Per-port ethernet header (source/destination MAC) that will be applied
112  * to outgoing packets.
113  */
114 xmm_t val_eth[RTE_MAX_ETHPORTS];
115 
116 struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
117 
118 #define CMD_LINE_OPT_CONFIG		"config"
119 #define CMD_LINE_OPT_SINGLE_SA		"single-sa"
120 #define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
121 #define CMD_LINE_OPT_TRANSFER_MODE	"transfer-mode"
122 #define CMD_LINE_OPT_SCHEDULE_TYPE	"event-schedule-type"
123 #define CMD_LINE_OPT_RX_OFFLOAD		"rxoffload"
124 #define CMD_LINE_OPT_TX_OFFLOAD		"txoffload"
125 #define CMD_LINE_OPT_REASSEMBLE		"reassemble"
126 #define CMD_LINE_OPT_MTU		"mtu"
127 #define CMD_LINE_OPT_FRAG_TTL		"frag-ttl"
128 #define CMD_LINE_OPT_EVENT_VECTOR	"event-vector"
129 #define CMD_LINE_OPT_VECTOR_SIZE	"vector-size"
130 #define CMD_LINE_OPT_VECTOR_TIMEOUT	"vector-tmo"
131 #define CMD_LINE_OPT_VECTOR_POOL_SZ	"vector-pool-sz"
132 #define CMD_LINE_OPT_PER_PORT_POOL	"per-port-pool"
133 #define CMD_LINE_OPT_QP_DESC_NB		"desc-nb"
134 
135 #define CMD_LINE_ARG_EVENT	"event"
136 #define CMD_LINE_ARG_POLL	"poll"
137 #define CMD_LINE_ARG_ORDERED	"ordered"
138 #define CMD_LINE_ARG_ATOMIC	"atomic"
139 #define CMD_LINE_ARG_PARALLEL	"parallel"
140 
141 enum {
142 	/* long options mapped to a short option */
143 
144 	/* the first long-only option value must be >= 256, so that it won't
145 	 * conflict with short options
146 	 */
147 	CMD_LINE_OPT_MIN_NUM = 256,
148 	CMD_LINE_OPT_CONFIG_NUM,
149 	CMD_LINE_OPT_SINGLE_SA_NUM,
150 	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
151 	CMD_LINE_OPT_TRANSFER_MODE_NUM,
152 	CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
153 	CMD_LINE_OPT_RX_OFFLOAD_NUM,
154 	CMD_LINE_OPT_TX_OFFLOAD_NUM,
155 	CMD_LINE_OPT_REASSEMBLE_NUM,
156 	CMD_LINE_OPT_MTU_NUM,
157 	CMD_LINE_OPT_FRAG_TTL_NUM,
158 	CMD_LINE_OPT_EVENT_VECTOR_NUM,
159 	CMD_LINE_OPT_VECTOR_SIZE_NUM,
160 	CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
161 	CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
162 	CMD_LINE_OPT_PER_PORT_POOL_NUM,
163 	CMD_LINE_OPT_QP_DESC_NB_NUM,
164 };
165 
166 static const struct option lgopts[] = {
167 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
168 	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
169 	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
170 	{CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
171 	{CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
172 	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
173 	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
174 	{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
175 	{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
176 	{CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
177 	{CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
178 	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
179 	{CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
180 	{CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
181 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
182 	{CMD_LINE_OPT_QP_DESC_NB, 1, 0, CMD_LINE_OPT_QP_DESC_NB_NUM},
183 	{NULL, 0, 0, 0}
184 };
185 
186 uint32_t unprotected_port_mask;
187 uint32_t single_sa_idx;
188 /* mask of enabled ports */
189 static uint32_t enabled_port_mask;
190 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
191 static int32_t promiscuous_on;
192 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
193 static uint32_t nb_lcores;
194 uint32_t single_sa;
195 uint32_t nb_bufs_in_pool;
196 
197 /*
198  * RX/TX HW offload capabilities to enable/use on ethernet ports.
199  * By default all capabilities are enabled.
200  */
201 static uint64_t dev_rx_offload = UINT64_MAX;
202 static uint64_t dev_tx_offload = UINT64_MAX;
203 
204 /*
205  * global values that determine multi-seg policy
206  */
207 uint32_t frag_tbl_sz;
208 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
209 uint32_t mtu_size = RTE_ETHER_MTU;
210 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
211 static uint32_t stats_interval;
212 
213 /* application wide librte_ipsec/SA parameters */
214 struct app_sa_prm app_sa_prm = {
215 			.enable = 0,
216 			.cache_sz = SA_CACHE_SZ,
217 			.udp_encap = 0
218 		};
219 static const char *cfgfile;
220 
221 struct lcore_params {
222 	uint16_t port_id;
223 	uint8_t queue_id;
224 	uint8_t lcore_id;
225 } __rte_cache_aligned;
226 
227 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
228 
229 static struct lcore_params *lcore_params;
230 static uint16_t nb_lcore_params;
231 
232 static struct rte_hash *cdev_map_in;
233 static struct rte_hash *cdev_map_out;
234 
235 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
236 
237 static struct rte_eth_conf port_conf = {
238 	.rxmode = {
239 		.mq_mode	= RTE_ETH_MQ_RX_RSS,
240 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
241 	},
242 	.rx_adv_conf = {
243 		.rss_conf = {
244 			.rss_key = NULL,
245 			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
246 				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
247 		},
248 	},
249 	.txmode = {
250 		.mq_mode = RTE_ETH_MQ_TX_NONE,
251 	},
252 };
253 
254 struct socket_ctx socket_ctx[NB_SOCKETS];
255 
256 bool per_port_pool;
257 
258 uint16_t wrkr_flags;
259 /*
260  * Determine if multi-segment support is required:
261  *  - either the frame buffer size is smaller than the MTU
262  *  - or reassembly support is requested
263  */
264 static int
265 multi_seg_required(void)
266 {
267 	return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
268 		frame_buf_size || frag_tbl_sz != 0);
269 }
270 
271 
272 struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
273 
274 /* Print out statistics on packet distribution */
275 static void
276 print_stats_cb(__rte_unused void *param)
277 {
278 	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
279 	uint64_t total_frag_packets_dropped = 0;
280 	float burst_percent, rx_per_call, tx_per_call;
281 	unsigned int coreid;
282 
283 	total_packets_dropped = 0;
284 	total_packets_tx = 0;
285 	total_packets_rx = 0;
286 
287 	const char clr[] = { 27, '[', '2', 'J', '\0' };
288 	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
289 
290 	/* Clear screen and move to top left */
291 	printf("%s%s", clr, topLeft);
292 
293 	printf("\nCore statistics ====================================");
294 
295 	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
296 		/* skip disabled cores */
297 		if (rte_lcore_is_enabled(coreid) == 0)
298 			continue;
299 		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
300 					core_statistics[coreid].rx;
301 		rx_per_call =  (float)(core_statistics[coreid].rx)/
302 				       core_statistics[coreid].rx_call;
303 		tx_per_call =  (float)(core_statistics[coreid].tx)/
304 				       core_statistics[coreid].tx_call;
305 		printf("\nStatistics for core %u ------------------------------"
306 			   "\nPackets received: %20"PRIu64
307 			   "\nPackets sent: %24"PRIu64
308 			   "\nPackets dropped: %21"PRIu64
309 			   "\nFrag Packets dropped: %16"PRIu64
310 			   "\nBurst percent: %23.2f"
311 			   "\nPackets per Rx call: %17.2f"
312 			   "\nPackets per Tx call: %17.2f",
313 			   coreid,
314 			   core_statistics[coreid].rx,
315 			   core_statistics[coreid].tx,
316 			   core_statistics[coreid].dropped,
317 			   core_statistics[coreid].frag_dropped,
318 			   burst_percent,
319 			   rx_per_call,
320 			   tx_per_call);
321 
322 		total_packets_dropped += core_statistics[coreid].dropped;
323 		total_frag_packets_dropped += core_statistics[coreid].frag_dropped;
324 		total_packets_tx += core_statistics[coreid].tx;
325 		total_packets_rx += core_statistics[coreid].rx;
326 	}
327 	printf("\nAggregate statistics ==============================="
328 		   "\nTotal packets received: %14"PRIu64
329 		   "\nTotal packets sent: %18"PRIu64
330 		   "\nTotal packets dropped: %15"PRIu64
331 		   "\nTotal frag packets dropped: %10"PRIu64,
332 		   total_packets_rx,
333 		   total_packets_tx,
334 		   total_packets_dropped,
335 		   total_frag_packets_dropped);
336 	printf("\n====================================================\n");
337 
338 	rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
339 }
340 
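/*
 * Append packets to the IPv4/IPv6 traffic lists according to the IP version
 * field; data[] is pointed at the L4-protocol field for the subsequent SP
 * (ACL) lookup. Packets that are neither IPv4 nor IPv6 are dropped.
 */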
341 static void
342 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
343 {
344 	uint32_t i, n4, n6;
345 	struct ip *ip;
346 	struct rte_mbuf *m;
347 
348 	n4 = trf->ip4.num;
349 	n6 = trf->ip6.num;
350 
351 	for (i = 0; i < num; i++) {
352 
353 		m = mb[i];
354 		ip = rte_pktmbuf_mtod(m, struct ip *);
355 
356 		if (ip->ip_v == IPVERSION) {
357 			trf->ip4.pkts[n4] = m;
358 			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
359 					uint8_t *, offsetof(struct ip, ip_p));
360 			n4++;
361 		} else if (ip->ip_v == IP6_VERSION) {
362 			trf->ip6.pkts[n6] = m;
363 			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
364 					uint8_t *,
365 					offsetof(struct ip6_hdr, ip6_nxt));
366 			n6++;
367 		} else
368 			free_pkts(&m, 1);
369 	}
370 
371 	trf->ip4.num = n4;
372 	trf->ip6.num = n6;
373 }
374 
375 
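/*
 * Inbound path: decrypt/verify IPsec packets (via the legacy crypto path, or
 * via librte_ipsec when app_sa_prm.enable is set) and then run the resulting
 * plain IPv4/IPv6 traffic through the inbound SP/SA checks.
 */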
376 static inline void
377 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
378 		struct ipsec_traffic *traffic)
379 {
380 	unsigned int lcoreid = rte_lcore_id();
381 	uint16_t nb_pkts_in, n_ip4, n_ip6;
382 
383 	n_ip4 = traffic->ip4.num;
384 	n_ip6 = traffic->ip6.num;
385 
386 	if (app_sa_prm.enable == 0) {
387 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
388 				traffic->ipsec.num, MAX_PKT_BURST);
389 		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
390 	} else {
391 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
392 			traffic->ipsec.saptr, traffic->ipsec.num);
393 		ipsec_process(ipsec_ctx, traffic);
394 	}
395 
396 	inbound_sp_sa(ipsec_ctx->sp4_ctx,
397 		ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
398 		&core_statistics[lcoreid].inbound.spd4);
399 
400 	inbound_sp_sa(ipsec_ctx->sp6_ctx,
401 		ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
402 		&core_statistics[lcoreid].inbound.spd6);
403 }
404 
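/*
 * Classify plain traffic against the outbound SPD (an ACL context): packets
 * are dropped (DISCARD), forwarded as-is (BYPASS) or queued for IPsec
 * processing with the SA index taken from the ACL match result.
 */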
405 static inline void
406 outbound_spd_lookup(struct sp_ctx *sp,
407 		struct traffic_type *ip,
408 		struct traffic_type *ipsec,
409 		struct ipsec_spd_stats *stats)
410 {
411 	struct rte_mbuf *m;
412 	uint32_t i, j, sa_idx;
413 
414 	if (ip->num == 0 || sp == NULL)
415 		return;
416 
417 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
418 			ip->num, DEFAULT_MAX_CATEGORIES);
419 
420 	for (i = 0, j = 0; i < ip->num; i++) {
421 		m = ip->pkts[i];
422 		sa_idx = ip->res[i] - 1;
423 
424 		if (unlikely(ip->res[i] == DISCARD)) {
425 			free_pkts(&m, 1);
426 
427 			stats->discard++;
428 		} else if (unlikely(ip->res[i] == BYPASS)) {
429 			ip->pkts[j++] = m;
430 
431 			stats->bypass++;
432 		} else {
433 			ipsec->res[ipsec->num] = sa_idx;
434 			ipsec->pkts[ipsec->num++] = m;
435 
436 			stats->protect++;
437 		}
438 	}
439 	ip->num = j;
440 }
441 
442 static inline void
443 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
444 		struct ipsec_traffic *traffic)
445 {
446 	struct rte_mbuf *m;
447 	uint16_t idx, nb_pkts_out, i;
448 	unsigned int lcoreid = rte_lcore_id();
449 
450 	/* Drop any IPsec traffic from protected ports */
451 	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
452 
453 	traffic->ipsec.num = 0;
454 
455 	outbound_spd_lookup(ipsec_ctx->sp4_ctx,
456 		&traffic->ip4, &traffic->ipsec,
457 		&core_statistics[lcoreid].outbound.spd4);
458 
459 	outbound_spd_lookup(ipsec_ctx->sp6_ctx,
460 		&traffic->ip6, &traffic->ipsec,
461 		&core_statistics[lcoreid].outbound.spd6);
462 
463 	if (app_sa_prm.enable == 0) {
464 
465 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
466 				traffic->ipsec.res, traffic->ipsec.num,
467 				MAX_PKT_BURST);
468 
469 		for (i = 0; i < nb_pkts_out; i++) {
470 			m = traffic->ipsec.pkts[i];
471 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
472 			if (ip->ip_v == IPVERSION) {
473 				idx = traffic->ip4.num++;
474 				traffic->ip4.pkts[idx] = m;
475 			} else {
476 				idx = traffic->ip6.num++;
477 				traffic->ip6.pkts[idx] = m;
478 			}
479 		}
480 	} else {
481 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
482 			traffic->ipsec.saptr, traffic->ipsec.num);
483 		ipsec_process(ipsec_ctx, traffic);
484 	}
485 }
486 
487 static inline void
488 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
489 		struct ipsec_traffic *traffic)
490 {
491 	struct rte_mbuf *m;
492 	uint32_t nb_pkts_in, i, idx;
493 
494 	if (app_sa_prm.enable == 0) {
495 
496 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
497 				traffic->ipsec.num, MAX_PKT_BURST);
498 
499 		for (i = 0; i < nb_pkts_in; i++) {
500 			m = traffic->ipsec.pkts[i];
501 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
502 			if (ip->ip_v == IPVERSION) {
503 				idx = traffic->ip4.num++;
504 				traffic->ip4.pkts[idx] = m;
505 			} else {
506 				idx = traffic->ip6.num++;
507 				traffic->ip6.pkts[idx] = m;
508 			}
509 		}
510 	} else {
511 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
512 			traffic->ipsec.saptr, traffic->ipsec.num);
513 		ipsec_process(ipsec_ctx, traffic);
514 	}
515 }
516 
517 static inline void
518 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
519 		struct ipsec_traffic *traffic)
520 {
521 	struct rte_mbuf *m;
522 	uint32_t nb_pkts_out, i, n;
523 	struct ip *ip;
524 
525 	/* Drop any IPsec traffic from protected ports */
526 	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
527 
528 	n = 0;
529 
530 	for (i = 0; i < traffic->ip4.num; i++) {
531 		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
532 		traffic->ipsec.res[n++] = single_sa_idx;
533 	}
534 
535 	for (i = 0; i < traffic->ip6.num; i++) {
536 		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
537 		traffic->ipsec.res[n++] = single_sa_idx;
538 	}
539 
540 	traffic->ip4.num = 0;
541 	traffic->ip6.num = 0;
542 	traffic->ipsec.num = n;
543 
544 	if (app_sa_prm.enable == 0) {
545 
546 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
547 				traffic->ipsec.res, traffic->ipsec.num,
548 				MAX_PKT_BURST);
549 
550 		/* They all use the same SA (ip4 or ip6 tunnel) */
551 		m = traffic->ipsec.pkts[0];
552 		ip = rte_pktmbuf_mtod(m, struct ip *);
553 		if (ip->ip_v == IPVERSION) {
554 			traffic->ip4.num = nb_pkts_out;
555 			for (i = 0; i < nb_pkts_out; i++)
556 				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
557 		} else {
558 			traffic->ip6.num = nb_pkts_out;
559 			for (i = 0; i < nb_pkts_out; i++)
560 				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
561 		}
562 	} else {
563 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
564 			traffic->ipsec.saptr, traffic->ipsec.num);
565 		ipsec_process(ipsec_ctx, traffic);
566 	}
567 }
568 
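/*
 * Per-burst fast path: split the burst into plain/IPsec traffic, run inbound
 * or outbound processing depending on the port, then route the resulting
 * IPv4/IPv6 packets (NEON-accelerated routing where available).
 */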
569 static inline void
570 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
571 	     uint16_t nb_pkts, uint16_t portid, void *ctx)
572 {
573 	struct ipsec_traffic traffic;
574 
575 	prepare_traffic(ctx, pkts, &traffic, nb_pkts);
576 
577 	if (unlikely(single_sa)) {
578 		if (is_unprotected_port(portid))
579 			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
580 		else
581 			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
582 	} else {
583 		if (is_unprotected_port(portid))
584 			process_pkts_inbound(&qconf->inbound, &traffic);
585 		else
586 			process_pkts_outbound(&qconf->outbound, &traffic);
587 	}
588 
589 #if defined __ARM_NEON
590 	/* Neon optimized packet routing */
591 	route4_pkts_neon(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
592 			 qconf->outbound.ipv4_offloads, true);
593 	route6_pkts_neon(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
594 #else
595 	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
596 		    qconf->outbound.ipv4_offloads, true);
597 	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
598 #endif
599 }
600 
601 static inline void
602 drain_crypto_buffers(struct lcore_conf *qconf)
603 {
604 	uint32_t i;
605 	struct ipsec_ctx *ctx;
606 
607 	/* drain inbound buffers */
608 	ctx = &qconf->inbound;
609 	for (i = 0; i != ctx->nb_qps; i++) {
610 		if (ctx->tbl[i].len != 0)
611 			enqueue_cop_burst(ctx->tbl + i);
612 	}
613 
614 	/* drain outbound buffers */
615 	ctx = &qconf->outbound;
616 	for (i = 0; i != ctx->nb_qps; i++) {
617 		if (ctx->tbl[i].len != 0)
618 			enqueue_cop_burst(ctx->tbl + i);
619 	}
620 }
621 
622 static void
623 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
624 		struct ipsec_ctx *ctx)
625 {
626 	uint32_t n;
627 	struct ipsec_traffic trf;
628 	unsigned int lcoreid = rte_lcore_id();
629 
630 	if (app_sa_prm.enable == 0) {
631 
632 		/* dequeue packets from crypto-queue */
633 		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
634 			RTE_DIM(trf.ipsec.pkts));
635 
636 		trf.ip4.num = 0;
637 		trf.ip6.num = 0;
638 
639 		/* split traffic by ipv4-ipv6 */
640 		split46_traffic(&trf, trf.ipsec.pkts, n);
641 	} else
642 		ipsec_cqp_process(ctx, &trf);
643 
644 	/* process ipv4 packets */
645 	if (trf.ip4.num != 0) {
646 		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
647 			&core_statistics[lcoreid].inbound.spd4);
648 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
649 			    qconf->outbound.ipv4_offloads, true);
650 	}
651 
652 	/* process ipv6 packets */
653 	if (trf.ip6.num != 0) {
654 		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
655 			&core_statistics[lcoreid].inbound.spd6);
656 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
657 	}
658 }
659 
660 static void
661 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
662 		struct ipsec_ctx *ctx)
663 {
664 	uint32_t n;
665 	struct ipsec_traffic trf;
666 
667 	if (app_sa_prm.enable == 0) {
668 
669 		/* dequeue packets from crypto-queue */
670 		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
671 			RTE_DIM(trf.ipsec.pkts));
672 
673 		trf.ip4.num = 0;
674 		trf.ip6.num = 0;
675 
676 		/* split traffic by ipv4-ipv6 */
677 		split46_traffic(&trf, trf.ipsec.pkts, n);
678 	} else
679 		ipsec_cqp_process(ctx, &trf);
680 
681 	/* process ipv4 packets */
682 	if (trf.ip4.num != 0)
683 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
684 			    qconf->outbound.ipv4_offloads, true);
685 
686 	/* process ipv6 packets */
687 	if (trf.ip6.num != 0)
688 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
689 }
690 
691 /* main processing loop */
692 void
693 ipsec_poll_mode_worker(void)
694 {
695 	struct rte_mbuf *pkts[MAX_PKT_BURST];
696 	uint32_t lcore_id;
697 	uint64_t prev_tsc, diff_tsc, cur_tsc;
698 	uint16_t i, nb_rx, portid;
699 	uint8_t queueid;
700 	struct lcore_conf *qconf;
701 	int32_t rc, socket_id;
702 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
703 			/ US_PER_S * BURST_TX_DRAIN_US;
704 	struct lcore_rx_queue *rxql;
705 
706 	prev_tsc = 0;
707 	lcore_id = rte_lcore_id();
708 	qconf = &lcore_conf[lcore_id];
709 	rxql = qconf->rx_queue_list;
710 	socket_id = rte_lcore_to_socket_id(lcore_id);
711 
712 	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
713 	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
714 	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
715 	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
716 	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
717 	qconf->inbound.cdev_map = cdev_map_in;
718 	qconf->inbound.lcore_id = lcore_id;
719 	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
720 	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
721 	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
722 	qconf->outbound.cdev_map = cdev_map_out;
723 	qconf->outbound.lcore_id = lcore_id;
724 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
725 
726 	rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
727 	if (rc != 0) {
728 		RTE_LOG(ERR, IPSEC,
729 			"SAD cache init on lcore %u failed with code: %d\n",
730 			lcore_id, rc);
731 		return;
732 	}
733 
734 	if (qconf->nb_rx_queue == 0) {
735 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
736 			lcore_id);
737 		return;
738 	}
739 
740 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
741 
742 	for (i = 0; i < qconf->nb_rx_queue; i++) {
743 		portid = rxql[i].port_id;
744 		queueid = rxql[i].queue_id;
745 		RTE_LOG(INFO, IPSEC,
746 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
747 			lcore_id, portid, queueid);
748 	}
749 
750 	while (!force_quit) {
751 		cur_tsc = rte_rdtsc();
752 
753 		/* TX queue buffer drain */
754 		diff_tsc = cur_tsc - prev_tsc;
755 
756 		if (unlikely(diff_tsc > drain_tsc)) {
757 			drain_tx_buffers(qconf);
758 			drain_crypto_buffers(qconf);
759 			prev_tsc = cur_tsc;
760 		}
761 
762 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
763 
764 			/* Read packets from RX queues */
765 			portid = rxql[i].port_id;
766 			queueid = rxql[i].queue_id;
767 			nb_rx = rte_eth_rx_burst(portid, queueid,
768 					pkts, MAX_PKT_BURST);
769 
770 			if (nb_rx > 0) {
771 				core_stats_update_rx(nb_rx);
772 				process_pkts(qconf, pkts, nb_rx, portid,
773 					     rxql->sec_ctx);
774 			}
775 
776 			/* dequeue and process completed crypto-ops */
777 			if (is_unprotected_port(portid))
778 				drain_inbound_crypto_queues(qconf,
779 					&qconf->inbound);
780 			else
781 				drain_outbound_crypto_queues(qconf,
782 					&qconf->outbound);
783 		}
784 	}
785 }
786 
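/*
 * Verify that the flow director port/queue pair passed on the command line
 * is present in the port/queue/lcore mapping; returns 1 on success and -1
 * otherwise.
 */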
787 int
788 check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
789 {
790 	uint16_t i;
791 	uint16_t portid;
792 	uint8_t queueid;
793 
794 	for (i = 0; i < nb_lcore_params; ++i) {
795 		portid = lcore_params_array[i].port_id;
796 		if (portid == fdir_portid) {
797 			queueid = lcore_params_array[i].queue_id;
798 			if (queueid == fdir_qid)
799 				break;
800 		}
801 
802 		if (i == nb_lcore_params - 1)
803 			return -1;
804 	}
805 
806 	return 1;
807 }
808 
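/*
 * Sanity-check the port/queue/lcore mapping for poll mode (event mode skips
 * these checks): every referenced lcore must be enabled and every referenced
 * port must be in the port mask and present on the board.
 */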
809 static int32_t
810 check_poll_mode_params(struct eh_conf *eh_conf)
811 {
812 	uint8_t lcore;
813 	uint16_t portid;
814 	uint16_t i;
815 	int32_t socket_id;
816 
817 	if (!eh_conf)
818 		return -EINVAL;
819 
820 	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
821 		return 0;
822 
823 	if (lcore_params == NULL) {
824 		printf("Error: No port/queue/core mappings\n");
825 		return -1;
826 	}
827 
828 	for (i = 0; i < nb_lcore_params; ++i) {
829 		lcore = lcore_params[i].lcore_id;
830 		if (!rte_lcore_is_enabled(lcore)) {
831 			printf("error: lcore %hhu is not enabled in "
832 				"lcore mask\n", lcore);
833 			return -1;
834 		}
835 		socket_id = rte_lcore_to_socket_id(lcore);
836 		if (socket_id != 0 && numa_on == 0) {
837 			printf("warning: lcore %hhu is on socket %d "
838 				"with numa off\n",
839 				lcore, socket_id);
840 		}
841 		portid = lcore_params[i].port_id;
842 		if ((enabled_port_mask & (1 << portid)) == 0) {
843 			printf("port %u is not enabled in port mask\n", portid);
844 			return -1;
845 		}
846 		if (!rte_eth_dev_is_valid_port(portid)) {
847 			printf("port %u is not present on the board\n", portid);
848 			return -1;
849 		}
850 	}
851 	return 0;
852 }
853 
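/*
 * Number of RX queues to configure on a port: the highest queue id that the
 * port/queue/lcore mapping references for this port, plus one.
 */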
854 static uint8_t
855 get_port_nb_rx_queues(const uint16_t port)
856 {
857 	int32_t queue = -1;
858 	uint16_t i;
859 
860 	for (i = 0; i < nb_lcore_params; ++i) {
861 		if (lcore_params[i].port_id == port &&
862 				lcore_params[i].queue_id > queue)
863 			queue = lcore_params[i].queue_id;
864 	}
865 	return (uint8_t)(++queue);
866 }
867 
868 static int32_t
869 init_lcore_rx_queues(void)
870 {
871 	uint16_t i, nb_rx_queue;
872 	uint8_t lcore;
873 
874 	for (i = 0; i < nb_lcore_params; ++i) {
875 		lcore = lcore_params[i].lcore_id;
876 		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
877 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
878 			printf("error: too many queues (%u) for lcore: %u\n",
879 					nb_rx_queue + 1, lcore);
880 			return -1;
881 		}
882 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
883 			lcore_params[i].port_id;
884 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
885 			lcore_params[i].queue_id;
886 		lcore_conf[lcore].nb_rx_queue++;
887 	}
888 	return 0;
889 }
890 
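/*
 * Illustrative poll-mode invocation (shown only as an example of the options
 * documented below; the config file name is hypothetical):
 *   ./dpdk-ipsec-secgw -l 1,2 -- -p 0x3 -u 0x1 -f ep0.cfg \
 *       --config="(0,0,1),(1,0,2)"
 */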
891 /* display usage */
892 static void
893 print_usage(const char *prgname)
894 {
895 	fprintf(stderr, "%s [EAL options] --"
896 		" -p PORTMASK"
897 		" [-P]"
898 		" [-u PORTMASK]"
899 		" [-j FRAMESIZE]"
900 		" [-l]"
901 		" [-w REPLAY_WINDOW_SIZE]"
902 		" [-e]"
903 		" [-a]"
904 		" [-c]"
905 		" [-t STATS_INTERVAL]"
906 		" [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
907 		" -f CONFIG_FILE"
908 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
909 		" [--single-sa SAIDX]"
910 		" [--cryptodev_mask MASK]"
911 		" [--transfer-mode MODE]"
912 		" [--event-schedule-type TYPE]"
913 		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
914 		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
915 		" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
916 		" [--" CMD_LINE_OPT_MTU " MTU]"
917 		" [--event-vector]"
918 		" [--vector-size SIZE]"
919 		" [--vector-tmo TIMEOUT in ns]"
920 		" [--" CMD_LINE_OPT_QP_DESC_NB " NUMBER_OF_DESC]"
921 		"\n\n"
922 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
923 		"  -P : Enable promiscuous mode\n"
924 		"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
925 		"  -j FRAMESIZE: Data buffer size, minimum (and default)\n"
926 		"     value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
927 		"  -l enables code-path that uses librte_ipsec\n"
928 		"  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
929 		"     size for each SA\n"
930 		"  -e enables ESN\n"
931 		"  -a enables SA SQN atomic behaviour\n"
932 		"  -c specifies inbound SAD cache size,\n"
933 		"     zero value disables the cache (default value: 128)\n"
934 		"  -t specifies statistics screen update interval,\n"
935 		"     zero disables statistics screen (default value: 0)\n"
936 		"  -s number of mbufs in packet pool; if not specified, the number\n"
937 		"     of mbufs is calculated based on the number of cores,\n"
938 		"     ports and crypto queues\n"
939 		"  -f CONFIG_FILE: Configuration file\n"
940 		"  --config (port,queue,lcore): Rx queue configuration. In poll\n"
941 		"                               mode determines which queues from\n"
942 		"                               which ports are mapped to which cores.\n"
943 		"                               In event mode this option is not used\n"
944 		"                               as packets are dynamically scheduled\n"
945 		"                               to cores by HW.\n"
946 		"  --single-sa SAIDX: In poll mode use single SA index for\n"
947 		"                     outbound traffic, bypassing the SP\n"
948 		"                     In event mode selects driver submode,\n"
949 		"                     SA index value is ignored\n"
950 		"  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
951 		"                         devices to configure\n"
952 		"  --transfer-mode MODE\n"
953 		"               \"poll\"  : Packet transfer via polling (default)\n"
954 		"               \"event\" : Packet transfer via event device\n"
955 		"  --event-schedule-type TYPE queue schedule type, used only when\n"
956 		"                             transfer mode is set to event\n"
957 		"               \"ordered\"  : Ordered (default)\n"
958 		"               \"atomic\"   : Atomic\n"
959 		"               \"parallel\" : Parallel\n"
960 		"  --" CMD_LINE_OPT_RX_OFFLOAD
961 		": bitmask of the RX HW offload capabilities to enable/use\n"
962 		"                         (RTE_ETH_RX_OFFLOAD_*)\n"
963 		"  --" CMD_LINE_OPT_TX_OFFLOAD
964 		": bitmask of the TX HW offload capabilities to enable/use\n"
965 		"                         (RTE_ETH_TX_OFFLOAD_*)\n"
966 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
967 		": max number of entries in reassemble(fragment) table\n"
968 		"    (zero (default value) disables reassembly)\n"
969 		"  --" CMD_LINE_OPT_MTU " MTU"
970 		": MTU value on all ports (default value: 1500)\n"
971 		"    outgoing packets with bigger size will be fragmented\n"
972 		"    incoming packets with bigger size will be discarded\n"
973 		"  --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
974 		": fragments lifetime in nanoseconds, default\n"
975 		"    and maximum value is 10.000.000.000 ns (10 s)\n"
976 		"  --event-vector enables event vectorization\n"
977 		"  --vector-size Max vector size (default value: 16)\n"
978 		"  --vector-tmo Max vector timeout in nanoseconds\n"
979 		"    (default value: 102400)\n"
980 		"  --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
981 		"  --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
982 		"                    (default value is based on mbuf count)\n"
983 		"  --" CMD_LINE_OPT_QP_DESC_NB " DESC_NB"
984 		": Number of descriptors per queue pair (default value: 2048)\n"
985 		"\n",
986 		prgname);
987 }
988 
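/* Parse a numeric bitmask in any base accepted by strtoul() (e.g. 0x... for hex). */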
989 static int
990 parse_mask(const char *str, uint64_t *val)
991 {
992 	char *end;
993 	unsigned long t;
994 
995 	errno = 0;
996 	t = strtoul(str, &end, 0);
997 	if (errno != 0 || end[0] != 0)
998 		return -EINVAL;
999 
1000 	*val = t;
1001 	return 0;
1002 }
1003 
1004 static int32_t
1005 parse_portmask(const char *portmask)
1006 {
1007 	char *end = NULL;
1008 	unsigned long pm;
1009 
1010 	errno = 0;
1011 
1012 	/* parse hexadecimal string */
1013 	pm = strtoul(portmask, &end, 16);
1014 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1015 		return -1;
1016 
1017 	if ((pm == 0) && errno)
1018 		return -1;
1019 
1020 	return pm;
1021 }
1022 
1023 static int64_t
1024 parse_decimal(const char *str)
1025 {
1026 	char *end = NULL;
1027 	uint64_t num;
1028 
1029 	num = strtoull(str, &end, 10);
1030 	if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1031 		|| num > INT64_MAX)
1032 		return -1;
1033 
1034 	return num;
1035 }
1036 
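/*
 * Parse the --config option, a list of (port,queue,lcore) triples;
 * e.g. --config="(0,0,1),(1,0,2)" maps port 0/queue 0 to lcore 1 and
 * port 1/queue 0 to lcore 2.
 */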
1037 static int32_t
1038 parse_config(const char *q_arg)
1039 {
1040 	char s[256];
1041 	const char *p, *p0 = q_arg;
1042 	char *end;
1043 	enum fieldnames {
1044 		FLD_PORT = 0,
1045 		FLD_QUEUE,
1046 		FLD_LCORE,
1047 		_NUM_FLD
1048 	};
1049 	unsigned long int_fld[_NUM_FLD];
1050 	char *str_fld[_NUM_FLD];
1051 	int32_t i;
1052 	uint32_t size;
1053 
1054 	nb_lcore_params = 0;
1055 
1056 	while ((p = strchr(p0, '(')) != NULL) {
1057 		++p;
1058 		p0 = strchr(p, ')');
1059 		if (p0 == NULL)
1060 			return -1;
1061 
1062 		size = p0 - p;
1063 		if (size >= sizeof(s))
1064 			return -1;
1065 
1066 		snprintf(s, sizeof(s), "%.*s", size, p);
1067 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1068 				_NUM_FLD)
1069 			return -1;
1070 		for (i = 0; i < _NUM_FLD; i++) {
1071 			errno = 0;
1072 			int_fld[i] = strtoul(str_fld[i], &end, 0);
1073 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1074 				return -1;
1075 		}
1076 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1077 			printf("exceeded max number of lcore params: %hu\n",
1078 				nb_lcore_params);
1079 			return -1;
1080 		}
1081 		lcore_params_array[nb_lcore_params].port_id =
1082 			(uint8_t)int_fld[FLD_PORT];
1083 		lcore_params_array[nb_lcore_params].queue_id =
1084 			(uint8_t)int_fld[FLD_QUEUE];
1085 		lcore_params_array[nb_lcore_params].lcore_id =
1086 			(uint8_t)int_fld[FLD_LCORE];
1087 		++nb_lcore_params;
1088 	}
1089 	lcore_params = lcore_params_array;
1090 	return 0;
1091 }
1092 
1093 static void
1094 print_app_sa_prm(const struct app_sa_prm *prm)
1095 {
1096 	printf("librte_ipsec usage: %s\n",
1097 		(prm->enable == 0) ? "disabled" : "enabled");
1098 
1099 	printf("replay window size: %u\n", prm->window_size);
1100 	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1101 	printf("SA flags: %#" PRIx64 "\n", prm->flags);
1102 	printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1103 }
1104 
1105 static int
1106 parse_transfer_mode(struct eh_conf *conf, const char *optarg)
1107 {
1108 	if (!strcmp(CMD_LINE_ARG_POLL, optarg))
1109 		conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1110 	else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
1111 		conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
1112 	else {
1113 		printf("Unsupported packet transfer mode\n");
1114 		return -EINVAL;
1115 	}
1116 
1117 	return 0;
1118 }
1119 
1120 static int
1121 parse_schedule_type(struct eh_conf *conf, const char *optarg)
1122 {
1123 	struct eventmode_conf *em_conf = NULL;
1124 
1125 	/* Get eventmode conf */
1126 	em_conf = conf->mode_params;
1127 
1128 	if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
1129 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
1130 	else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
1131 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
1132 	else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
1133 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
1134 	else {
1135 		printf("Unsupported queue schedule type\n");
1136 		return -EINVAL;
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 static int32_t
1143 parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
1144 {
1145 	int opt;
1146 	int64_t ret;
1147 	char **argvopt;
1148 	int32_t option_index;
1149 	char *prgname = argv[0];
1150 	int32_t f_present = 0;
1151 	struct eventmode_conf *em_conf = NULL;
1152 
1153 	argvopt = argv;
1154 
1155 	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
1156 				lgopts, &option_index)) != EOF) {
1157 
1158 		switch (opt) {
1159 		case 'p':
1160 			enabled_port_mask = parse_portmask(optarg);
1161 			if (enabled_port_mask == 0) {
1162 				printf("invalid portmask\n");
1163 				print_usage(prgname);
1164 				return -1;
1165 			}
1166 			break;
1167 		case 'P':
1168 			printf("Promiscuous mode selected\n");
1169 			promiscuous_on = 1;
1170 			break;
1171 		case 'u':
1172 			unprotected_port_mask = parse_portmask(optarg);
1173 			if (unprotected_port_mask == 0) {
1174 				printf("invalid unprotected portmask\n");
1175 				print_usage(prgname);
1176 				return -1;
1177 			}
1178 			break;
1179 		case 'f':
1180 			if (f_present == 1) {
1181 				printf("\"-f\" option present more than "
1182 					"once!\n");
1183 				print_usage(prgname);
1184 				return -1;
1185 			}
1186 			cfgfile = optarg;
1187 			f_present = 1;
1188 			break;
1189 
1190 		case 's':
1191 			ret = parse_decimal(optarg);
1192 			if (ret < 0) {
1193 				printf("Invalid number of buffers in a pool: "
1194 					"%s\n", optarg);
1195 				print_usage(prgname);
1196 				return -1;
1197 			}
1198 
1199 			nb_bufs_in_pool = ret;
1200 			break;
1201 
1202 		case 'j':
1203 			ret = parse_decimal(optarg);
1204 			if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1205 					ret > UINT16_MAX) {
1206 				printf("Invalid frame buffer size value: %s\n",
1207 					optarg);
1208 				print_usage(prgname);
1209 				return -1;
1210 			}
1211 			frame_buf_size = ret;
1212 			printf("Custom frame buffer size %u\n", frame_buf_size);
1213 			break;
1214 		case 'l':
1215 			app_sa_prm.enable = 1;
1216 			break;
1217 		case 'w':
1218 			app_sa_prm.window_size = parse_decimal(optarg);
1219 			break;
1220 		case 'e':
1221 			app_sa_prm.enable_esn = 1;
1222 			break;
1223 		case 'a':
1224 			app_sa_prm.enable = 1;
1225 			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1226 			break;
1227 		case 'c':
1228 			ret = parse_decimal(optarg);
1229 			if (ret < 0) {
1230 				printf("Invalid SA cache size: %s\n", optarg);
1231 				print_usage(prgname);
1232 				return -1;
1233 			}
1234 			app_sa_prm.cache_sz = ret;
1235 			break;
1236 		case 't':
1237 			ret = parse_decimal(optarg);
1238 			if (ret < 0) {
1239 				printf("Invalid interval value: %s\n", optarg);
1240 				print_usage(prgname);
1241 				return -1;
1242 			}
1243 			stats_interval = ret;
1244 			break;
1245 		case CMD_LINE_OPT_CONFIG_NUM:
1246 			ret = parse_config(optarg);
1247 			if (ret) {
1248 				printf("Invalid config\n");
1249 				print_usage(prgname);
1250 				return -1;
1251 			}
1252 			break;
1253 		case CMD_LINE_OPT_SINGLE_SA_NUM:
1254 			ret = parse_decimal(optarg);
1255 			if (ret == -1 || ret > UINT32_MAX) {
1256 				printf("Invalid argument[sa_idx]\n");
1257 				print_usage(prgname);
1258 				return -1;
1259 			}
1260 
1261 			/* else */
1262 			single_sa = 1;
1263 			single_sa_idx = ret;
1264 			eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1265 			wrkr_flags |= SS_F;
1266 			printf("Configured with single SA index %u\n",
1267 					single_sa_idx);
1268 			break;
1269 		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1270 			ret = parse_portmask(optarg);
1271 			if (ret == -1) {
1272 				printf("Invalid argument[portmask]\n");
1273 				print_usage(prgname);
1274 				return -1;
1275 			}
1276 
1277 			/* else */
1278 			enabled_cryptodev_mask = ret;
1279 			break;
1280 
1281 		case CMD_LINE_OPT_TRANSFER_MODE_NUM:
1282 			ret = parse_transfer_mode(eh_conf, optarg);
1283 			if (ret < 0) {
1284 				printf("Invalid packet transfer mode\n");
1285 				print_usage(prgname);
1286 				return -1;
1287 			}
1288 			break;
1289 
1290 		case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
1291 			ret = parse_schedule_type(eh_conf, optarg);
1292 			if (ret < 0) {
1293 				printf("Invalid queue schedule type\n");
1294 				print_usage(prgname);
1295 				return -1;
1296 			}
1297 			break;
1298 
1299 		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1300 			ret = parse_mask(optarg, &dev_rx_offload);
1301 			if (ret != 0) {
1302 				printf("Invalid argument for \'%s\': %s\n",
1303 					CMD_LINE_OPT_RX_OFFLOAD, optarg);
1304 				print_usage(prgname);
1305 				return -1;
1306 			}
1307 			break;
1308 		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1309 			ret = parse_mask(optarg, &dev_tx_offload);
1310 			if (ret != 0) {
1311 				printf("Invalid argument for \'%s\': %s\n",
1312 					CMD_LINE_OPT_TX_OFFLOAD, optarg);
1313 				print_usage(prgname);
1314 				return -1;
1315 			}
1316 			break;
1317 		case CMD_LINE_OPT_REASSEMBLE_NUM:
1318 			ret = parse_decimal(optarg);
1319 			if (ret < 0 || ret > UINT32_MAX) {
1320 				printf("Invalid argument for \'%s\': %s\n",
1321 					CMD_LINE_OPT_REASSEMBLE, optarg);
1322 				print_usage(prgname);
1323 				return -1;
1324 			}
1325 			frag_tbl_sz = ret;
1326 			break;
1327 		case CMD_LINE_OPT_MTU_NUM:
1328 			ret = parse_decimal(optarg);
1329 			if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1330 				printf("Invalid argument for \'%s\': %s\n",
1331 					CMD_LINE_OPT_MTU, optarg);
1332 				print_usage(prgname);
1333 				return -1;
1334 			}
1335 			mtu_size = ret;
1336 			break;
1337 		case CMD_LINE_OPT_FRAG_TTL_NUM:
1338 			ret = parse_decimal(optarg);
1339 			if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1340 				printf("Invalid argument for \'%s\': %s\n",
1341 					CMD_LINE_OPT_FRAG_TTL, optarg);
1342 				print_usage(prgname);
1343 				return -1;
1344 			}
1345 			frag_ttl_ns = ret;
1346 			break;
1347 		case CMD_LINE_OPT_EVENT_VECTOR_NUM:
1348 			em_conf = eh_conf->mode_params;
1349 			em_conf->ext_params.event_vector = 1;
1350 			break;
1351 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
1352 			ret = parse_decimal(optarg);
1353 
1354 			if (ret > MAX_PKT_BURST_VEC) {
1355 				printf("Invalid argument for \'%s\': %s\n",
1356 					CMD_LINE_OPT_VECTOR_SIZE, optarg);
1357 				print_usage(prgname);
1358 				return -1;
1359 			}
1360 			em_conf = eh_conf->mode_params;
1361 			em_conf->ext_params.vector_size = ret;
1362 			break;
1363 		case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
1364 			ret = parse_decimal(optarg);
1365 
1366 			em_conf = eh_conf->mode_params;
1367 			em_conf->vector_tmo_ns = ret;
1368 			break;
1369 		case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
1370 			ret = parse_decimal(optarg);
1371 
1372 			em_conf = eh_conf->mode_params;
1373 			em_conf->vector_pool_sz = ret;
1374 			break;
1375 		case CMD_LINE_OPT_PER_PORT_POOL_NUM:
1376 			per_port_pool = 1;
1377 			break;
1378 		case CMD_LINE_OPT_QP_DESC_NB_NUM:
1379 			qp_desc_nb = parse_decimal(optarg);
1380 			break;
1381 		default:
1382 			print_usage(prgname);
1383 			return -1;
1384 		}
1385 	}
1386 
1387 	if (f_present == 0) {
1388 		printf("Mandatory option \"-f\" not present\n");
1389 		return -1;
1390 	}
1391 
1392 	/* check whether we need to enable multi-seg support */
1393 	if (multi_seg_required()) {
1394 		/* legacy mode doesn't support multi-seg */
1395 		app_sa_prm.enable = 1;
1396 		printf("frame buf size: %u, mtu: %u, "
1397 			"number of reassemble entries: %u\n"
1398 			"multi-segment support is required\n",
1399 			frame_buf_size, mtu_size, frag_tbl_sz);
1400 	}
1401 
1402 	print_app_sa_prm(&app_sa_prm);
1403 
1404 	if (optind >= 0)
1405 		argv[optind-1] = prgname;
1406 
1407 	ret = optind-1;
1408 	optind = 1; /* reset getopt lib */
1409 	return ret;
1410 }
1411 
1412 static void
1413 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1414 {
1415 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1416 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1417 	printf("%s%s", name, buf);
1418 }
1419 
1420 /*
1421  * Update destination ethaddr for the port.
1422  */
1423 int
1424 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1425 {
1426 	if (port >= RTE_DIM(ethaddr_tbl))
1427 		return -EINVAL;
1428 
1429 	rte_ether_addr_copy(addr, &ethaddr_tbl[port].dst);
1430 	rte_ether_addr_copy(addr, (struct rte_ether_addr *)(val_eth + port));
1431 	return 0;
1432 }
1433 
1434 /* Check the link status of all ports for up to 9 s, and finally print it */
1435 static void
1436 check_all_ports_link_status(uint32_t port_mask)
1437 {
1438 #define CHECK_INTERVAL 100 /* 100ms */
1439 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1440 	uint16_t portid;
1441 	uint8_t count, all_ports_up, print_flag = 0;
1442 	struct rte_eth_link link;
1443 	int ret;
1444 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1445 
1446 	printf("\nChecking link status");
1447 	fflush(stdout);
1448 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1449 		all_ports_up = 1;
1450 		RTE_ETH_FOREACH_DEV(portid) {
1451 			if ((port_mask & (1 << portid)) == 0)
1452 				continue;
1453 			memset(&link, 0, sizeof(link));
1454 			ret = rte_eth_link_get_nowait(portid, &link);
1455 			if (ret < 0) {
1456 				all_ports_up = 0;
1457 				if (print_flag == 1)
1458 					printf("Port %u link get failed: %s\n",
1459 						portid, rte_strerror(-ret));
1460 				continue;
1461 			}
1462 			/* print link status if flag set */
1463 			if (print_flag == 1) {
1464 				rte_eth_link_to_str(link_status_text,
1465 					sizeof(link_status_text), &link);
1466 				printf("Port %d %s\n", portid,
1467 				       link_status_text);
1468 				continue;
1469 			}
1470 			/* clear all_ports_up flag if any link down */
1471 			if (link.link_status == RTE_ETH_LINK_DOWN) {
1472 				all_ports_up = 0;
1473 				break;
1474 			}
1475 		}
1476 		/* after finally printing all link status, get out */
1477 		if (print_flag == 1)
1478 			break;
1479 
1480 		if (all_ports_up == 0) {
1481 			printf(".");
1482 			fflush(stdout);
1483 			rte_delay_ms(CHECK_INTERVAL);
1484 		}
1485 
1486 		/* set the print_flag if all ports up or timeout */
1487 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1488 			print_flag = 1;
1489 			printf("done\n");
1490 		}
1491 	}
1492 }
1493 
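/*
 * Record a (lcore, cipher/auth/aead algorithm) -> (cdev, qp) entry in the
 * per-direction hash table, claiming the next free qp slot of the lcore when
 * this cdev is not in its table yet; returns 1 when a new mapping was
 * inserted, 0 otherwise.
 */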
1494 static int32_t
1495 add_mapping(const char *str, uint16_t cdev_id,
1496 		uint16_t qp, struct lcore_params *params,
1497 		struct ipsec_ctx *ipsec_ctx,
1498 		const struct rte_cryptodev_capabilities *cipher,
1499 		const struct rte_cryptodev_capabilities *auth,
1500 		const struct rte_cryptodev_capabilities *aead)
1501 {
1502 	int32_t ret = 0;
1503 	unsigned long i;
1504 	struct cdev_key key = { 0 };
1505 
1506 	key.lcore_id = params->lcore_id;
1507 	if (cipher)
1508 		key.cipher_algo = cipher->sym.cipher.algo;
1509 	if (auth)
1510 		key.auth_algo = auth->sym.auth.algo;
1511 	if (aead)
1512 		key.aead_algo = aead->sym.aead.algo;
1513 
1514 	ret = rte_hash_lookup(ipsec_ctx->cdev_map, &key);
1515 	if (ret != -ENOENT)
1516 		return 0;
1517 
1518 	for (i = 0; i < ipsec_ctx->nb_qps; i++)
1519 		if (ipsec_ctx->tbl[i].id == cdev_id)
1520 			break;
1521 
1522 	if (i == ipsec_ctx->nb_qps) {
1523 		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1524 			printf("Maximum number of crypto devices assigned to "
1525 				"a core, increase MAX_QP_PER_LCORE value\n");
1526 			return 0;
1527 		}
1528 		ipsec_ctx->tbl[i].id = cdev_id;
1529 		ipsec_ctx->tbl[i].qp = qp;
1530 		ipsec_ctx->nb_qps++;
1531 		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1532 				"(cdev_id_qp %lu)\n", str, key.lcore_id,
1533 				cdev_id, qp, i);
1534 	}
1535 
1536 	ret = rte_hash_add_key_data(ipsec_ctx->cdev_map, &key, (void *)i);
1537 	if (ret < 0) {
1538 		printf("Failed to insert cdev mapping for (lcore %u, "
1539 				"cdev %u, qp %u), errno %d\n",
1540 				key.lcore_id, ipsec_ctx->tbl[i].id,
1541 				ipsec_ctx->tbl[i].qp, ret);
1542 		return 0;
1543 	}
1544 
1545 	return 1;
1546 }
1547 
1548 static int32_t
1549 add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1550 		uint16_t qp, struct lcore_params *params)
1551 {
1552 	int32_t ret = 0;
1553 	const struct rte_cryptodev_capabilities *i, *j;
1554 	struct lcore_conf *qconf;
1555 	struct ipsec_ctx *ipsec_ctx;
1556 	const char *str;
1557 	void *sec_ctx;
1558 	const struct rte_security_capability *sec_cap;
1559 
1560 	qconf = &lcore_conf[params->lcore_id];
1561 
1562 	if (!is_unprotected_port(params->port_id)) {
1563 		ipsec_ctx = &qconf->outbound;
1564 		ipsec_ctx->cdev_map = cdev_map_out;
1565 		str = "Outbound";
1566 	} else {
1567 		ipsec_ctx = &qconf->inbound;
1568 		ipsec_ctx->cdev_map = cdev_map_in;
1569 		str = "Inbound";
1570 	}
1571 
1572 	/* Require cryptodevs that support either operation chaining or rte_security */
1573 	if (!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING) &&
1574 			!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SECURITY))
1575 		return ret;
1576 
1577 	for (i = dev_info->capabilities;
1578 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1579 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1580 			continue;
1581 
1582 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1583 			ret |= add_mapping(str, cdev_id, qp, params,
1584 					ipsec_ctx, NULL, NULL, i);
1585 			continue;
1586 		}
1587 
1588 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1589 			continue;
1590 
1591 		for (j = dev_info->capabilities;
1592 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1593 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1594 				continue;
1595 
1596 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1597 				continue;
1598 
1599 			ret |= add_mapping(str, cdev_id, qp, params,
1600 						ipsec_ctx, i, j, NULL);
1601 		}
1602 	}
1603 
1604 	sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1605 	if (sec_ctx == NULL)
1606 		return ret;
1607 
1608 	sec_cap = rte_security_capabilities_get(sec_ctx);
1609 	if (sec_cap == NULL)
1610 		return ret;
1611 
1612 	for (i = sec_cap->crypto_capabilities;
1613 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1614 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1615 			continue;
1616 
1617 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1618 			ret |= add_mapping(str, cdev_id, qp, params,
1619 					ipsec_ctx, NULL, NULL, i);
1620 			continue;
1621 		}
1622 
1623 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1624 			continue;
1625 
1626 		for (j = sec_cap->crypto_capabilities;
1627 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1628 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1629 				continue;
1630 
1631 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1632 				continue;
1633 
1634 			ret |= add_mapping(str, cdev_id, qp, params,
1635 						ipsec_ctx, i, j, NULL);
1636 		}
1637 	}
1638 
1639 	return ret;
1640 }
1641 
1642 static uint16_t
1643 map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id,
1644 		const struct rte_cryptodev_info *cdev_info,
1645 		uint16_t *last_used_lcore_id)
1646 {
1647 	uint16_t nb_qp = 0, i = 0, max_nb_qps;
1648 
1649 	/* For event lookaside mode all sessions are bound to a single qp.
1650 	 * It's enough to bind one core, since all cores will share the same qp.
1651 	 * Event inline mode does not use this functionality.
1652 	 */
1653 	if (mode == EH_PKT_TRANSFER_MODE_EVENT) {
1654 		add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]);
1655 		return 1;
1656 	}
1657 
1658 	/* Check if there are enough queue pairs for all configured cores */
1659 	max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs);
1660 
1661 	while (nb_qp < max_nb_qps && i < nb_lcore_params) {
1662 		if (add_cdev_mapping(cdev_info, cdev_id, nb_qp,
1663 					&lcore_params[*last_used_lcore_id]))
1664 			nb_qp++;
1665 		(*last_used_lcore_id)++;
1666 		*last_used_lcore_id %= nb_lcore_params;
1667 		i++;
1668 	}
1669 
1670 	return nb_qp;
1671 }
1672 
1673 /* Check if the device is enabled by cryptodev_mask */
1674 static int
1675 check_cryptodev_mask(uint8_t cdev_id)
1676 {
1677 	if (enabled_cryptodev_mask & (1 << cdev_id))
1678 		return 0;
1679 
1680 	return -1;
1681 }
1682 
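/*
 * Create the inbound/outbound cdev map hash tables, distribute queue pairs of
 * every enabled cryptodev across the configured lcores, then configure and
 * start the devices; returns the total number of queue pairs set up.
 */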
1683 static uint16_t
1684 cryptodevs_init(enum eh_pkt_transfer_mode mode)
1685 {
1686 	struct rte_hash_parameters params = { 0 };
1687 	struct rte_cryptodev_config dev_conf;
1688 	struct rte_cryptodev_qp_conf qp_conf;
1689 	uint16_t idx, qp, total_nb_qps;
1690 	int16_t cdev_id;
1691 
1692 	const uint64_t mseg_flag = multi_seg_required() ?
1693 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1694 
1695 	params.entries = CDEV_MAP_ENTRIES;
1696 	params.key_len = sizeof(struct cdev_key);
1697 	params.hash_func = rte_jhash;
1698 	params.hash_func_init_val = 0;
1699 	params.socket_id = rte_socket_id();
1700 
1701 	params.name = "cdev_map_in";
1702 	cdev_map_in = rte_hash_create(&params);
1703 	if (cdev_map_in == NULL)
1704 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1705 				rte_errno);
1706 
1707 	params.name = "cdev_map_out";
1708 	cdev_map_out = rte_hash_create(&params);
1709 	if (cdev_map_out == NULL)
1710 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1711 				rte_errno);
1712 
1713 	printf("lcore/cryptodev/qp mappings:\n");
1714 
1715 	idx = 0;
1716 	total_nb_qps = 0;
1717 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1718 		struct rte_cryptodev_info cdev_info;
1719 
1720 		if (check_cryptodev_mask((uint8_t)cdev_id))
1721 			continue;
1722 
1723 		rte_cryptodev_info_get(cdev_id, &cdev_info);
1724 
1725 		if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1726 			rte_exit(EXIT_FAILURE,
1727 				"Device %hd does not support \'%s\' feature\n",
1728 				cdev_id,
1729 				rte_cryptodev_get_feature_name(mseg_flag));
1730 
1731 
1732 		qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx);
1733 		if (qp == 0)
1734 			continue;
1735 
1736 		total_nb_qps += qp;
1737 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1738 		/* Use the first socket if SOCKET_ID_ANY is returned. */
1739 		if (dev_conf.socket_id == SOCKET_ID_ANY)
1740 			dev_conf.socket_id = 0;
1741 		dev_conf.nb_queue_pairs = qp;
1742 		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1743 
1744 		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1745 		if (dev_max_sess != 0 &&
1746 				dev_max_sess < get_nb_crypto_sessions())
1747 			rte_exit(EXIT_FAILURE,
1748 				"Device does not support at least %u "
1749 				"sessions", get_nb_crypto_sessions());
1750 
1751 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
1752 			rte_panic("Failed to initialize cryptodev %u\n",
1753 					cdev_id);
1754 
1755 		qp_conf.nb_descriptors = qp_desc_nb;
1756 		qp_conf.mp_session =
1757 			socket_ctx[dev_conf.socket_id].session_pool;
1758 		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1759 			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1760 					&qp_conf, dev_conf.socket_id))
1761 				rte_panic("Failed to setup queue %u for "
1762 						"cdev_id %u\n", qp, cdev_id);
1763 
1764 		if (rte_cryptodev_start(cdev_id))
1765 			rte_panic("Failed to start cryptodev %u\n",
1766 					cdev_id);
1767 	}
1768 
1769 	printf("\n");
1770 
1771 	return total_nb_qps;
1772 }
1773 
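/*
 * Return 1 when the port reports HW packet-type parsing for IPv4, IPv6, UDP
 * and ESP tunnel headers, 0 otherwise (the SW parser parse_ptype_cb() below
 * covers that case).
 */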
1774 static int
1775 check_ptype(int portid)
1776 {
1777 	int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
1778 	int i, nb_ptypes;
1779 	uint32_t mask;
1780 
1781 	mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
1782 		      RTE_PTYPE_TUNNEL_MASK);
1783 
1784 	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
1785 	if (nb_ptypes <= 0)
1786 		return 0;
1787 
1788 	uint32_t ptypes[nb_ptypes];
1789 
1790 	nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
1791 	for (i = 0; i < nb_ptypes; ++i) {
1792 		if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
1793 			l3_ipv4 = 1;
1794 		if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
1795 			l3_ipv6 = 1;
1796 		if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
1797 			tunnel_esp = 1;
1798 		if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
1799 			l4_udp = 1;
1800 	}
1801 
1802 	if (l3_ipv4 == 0)
1803 		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
1804 
1805 	if (l3_ipv6 == 0)
1806 		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
1807 
1808 	if (l4_udp == 0)
1809 		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
1810 
1811 	if (tunnel_esp == 0)
1812 		printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
1813 
1814 	if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
1815 		return 1;
1816 
1817 	return 0;
1818 
1819 }
1820 
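/*
 * SW packet-type parser: classify L3 (IPv4/IPv6 including extension headers),
 * detect ESP tunnels and, when UDP encapsulation is enabled, ESP-in-UDP on
 * the NAT-T port (IPSEC_NAT_T_PORT).
 */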
1821 static inline void
1822 parse_ptype(struct rte_mbuf *m)
1823 {
1824 	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1825 	const struct rte_ipv4_hdr *iph4;
1826 	const struct rte_ipv6_hdr *iph6;
1827 	const struct rte_ether_hdr *eth;
1828 	const struct rte_udp_hdr *udp;
1829 	uint16_t nat_port, ether_type;
1830 	int next_proto = 0;
1831 	size_t ext_len = 0;
1832 	const uint8_t *p;
1833 	uint32_t l3len;
1834 
1835 	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1836 	ether_type = eth->ether_type;
1837 
1838 	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
1839 		iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
1840 		l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
1841 			       RTE_IPV4_IHL_MULTIPLIER);
1842 
1843 		if (l3len == sizeof(struct rte_ipv4_hdr))
1844 			packet_type |= RTE_PTYPE_L3_IPV4;
1845 		else
1846 			packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
1847 
1848 		next_proto = iph4->next_proto_id;
1849 		p = (const uint8_t *)iph4;
1850 	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
1851 		iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
1852 		l3len = sizeof(struct ip6_hdr);
1853 
1854 		/* determine l3 header size up to ESP extension */
1855 		next_proto = iph6->proto;
1856 		p = (const uint8_t *)iph6;
1857 		while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
1858 			(next_proto = rte_ipv6_get_next_ext(p + l3len,
1859 						next_proto, &ext_len)) >= 0)
1860 			l3len += ext_len;
1861 
1862 		/* Skip if the IPv6 header exceeds the first segment length */
1863 		if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
1864 			goto exit;
1865 
1866 		if (l3len == sizeof(struct ip6_hdr))
1867 			packet_type |= RTE_PTYPE_L3_IPV6;
1868 		else
1869 			packet_type |= RTE_PTYPE_L3_IPV6_EXT;
1870 	}
1871 
1872 	switch (next_proto) {
1873 	case IPPROTO_ESP:
1874 		packet_type |= RTE_PTYPE_TUNNEL_ESP;
1875 		break;
1876 	case IPPROTO_UDP:
1877 		if (app_sa_prm.udp_encap == 1) {
1878 			udp = (const struct rte_udp_hdr *)(p + l3len);
1879 			nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
1880 			if (udp->src_port == nat_port ||
1881 			    udp->dst_port == nat_port)
1882 				packet_type |=
1883 					MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
1884 		}
1885 		break;
1886 	default:
1887 		break;
1888 	}
1889 exit:
1890 	m->packet_type |= packet_type;
1891 }
1892 
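/*
 * RX callback installed on queues whose port cannot report packet types
 * in hardware; it prefetches the next packet header and runs
 * parse_ptype() on every received mbuf.
 */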
1893 static uint16_t
1894 parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
1895 	       struct rte_mbuf *pkts[], uint16_t nb_pkts,
1896 	       uint16_t max_pkts __rte_unused,
1897 	       void *user_param __rte_unused)
1898 {
1899 	uint32_t i;
1900 
1901 	if (unlikely(nb_pkts == 0))
1902 		return nb_pkts;
1903 
1904 	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
1905 	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
1906 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
1907 			struct rte_ether_hdr *));
1908 		parse_ptype(pkts[i]);
1909 	}
1910 	parse_ptype(pkts[i]);
1911 
1912 	return nb_pkts;
1913 }
1914 
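/*
 * Configure one ethernet port: fetch its MAC address, validate the
 * requested RX/TX offloads against the device capabilities, set up one
 * TX queue per lcore plus the RX queues mapped to this port, and apply
 * the hardware IP reassembly configuration when requested.
 */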
1915 static void
1916 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
1917 	  uint8_t hw_reassembly)
1918 {
1919 	struct rte_eth_dev_info dev_info;
1920 	struct rte_eth_txconf *txconf;
1921 	uint16_t nb_tx_queue, nb_rx_queue;
1922 	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1923 	int32_t ret, socket_id;
1924 	struct lcore_conf *qconf;
1925 	struct rte_ether_addr ethaddr;
1926 	struct rte_eth_conf local_port_conf = port_conf;
1927 	struct rte_eth_ip_reassembly_params reass_capa = {0};
1928 	int ptype_supported;
1929 
1930 	ret = rte_eth_dev_info_get(portid, &dev_info);
1931 	if (ret != 0)
1932 		rte_exit(EXIT_FAILURE,
1933 			"Error during getting device (port %u) info: %s\n",
1934 			portid, strerror(-ret));
1935 
1936 	/* limit allowed HW offloads, as user requested */
1937 	dev_info.rx_offload_capa &= dev_rx_offload;
1938 	dev_info.tx_offload_capa &= dev_tx_offload;
1939 
1940 	printf("Configuring device port %u:\n", portid);
1941 
1942 	ret = rte_eth_macaddr_get(portid, &ethaddr);
1943 	if (ret != 0)
1944 		rte_exit(EXIT_FAILURE,
1945 			"Error getting MAC address (port %u): %s\n",
1946 			portid, rte_strerror(-ret));
1947 
1948 	rte_ether_addr_copy(&ethaddr, &ethaddr_tbl[portid].src);
1949 
1950 	rte_ether_addr_copy(&ethaddr_tbl[portid].dst,
1951 			    (struct rte_ether_addr *)(val_eth + portid));
1952 
1953 	rte_ether_addr_copy(&ethaddr_tbl[portid].src,
1954 			    (struct rte_ether_addr *)(val_eth + portid) + 1);
1955 
1956 	print_ethaddr("Address: ", &ethaddr);
1957 	printf("\n");
1958 
1959 	nb_rx_queue = get_port_nb_rx_queues(portid);
1960 	nb_tx_queue = nb_lcores;
1961 
1962 	if (nb_rx_queue > dev_info.max_rx_queues)
1963 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1964 				"(max rx queue is %u)\n",
1965 				nb_rx_queue, dev_info.max_rx_queues);
1966 
1967 	if (nb_tx_queue > dev_info.max_tx_queues)
1968 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1969 				"(max tx queue is %u)\n",
1970 				nb_tx_queue, dev_info.max_tx_queues);
1971 
1972 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1973 			nb_rx_queue, nb_tx_queue);
1974 
1975 	local_port_conf.rxmode.mtu = mtu_size;
1976 
1977 	if (multi_seg_required()) {
1978 		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1979 		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1980 	}
1981 
1982 	local_port_conf.rxmode.offloads |= req_rx_offloads;
1983 	local_port_conf.txmode.offloads |= req_tx_offloads;
1984 
1985 	/* Check that all required capabilities are supported */
1986 	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1987 			local_port_conf.rxmode.offloads)
1988 		rte_exit(EXIT_FAILURE,
1989 			"Error: port %u required RX offloads: 0x%" PRIx64
1990 			", available RX offloads: 0x%" PRIx64 "\n",
1991 			portid, local_port_conf.rxmode.offloads,
1992 			dev_info.rx_offload_capa);
1993 
1994 	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1995 			local_port_conf.txmode.offloads)
1996 		rte_exit(EXIT_FAILURE,
1997 			"Error: port %u required TX offloads: 0x%" PRIx64
1998 			", available TX offloads: 0x%" PRIx64 "\n",
1999 			portid, local_port_conf.txmode.offloads,
2000 			dev_info.tx_offload_capa);
2001 
2002 	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
2003 		local_port_conf.txmode.offloads |=
2004 			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2005 
2006 	printf("port %u configuring rx_offloads=0x%" PRIx64
2007 		", tx_offloads=0x%" PRIx64 "\n",
2008 		portid, local_port_conf.rxmode.offloads,
2009 		local_port_conf.txmode.offloads);
2010 
2011 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2012 		dev_info.flow_type_rss_offloads;
2013 	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2014 			port_conf.rx_adv_conf.rss_conf.rss_hf) {
2015 		printf("Port %u modified RSS hash function based on hardware support, "
2016 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
2017 			portid,
2018 			port_conf.rx_adv_conf.rss_conf.rss_hf,
2019 			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2020 	}
2021 
2022 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2023 			&local_port_conf);
2024 	if (ret < 0)
2025 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
2026 				"err=%d, port=%d\n", ret, portid);
2027 
2028 	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2029 	if (ret < 0)
2030 		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2031 				"err=%d, port=%d\n", ret, portid);
2032 
2033 	/* Check if required ptypes are supported */
2034 	ptype_supported = check_ptype(portid);
2035 	if (!ptype_supported)
2036 		printf("Port %d: packet types will be parsed in software\n", portid);
2037 
2038 	/* init one TX queue per lcore */
2039 	tx_queueid = 0;
2040 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2041 		if (rte_lcore_is_enabled(lcore_id) == 0)
2042 			continue;
2043 
2044 		if (numa_on)
2045 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2046 		else
2047 			socket_id = 0;
2048 
2049 		/* init TX queue */
2050 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2051 
2052 		txconf = &dev_info.default_txconf;
2053 		txconf->offloads = local_port_conf.txmode.offloads;
2054 
2055 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2056 				socket_id, txconf);
2057 		if (ret < 0)
2058 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2059 					"err=%d, port=%d\n", ret, portid);
2060 
2061 		qconf = &lcore_conf[lcore_id];
2062 		qconf->tx_queue_id[portid] = tx_queueid;
2063 
2064 		tx_queueid++;
2065 
2066 		/* init RX queues */
2067 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2068 			struct rte_eth_rxconf rxq_conf;
2069 			struct rte_mempool *pool;
2070 
2071 			if (portid != qconf->rx_queue_list[queue].port_id)
2072 				continue;
2073 
2074 			rx_queueid = qconf->rx_queue_list[queue].queue_id;
2075 
2076 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2077 					socket_id);
2078 
2079 			rxq_conf = dev_info.default_rxconf;
2080 			rxq_conf.offloads = local_port_conf.rxmode.offloads;
2081 
2082 			if (per_port_pool)
2083 				pool = socket_ctx[socket_id].mbuf_pool[portid];
2084 			else
2085 				pool = socket_ctx[socket_id].mbuf_pool[0];
2086 
2087 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2088 					nb_rxd,	socket_id, &rxq_conf, pool);
2089 			if (ret < 0)
2090 				rte_exit(EXIT_FAILURE,
2091 					"rte_eth_rx_queue_setup: err=%d, "
2092 					"port=%d\n", ret, portid);
2093 
2094 			/* Register Rx callback if ptypes are not supported */
2095 			if (!ptype_supported &&
2096 			    !rte_eth_add_rx_callback(portid, queue,
2097 						     parse_ptype_cb, NULL)) {
2098 				printf("Failed to add rx callback: port=%d, "
2099 				       "queue=%d\n", portid, queue);
2100 			}
2101 
2103 		}
2104 	}
2105 
2106 	if (hw_reassembly) {
2107 		rte_eth_ip_reassembly_capability_get(portid, &reass_capa);
2108 		reass_capa.timeout_ms = frag_ttl_ns;
2109 		rte_eth_ip_reassembly_conf_set(portid, &reass_capa);
2110 	}
2111 	printf("\n");
2112 }
2113 
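/*
 * Return the largest private session size needed by any crypto device
 * or security-capable ethernet port, so that one session mempool
 * element can hold every session type used by the application.
 */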
2114 static size_t
2115 max_session_size(void)
2116 {
2117 	size_t max_sz, sz;
2118 	void *sec_ctx;
2119 	int16_t cdev_id, port_id, n;
2120 
2121 	max_sz = 0;
2122 	n = rte_cryptodev_count();
2123 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
2124 		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2125 		if (sz > max_sz)
2126 			max_sz = sz;
2127 		/*
2128 		 * If crypto device is security capable, need to check the
2129 		 * size of security session as well.
2130 		 */
2131 
2132 		/* Get security context of the crypto device */
2133 		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2134 		if (sec_ctx == NULL)
2135 			continue;
2136 
2137 		/* Get size of security session */
2138 		sz = rte_security_session_get_size(sec_ctx);
2139 		if (sz > max_sz)
2140 			max_sz = sz;
2141 	}
2142 
2143 	RTE_ETH_FOREACH_DEV(port_id) {
2144 		if ((enabled_port_mask & (1 << port_id)) == 0)
2145 			continue;
2146 
2147 		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2148 		if (sec_ctx == NULL)
2149 			continue;
2150 
2151 		sz = rte_security_session_get_size(sec_ctx);
2152 		if (sz > max_sz)
2153 			max_sz = sz;
2154 	}
2155 
2156 	return max_sz;
2157 }
2158 
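/*
 * Create the symmetric crypto session mempool for a socket, sized from
 * the number of sessions required by the SA configuration plus
 * per-lcore cache headroom.
 */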
2159 static void
2160 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2161 {
2162 	char mp_name[RTE_MEMPOOL_NAMESIZE];
2163 	struct rte_mempool *sess_mp;
2164 	uint32_t nb_sess;
2165 
2166 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2167 			"sess_mp_%u", socket_id);
2168 	nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2169 		rte_lcore_count());
2170 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2171 			CDEV_MP_CACHE_MULTIPLIER);
2172 	sess_mp = rte_cryptodev_sym_session_pool_create(
2173 			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
2174 			0, socket_id);
2175 	ctx->session_pool = sess_mp;
2176 
2177 	if (ctx->session_pool == NULL)
2178 		rte_exit(EXIT_FAILURE,
2179 			"Cannot init session pool on socket %d\n", socket_id);
2180 	else
2181 		printf("Allocated session pool on socket %d\n", socket_id);
2182 }
2183 
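/*
 * Create the packet mbuf pool for a socket (one pool per port when
 * per-port pools are enabled) and, if multi-segment support is needed,
 * a shared pool of indirect mbufs.
 */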
2184 static void
2185 pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
2186 	  uint32_t nb_mbuf)
2187 {
2188 	char s[64];
2189 	int32_t ms;
2190 
2191 	/* Skip if this mbuf pool was already created by an earlier pool_init() call */
2193 	if (socket_ctx[socket_id].mbuf_pool[portid])
2194 		return;
2195 
2196 	snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
2197 	ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
2198 							 MEMPOOL_CACHE_SIZE,
2199 							 ipsec_metadata_size(),
2200 							 frame_buf_size,
2201 							 socket_id);
2202 
2203 	/*
2204 	 * if multi-segment support is enabled, then create a pool
2205 	 * for indirect mbufs. This is not per-port but global.
2206 	 */
2207 	ms = multi_seg_required();
2208 	if (ms != 0 && !ctx->mbuf_pool_indir) {
2209 		snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2210 		ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2211 			MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2212 	}
2213 
2214 	if (ctx->mbuf_pool[portid] == NULL ||
2215 	    (ms != 0 && ctx->mbuf_pool_indir == NULL))
2216 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2217 				socket_id);
2218 	else
2219 		printf("Allocated mbuf pool on socket %d\n", socket_id);
2220 }
2221 
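/*
 * Ethdev callback for RTE_ETH_EVENT_IPSEC notifications raised by
 * inline IPsec capable devices (for example on ESN overflow); it only
 * sanity-checks the event descriptor and its metadata.
 */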
2222 static int
2223 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2224 		 void *param, void *ret_param)
2225 {
2226 	uint64_t md;
2227 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
2228 
2229 	RTE_SET_USED(param);
2230 	RTE_SET_USED(port_id);
2231 
2232 	if (type != RTE_ETH_EVENT_IPSEC)
2233 		return -1;
2234 
2235 	event_desc = ret_param;
2236 	if (event_desc == NULL) {
2237 		printf("Event descriptor not set\n");
2238 		return -1;
2239 	}
2240 
2241 	md = event_desc->metadata;
2242 
2243 	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW) {
2244 		if (md == 0)
2245 			return -1;
2246 	} else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2248 		printf("Invalid IPsec event reported\n");
2249 		return -1;
2250 	}
2251 
2252 	return -1;
2253 }
2254 
2255 static int
2256 ethdev_reset_event_callback(uint16_t port_id,
2257 		enum rte_eth_event_type type,
2258 		 void *param __rte_unused, void *ret_param __rte_unused)
2259 {
2260 	printf("Reset Event on port id %d type %d\n", port_id, type);
2261 	printf("Force quit application\n");
2262 	force_quit = true;
2263 	return 0;
2264 }
2265 
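/*
 * RX callback performing software IP reassembly: fragmented IPv4/IPv6
 * packets are fed into the per-lcore fragment table and only complete
 * (or non-fragmented) packets are passed on; the IPv4 checksum is
 * recomputed after reassembly and expired fragments are freed from the
 * death row.
 */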
2266 static uint16_t
2267 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2268 	struct rte_mbuf *pkt[], uint16_t nb_pkts,
2269 	__rte_unused uint16_t max_pkts, void *user_param)
2270 {
2271 	uint64_t tm;
2272 	uint32_t i, k;
2273 	struct lcore_conf *lc;
2274 	struct rte_mbuf *mb;
2275 	struct rte_ether_hdr *eth;
2276 
2277 	lc = user_param;
2278 	k = 0;
2279 	tm = 0;
2280 
2281 	for (i = 0; i != nb_pkts; i++) {
2282 
2283 		mb = pkt[i];
2284 		eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2285 		if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2286 
2287 			struct rte_ipv4_hdr *iph;
2288 
2289 			iph = (struct rte_ipv4_hdr *)(eth + 1);
2290 			if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2291 
2292 				mb->l2_len = sizeof(*eth);
2293 				mb->l3_len = sizeof(*iph);
2294 				tm = (tm != 0) ? tm : rte_rdtsc();
2295 				mb = rte_ipv4_frag_reassemble_packet(
2296 					lc->frag.tbl, &lc->frag.dr,
2297 					mb, tm, iph);
2298 
2299 				if (mb != NULL) {
2300 					/* fix ip cksum after reassemble. */
2301 					iph = rte_pktmbuf_mtod_offset(mb,
2302 						struct rte_ipv4_hdr *,
2303 						mb->l2_len);
2304 					iph->hdr_checksum = 0;
2305 					iph->hdr_checksum = rte_ipv4_cksum(iph);
2306 				}
2307 			}
2308 		} else if (eth->ether_type ==
2309 				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2310 
2311 			struct rte_ipv6_hdr *iph;
2312 			struct rte_ipv6_fragment_ext *fh;
2313 
2314 			iph = (struct rte_ipv6_hdr *)(eth + 1);
2315 			fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2316 			if (fh != NULL) {
2317 				mb->l2_len = sizeof(*eth);
2318 				mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2319 					sizeof(*fh);
2320 				tm = (tm != 0) ? tm : rte_rdtsc();
2321 				mb = rte_ipv6_frag_reassemble_packet(
2322 					lc->frag.tbl, &lc->frag.dr,
2323 					mb, tm, iph, fh);
2324 				if (mb != NULL)
2325 					/* fix l3_len after reassemble. */
2326 					mb->l3_len = mb->l3_len - sizeof(*fh);
2327 			}
2328 		}
2329 
2330 		pkt[k] = mb;
2331 		k += (mb != NULL);
2332 	}
2333 
2334 	/* some fragments were encountered, drain death row */
2335 	if (tm != 0)
2336 		rte_ip_frag_free_death_row(&lc->frag.dr, 0);
2337 
2338 	return k;
2339 }
2340 
2341 
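/*
 * Per-lcore reassembly setup: create the fragment table on the lcore's
 * socket and install the reassembly RX callback on every RX queue
 * handled by that lcore.
 */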
2342 static int
2343 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2344 {
2345 	int32_t sid;
2346 	uint32_t i;
2347 	uint64_t frag_cycles;
2348 	const struct lcore_rx_queue *rxq;
2349 	const struct rte_eth_rxtx_callback *cb;
2350 
2351 	/* create fragment table */
2352 	sid = rte_lcore_to_socket_id(cid);
2353 	frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2354 		NS_PER_S * frag_ttl_ns;
2355 
2356 	lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2357 		FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2358 	if (lc->frag.tbl == NULL) {
2359 		printf("%s(%u): failed to create fragment table of size: %u, "
2360 			"error code: %d\n",
2361 			__func__, cid, frag_tbl_sz, rte_errno);
2362 		return -ENOMEM;
2363 	}
2364 
2365 	/* setup reassemble RX callbacks for all queues */
2366 	for (i = 0; i != lc->nb_rx_queue; i++) {
2367 
2368 		rxq = lc->rx_queue_list + i;
2369 		cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2370 			rx_callback, lc);
2371 		if (cb == NULL) {
2372 			printf("%s(%u): failed to install RX callback for "
2373 				"portid=%u, queueid=%u, error code: %d\n",
2374 				__func__, cid,
2375 				rxq->port_id, rxq->queue_id, rte_errno);
2376 			return -ENOMEM;
2377 		}
2378 	}
2379 
2380 	return 0;
2381 }
2382 
2383 static int
2384 reassemble_init(void)
2385 {
2386 	int32_t rc;
2387 	uint32_t i, lc;
2388 
2389 	rc = 0;
2390 	for (i = 0; i != nb_lcore_params; i++) {
2391 		lc = lcore_params[i].lcore_id;
2392 		rc = reassemble_lcore_init(lcore_conf + lc, lc);
2393 		if (rc != 0)
2394 			break;
2395 	}
2396 
2397 	return rc;
2398 }
2399 
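/*
 * When RX security offload is enabled on the port, install a catch-all
 * rte_flow rule that sends every ingress ESP packet to the SECURITY
 * action, so inline IPsec processing is applied in hardware.
 */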
2400 static void
2401 create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
2402 {
2403 	struct rte_flow_action action[2];
2404 	struct rte_flow_item pattern[2];
2405 	struct rte_flow_attr attr = {0};
2406 	struct rte_flow_error err;
2407 	struct rte_flow *flow;
2408 	int ret;
2409 
2410 	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
2411 		return;
2412 
2413 	/* Add the default rte_flow to enable SECURITY for all ESP packets */
2414 
2415 	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
2416 	pattern[0].spec = NULL;
2417 	pattern[0].mask = NULL;
2418 	pattern[0].last = NULL;
2419 	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
2420 
2421 	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
2422 	action[0].conf = NULL;
2423 	action[1].type = RTE_FLOW_ACTION_TYPE_END;
2424 	action[1].conf = NULL;
2425 
2426 	attr.ingress = 1;
2427 
2428 	ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
2429 	if (ret)
2430 		return;
2431 
2432 	flow = rte_flow_create(port_id, &attr, pattern, action, &err);
2433 	if (flow == NULL)
2434 		return;
2435 
2436 	flow_info_tbl[port_id].rx_def_flow = flow;
2437 	RTE_LOG(INFO, IPSEC,
2438 		"Created default flow enabling SECURITY for all ESP traffic on port %d\n",
2439 		port_id);
2440 }
2441 
2442 static void
2443 signal_handler(int signum)
2444 {
2445 	if (signum == SIGINT || signum == SIGTERM) {
2446 		printf("\n\nSignal %d received, preparing to exit...\n",
2447 				signum);
2448 		force_quit = true;
2449 	}
2450 }
2451 
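/*
 * Verify that every SA uses a session type supported in event mode
 * (inline protocol or lookaside protocol) and enable the event crypto
 * adapter when lookaside protocol sessions are present.
 */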
2452 static void
2453 ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
2454 		struct eventmode_conf *em_conf)
2455 {
2456 	struct rte_ipsec_session *ips;
2457 	int32_t i;
2458 
2459 	if (!sa || !nb_sa)
2460 		return;
2461 
2462 	for (i = 0; i < nb_sa; i++) {
2463 		ips = ipsec_get_primary_session(&sa[i]);
2464 		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
2465 			em_conf->enable_event_crypto_adapter = true;
2466 		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
2467 			rte_exit(EXIT_FAILURE, "Event mode supports inline "
2468 				 "and lookaside protocol sessions\n");
2469 	}
2470 
2471 }
2472 
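/*
 * Validate mode-specific options: reject --event-schedule-type in poll
 * mode and --config in event mode, default the schedule type to
 * ORDERED, verify SA session types, and build a one-queue-per-port
 * lcore_params table so port_init() can be shared with poll mode.
 */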
2473 static int32_t
2474 check_event_mode_params(struct eh_conf *eh_conf)
2475 {
2476 	struct eventmode_conf *em_conf = NULL;
2477 	struct lcore_params *params;
2478 	uint16_t portid;
2479 
2480 	if (!eh_conf || !eh_conf->mode_params)
2481 		return -EINVAL;
2482 
2483 	/* Get eventmode conf */
2484 	em_conf = eh_conf->mode_params;
2485 
2486 	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
2487 	    em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
2488 		printf("error: option --event-schedule-type applies only to "
2489 		       "event mode\n");
2490 		return -EINVAL;
2491 	}
2492 
2493 	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
2494 		return 0;
2495 
2496 	/* Set schedule type to ORDERED if it wasn't explicitly set by user */
2497 	if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
2498 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
2499 
2500 	/*
2501 	 * Event mode currently supports inline and lookaside protocol
2502 	 * sessions. If there are other types of sessions configured then exit
2503 	 * with error.
2504 	 */
2505 	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
2506 	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
2507 
2508 	/* Option --config does not apply to event mode */
2509 	if (nb_lcore_params > 0) {
2510 		printf("error: option --config applies only to poll mode\n");
2511 		return -EINVAL;
2512 	}
2513 
2514 	/*
2515 	 * In order to use the same port_init routine for both poll and event
2516 	 * modes initialize lcore_params with one queue for each eth port
2517 	 */
2518 	lcore_params = lcore_params_array;
2519 	RTE_ETH_FOREACH_DEV(portid) {
2520 		if ((enabled_port_mask & (1 << portid)) == 0)
2521 			continue;
2522 
2523 		params = &lcore_params[nb_lcore_params++];
2524 		params->port_id = portid;
2525 		params->queue_id = 0;
2526 		params->lcore_id = rte_get_next_lcore(0, 0, 1);
2527 	}
2528 
2529 	return 0;
2530 }
2531 
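/*
 * Release a single IPsec session, using the cryptodev API for
 * NONE/CPU_CRYPTO sessions and the security API otherwise; sessions
 * that were never created are skipped.
 */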
2532 static int
2533 one_session_free(struct rte_ipsec_session *ips)
2534 {
2535 	int32_t ret = 0;
2536 
2537 	if (ips->type == RTE_SECURITY_ACTION_TYPE_NONE ||
2538 		ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
2539 		/* Session has not been created */
2540 		if (ips->crypto.ses == NULL)
2541 			return 0;
2542 
2543 		ret = rte_cryptodev_sym_session_free(ips->crypto.dev_id,
2544 				ips->crypto.ses);
2545 	} else {
2546 		/* Session has not been created */
2547 		if (ips->security.ctx == NULL || ips->security.ses == NULL)
2548 			return 0;
2549 
2550 		ret = rte_security_session_destroy(ips->security.ctx,
2551 						   ips->security.ses);
2552 	}
2553 
2554 	return ret;
2555 }
2556 
2557 static void
2558 sessions_free(struct sa_ctx *sa_ctx)
2559 {
2560 	struct rte_ipsec_session *ips;
2561 	struct ipsec_sa *sa;
2562 	int32_t ret;
2563 	uint32_t i;
2564 
2565 	if (!sa_ctx)
2566 		return;
2567 
2568 	for (i = 0; i < sa_ctx->nb_sa; i++) {
2569 
2570 		sa = &sa_ctx->sa[i];
2571 		if (!sa->spi)
2572 			continue;
2573 
2574 		ips = ipsec_get_primary_session(sa);
2575 		ret = one_session_free(ips);
2576 		if (ret)
2577 			RTE_LOG(ERR, IPSEC, "Failed to destroy security "
2578 					    "session type %d, spi %d\n",
2579 					    ips->type, sa->spi);
2580 	}
2581 }
2582 
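/*
 * Size the packet mbuf pool from the RX/TX descriptor counts, per-lcore
 * burst and cache needs, crypto queue depths and fragment table
 * capacity, with a floor of 8192 mbufs.
 */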
2583 static uint32_t
2584 calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
2585 		uint32_t nb_txq)
2586 {
2587 	return RTE_MAX((nb_rxq * nb_rxd +
2588 			nb_ports * nb_lcores * MAX_PKT_BURST +
2589 			nb_ports * nb_txq * nb_txd +
2590 			nb_lcores * MEMPOOL_CACHE_SIZE +
2591 			nb_crypto_qp * qp_desc_nb +
2592 			nb_lcores * frag_tbl_sz *
2593 			FRAG_TBL_BUCKET_ENTRIES),
2594 		       8192U);
2595 }
2596 
2597 
2598 static int
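/*
 * Telemetry handler for /examples/ipsec-secgw/stats: report packets
 * received, transmitted and dropped, either for the lcore given as
 * parameter or summed over all enabled lcores.
 */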
2599 handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
2600 		const char *params, struct rte_tel_data *data)
2601 {
2602 	uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
2603 	unsigned int coreid;
2604 
2605 	rte_tel_data_start_dict(data);
2606 
2607 	if (params) {
2608 		coreid = (uint32_t)atoi(params);
2609 		if (rte_lcore_is_enabled(coreid) == 0)
2610 			return -EINVAL;
2611 
2612 		total_pkts_dropped = core_statistics[coreid].dropped;
2613 		total_pkts_tx = core_statistics[coreid].tx;
2614 		total_pkts_rx = core_statistics[coreid].rx;
2615 
2616 	} else {
2617 		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
2618 
2619 			/* skip disabled cores */
2620 			if (rte_lcore_is_enabled(coreid) == 0)
2621 				continue;
2622 
2623 			total_pkts_dropped += core_statistics[coreid].dropped;
2624 			total_pkts_tx += core_statistics[coreid].tx;
2625 			total_pkts_rx += core_statistics[coreid].rx;
2626 		}
2627 	}
2628 
2629 	/* add telemetry key/values pairs */
2630 	rte_tel_data_add_dict_uint(data, "packets received", total_pkts_rx);
2631 
2632 	rte_tel_data_add_dict_uint(data, "packets transmitted", total_pkts_tx);
2633 
2634 	rte_tel_data_add_dict_uint(data, "packets dropped",
2635 				   total_pkts_dropped);
2636 
2637 
2638 	return 0;
2639 }
2640 
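/*
 * Accumulate the counters of one enabled lcore into the supplied total;
 * update_statistics() below uses it for either a single lcore or all
 * lcores.
 */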
2641 static void
2642 update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2643 {
2644 	struct ipsec_core_statistics *lcore_stats;
2645 
2646 	/* skip disabled cores */
2647 	if (rte_lcore_is_enabled(coreid) == 0)
2648 		return;
2649 
2650 	lcore_stats = &core_statistics[coreid];
2651 
2652 	total->rx += lcore_stats->rx;
2653 	total->dropped += lcore_stats->dropped;
2654 	total->frag_dropped += lcore_stats->frag_dropped;
2655 	total->tx += lcore_stats->tx;
2656 
2657 	/* outbound stats */
2658 	total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
2659 	total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
2660 	total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
2661 
2662 	total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
2663 	total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
2664 	total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
2665 
2666 	total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
2667 
2668 	/* inbound stats */
2669 	total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
2670 	total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
2671 	total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
2672 
2673 	total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
2674 	total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
2675 	total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
2676 
2677 	total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
2678 
2679 
2680 	/* routing stats */
2681 	total->lpm4.miss += lcore_stats->lpm4.miss;
2682 	total->lpm6.miss += lcore_stats->lpm6.miss;
2683 }
2684 
2685 static void
2686 update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2687 {
2688 	memset(total, 0, sizeof(*total));
2689 
2690 	if (coreid != UINT32_MAX) {
2691 		update_lcore_statistics(total, coreid);
2692 	} else {
2693 		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
2694 			update_lcore_statistics(total, coreid);
2695 	}
2696 }
2697 
2698 static int
2699 handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
2700 		const char *params, struct rte_tel_data *data)
2701 {
2702 	struct ipsec_core_statistics total_stats;
2703 
2704 	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2705 	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2706 	struct rte_tel_data *sad_data = rte_tel_data_alloc();
2707 	unsigned int coreid = UINT32_MAX;
2708 	int rc = 0;
2709 
2710 	/* verify allocated telemetry data structures */
2711 	if (!spd4_data || !spd6_data || !sad_data) {
2712 		rc = -ENOMEM;
2713 		goto exit;
2714 	}
2715 
2716 	/* initialize telemetry data structs as dicts */
2717 	rte_tel_data_start_dict(data);
2718 
2719 	rte_tel_data_start_dict(spd4_data);
2720 	rte_tel_data_start_dict(spd6_data);
2721 	rte_tel_data_start_dict(sad_data);
2722 
2723 	if (params) {
2724 		coreid = (uint32_t)atoi(params);
2725 		if (rte_lcore_is_enabled(coreid) == 0) {
2726 			rc = -EINVAL;
2727 			goto exit;
2728 		}
2729 	}
2730 
2731 	update_statistics(&total_stats, coreid);
2732 
2733 	/* add spd 4 telemetry key/values pairs */
2734 
2735 	rte_tel_data_add_dict_uint(spd4_data, "protect",
2736 				   total_stats.outbound.spd4.protect);
2737 	rte_tel_data_add_dict_uint(spd4_data, "bypass",
2738 				   total_stats.outbound.spd4.bypass);
2739 	rte_tel_data_add_dict_uint(spd4_data, "discard",
2740 				   total_stats.outbound.spd4.discard);
2741 
2742 	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2743 
2744 	/* add spd 6 telemetry key/values pairs */
2745 
2746 	rte_tel_data_add_dict_uint(spd6_data, "protect",
2747 				   total_stats.outbound.spd6.protect);
2748 	rte_tel_data_add_dict_uint(spd6_data, "bypass",
2749 				   total_stats.outbound.spd6.bypass);
2750 	rte_tel_data_add_dict_uint(spd6_data, "discard",
2751 				   total_stats.outbound.spd6.discard);
2752 
2753 	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2754 
2755 	/* add sad telemetry key/values pairs */
2756 
2757 	rte_tel_data_add_dict_uint(sad_data, "miss",
2758 				   total_stats.outbound.sad.miss);
2759 
2760 	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2761 
2762 exit:
2763 	if (rc) {
2764 		rte_tel_data_free(spd4_data);
2765 		rte_tel_data_free(spd6_data);
2766 		rte_tel_data_free(sad_data);
2767 	}
2768 	return rc;
2769 }
2770 
2771 static int
2772 handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
2773 		const char *params, struct rte_tel_data *data)
2774 {
2775 	struct ipsec_core_statistics total_stats;
2776 
2777 	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2778 	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2779 	struct rte_tel_data *sad_data = rte_tel_data_alloc();
2780 	unsigned int coreid = UINT32_MAX;
2781 	int rc = 0;
2782 
2783 	/* verify allocated telemetry data structures */
2784 	if (!spd4_data || !spd6_data || !sad_data) {
2785 		rc = -ENOMEM;
2786 		goto exit;
2787 	}
2788 
2789 	/* initialize telemetry data structs as dicts */
2790 	rte_tel_data_start_dict(data);
2791 	rte_tel_data_start_dict(spd4_data);
2792 	rte_tel_data_start_dict(spd6_data);
2793 	rte_tel_data_start_dict(sad_data);
2794 
2795 	/* add children dicts to parent dict */
2796 
2797 	if (params) {
2798 		coreid = (uint32_t)atoi(params);
2799 		if (rte_lcore_is_enabled(coreid) == 0) {
2800 			rc = -EINVAL;
2801 			goto exit;
2802 		}
2803 	}
2804 
2805 	update_statistics(&total_stats, coreid);
2806 
2807 	/* add sad telemetry key/values pairs */
2808 
2809 	rte_tel_data_add_dict_uint(sad_data, "miss",
2810 				   total_stats.inbound.sad.miss);
2811 
2812 	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2813 
2814 	/* add spd 4 telemetry key/values pairs */
2815 
2816 	rte_tel_data_add_dict_uint(spd4_data, "protect",
2817 				   total_stats.inbound.spd4.protect);
2818 	rte_tel_data_add_dict_uint(spd4_data, "bypass",
2819 				   total_stats.inbound.spd4.bypass);
2820 	rte_tel_data_add_dict_uint(spd4_data, "discard",
2821 				   total_stats.inbound.spd4.discard);
2822 
2823 	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2824 
2825 	/* add spd 6 telemetry key/values pairs */
2826 
2827 	rte_tel_data_add_dict_uint(spd6_data, "protect",
2828 				   total_stats.inbound.spd6.protect);
2829 	rte_tel_data_add_dict_uint(spd6_data, "bypass",
2830 				   total_stats.inbound.spd6.bypass);
2831 	rte_tel_data_add_dict_uint(spd6_data, "discard",
2832 				   total_stats.inbound.spd6.discard);
2833 
2834 	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2835 
2836 exit:
2837 	if (rc) {
2838 		rte_tel_data_free(spd4_data);
2839 		rte_tel_data_free(spd6_data);
2840 		rte_tel_data_free(sad_data);
2841 	}
2842 	return rc;
2843 }
2844 
2845 static int
2846 handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
2847 		const char *params, struct rte_tel_data *data)
2848 {
2849 	struct ipsec_core_statistics total_stats;
2850 
2851 	struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
2852 	struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
2853 	unsigned int coreid = UINT32_MAX;
2854 	int rc = 0;
2855 
2856 	/* verify allocated telemetry data structures */
2857 	if (!lpm4_data || !lpm6_data) {
2858 		rc = -ENOMEM;
2859 		goto exit;
2860 	}
2861 
2862 	/* initialize telemetry data structs as dicts */
2863 	rte_tel_data_start_dict(data);
2864 	rte_tel_data_start_dict(lpm4_data);
2865 	rte_tel_data_start_dict(lpm6_data);
2866 
2867 
2868 	if (params) {
2869 		coreid = (uint32_t)atoi(params);
2870 		if (rte_lcore_is_enabled(coreid) == 0) {
2871 			rc = -EINVAL;
2872 			goto exit;
2873 		}
2874 	}
2875 
2876 	update_statistics(&total_stats, coreid);
2877 
2878 	/* add lpm 4 telemetry key/values pairs */
2879 	rte_tel_data_add_dict_uint(lpm4_data, "miss", total_stats.lpm4.miss);
2880 
2881 	rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
2882 
2883 	/* add lpm 6 telemetry key/values pairs */
2884 	rte_tel_data_add_dict_uint(lpm6_data, "miss", total_stats.lpm6.miss);
2885 
2886 	rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
2887 
2888 exit:
2889 	if (rc) {
2890 		rte_tel_data_free(lpm4_data);
2891 		rte_tel_data_free(lpm6_data);
2892 	}
2893 	return rc;
2894 }
2895 
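/*
 * Register the application's telemetry endpoints. They can be queried
 * at runtime over the DPDK telemetry socket, e.g. with the standard
 * usertools/dpdk-telemetry.py client (the lcore id after the comma is
 * optional):
 *
 *   --> /examples/ipsec-secgw/stats
 *   --> /examples/ipsec-secgw/stats/outbound,2
 */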
2896 static void
2897 ipsec_secgw_telemetry_init(void)
2898 {
2899 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
2900 		handle_telemetry_cmd_ipsec_secgw_stats,
2901 		"Returns global stats. "
2902 		"Optional Parameters: int <logical core id>");
2903 
2904 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
2905 		handle_telemetry_cmd_ipsec_secgw_stats_outbound,
2906 		"Returns outbound global stats. "
2907 		"Optional Parameters: int <logical core id>");
2908 
2909 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
2910 		handle_telemetry_cmd_ipsec_secgw_stats_inbound,
2911 		"Returns inbound global stats. "
2912 		"Optional Parameters: int <logical core id>");
2913 
2914 	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
2915 		handle_telemetry_cmd_ipsec_secgw_stats_routing,
2916 		"Returns routing stats. "
2917 		"Optional Parameters: int <logical core id>");
2918 }
2919 
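/*
 * Application entry point: parse EAL and application arguments, load
 * the SP/SA/routing configuration, create mbuf and session pools,
 * initialise crypto devices and ethernet ports, start the ports and
 * launch the per-lcore worker loops until a quit signal or fatal event
 * is received.
 */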
2920 int32_t
2921 main(int32_t argc, char **argv)
2922 {
2923 	int32_t ret;
2924 	uint32_t lcore_id, nb_txq, nb_rxq = 0;
2925 	uint32_t cdev_id;
2926 	uint32_t i;
2927 	uint8_t socket_id;
2928 	uint16_t portid, nb_crypto_qp, nb_ports = 0;
2929 	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
2930 	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
2931 	uint8_t req_hw_reassembly[RTE_MAX_ETHPORTS];
2932 	struct eh_conf *eh_conf = NULL;
2933 	uint32_t ipv4_cksum_port_mask = 0;
2934 	size_t sess_sz;
2935 
2936 	nb_bufs_in_pool = 0;
2937 
2938 	/* init EAL */
2939 	ret = rte_eal_init(argc, argv);
2940 	if (ret < 0)
2941 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2942 	argc -= ret;
2943 	argv += ret;
2944 
2945 	force_quit = false;
2946 	signal(SIGINT, signal_handler);
2947 	signal(SIGTERM, signal_handler);
2948 
2949 	/* initialize event helper configuration */
2950 	eh_conf = eh_conf_init();
2951 	if (eh_conf == NULL)
2952 		rte_exit(EXIT_FAILURE, "Failed to init event helper config");
2953 
2954 	/* parse application arguments (after the EAL ones) */
2955 	ret = parse_args(argc, argv, eh_conf);
2956 	if (ret < 0)
2957 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2958 
2959 	ipsec_secgw_telemetry_init();
2960 
2961 	/* parse configuration file */
2962 	if (parse_cfg_file(cfgfile) < 0) {
2963 		printf("parsing file \"%s\" failed\n",
2964 			cfgfile);
2965 		print_usage(argv[0]);
2966 		return -1;
2967 	}
2968 
2969 	if ((unprotected_port_mask & enabled_port_mask) !=
2970 			unprotected_port_mask)
2971 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2972 				unprotected_port_mask);
2973 
2974 	if (unprotected_port_mask && !nb_sa_in)
2975 		rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
2976 
2977 	if (check_poll_mode_params(eh_conf) < 0)
2978 		rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
2979 
2980 	if (check_event_mode_params(eh_conf) < 0)
2981 		rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
2982 
2983 	ret = init_lcore_rx_queues();
2984 	if (ret < 0)
2985 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2986 
2987 	nb_lcores = rte_lcore_count();
2988 
2989 	sess_sz = max_session_size();
2990 
2991 	/*
2992 	 * In event mode request minimum number of crypto queues
2993 	 * to be reserved equal to number of ports.
2994 	 */
2995 	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
2996 		nb_crypto_qp = rte_eth_dev_count_avail();
2997 	else
2998 		nb_crypto_qp = 0;
2999 
3000 	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
3001 
3002 	if (nb_bufs_in_pool == 0) {
3003 		RTE_ETH_FOREACH_DEV(portid) {
3004 			if ((enabled_port_mask & (1 << portid)) == 0)
3005 				continue;
3006 			nb_ports++;
3007 			nb_rxq += get_port_nb_rx_queues(portid);
3008 		}
3009 
3010 		nb_txq = nb_lcores;
3011 
3012 		nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
3013 						nb_rxq, nb_txq);
3014 	}
3015 
3016 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
3017 		if (rte_lcore_is_enabled(lcore_id) == 0)
3018 			continue;
3019 
3020 		if (numa_on)
3021 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
3022 		else
3023 			socket_id = 0;
3024 
3025 		if (per_port_pool) {
3026 			RTE_ETH_FOREACH_DEV(portid) {
3027 				if ((enabled_port_mask & (1 << portid)) == 0)
3028 					continue;
3029 
3030 				pool_init(&socket_ctx[socket_id], socket_id,
3031 					  portid, nb_bufs_in_pool);
3032 			}
3033 		} else {
3034 			pool_init(&socket_ctx[socket_id], socket_id, 0,
3035 				  nb_bufs_in_pool);
3036 		}
3037 
3038 		if (socket_ctx[socket_id].session_pool)
3039 			continue;
3040 
3041 		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
3042 	}
3043 	printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
3044 
3045 	RTE_ETH_FOREACH_DEV(portid) {
3046 		if ((enabled_port_mask & (1 << portid)) == 0)
3047 			continue;
3048 
3049 		sa_check_offloads(portid, &req_rx_offloads[portid], &req_tx_offloads[portid],
3050 				  &req_hw_reassembly[portid]);
3051 		port_init(portid, req_rx_offloads[portid], req_tx_offloads[portid],
3052 			  req_hw_reassembly[portid]);
3053 		if ((req_tx_offloads[portid] & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
3054 			ipv4_cksum_port_mask |= 1U << portid;
3055 	}
3056 
3057 	tx_offloads.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
3058 	tx_offloads.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
3059 	/* Update per lcore checksum offload support only if all ports support it */
3060 	if (ipv4_cksum_port_mask == enabled_port_mask)
3061 		tx_offloads.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
3062 
3063 	lcore_id = 0;
3064 	RTE_LCORE_FOREACH(lcore_id) {
3065 		/* Pre-populate pkt offloads based on capabilities */
3066 		lcore_conf[lcore_id].outbound.ipv4_offloads = tx_offloads.ipv4_offloads;
3067 		lcore_conf[lcore_id].outbound.ipv6_offloads = tx_offloads.ipv6_offloads;
3068 	}
3069 
3070 	/*
3071 	 * Set the enabled port mask in helper config for use by helper
3072 	 * sub-system. This will be used while initializing devices using
3073 	 * helper sub-system.
3074 	 */
3075 	eh_conf->eth_portmask = enabled_port_mask;
3076 
3077 	/* Initialize eventmode components */
3078 	ret = eh_devs_init(eh_conf);
3079 	if (ret < 0)
3080 		rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
3081 
3082 	/* start ports */
3083 	RTE_ETH_FOREACH_DEV(portid) {
3084 		if ((enabled_port_mask & (1 << portid)) == 0)
3085 			continue;
3086 
3087 		ret = rte_eth_dev_start(portid);
3088 		if (ret < 0)
3089 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
3090 					"err=%d, port=%d\n", ret, portid);
3091 
3092 		/* Create flow after starting the device */
3093 		create_default_ipsec_flow(portid, req_rx_offloads[portid]);
3094 
3095 		/*
3096 		 * If enabled, put device in promiscuous mode.
3097 		 * This allows IO forwarding mode to forward packets
3098 		 * to itself through 2 cross-connected ports of the
3099 		 * target machine.
3100 		 */
3101 		if (promiscuous_on) {
3102 			ret = rte_eth_promiscuous_enable(portid);
3103 			if (ret != 0)
3104 				rte_exit(EXIT_FAILURE,
3105 					"rte_eth_promiscuous_enable: err=%s, port=%d\n",
3106 					rte_strerror(-ret), portid);
3107 		}
3108 
3109 		rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
3110 			ethdev_reset_event_callback, NULL);
3111 
3112 		rte_eth_dev_callback_register(portid,
3113 			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
3114 	}
3115 
3116 	/* fragment reassemble is enabled */
3117 	if (frag_tbl_sz != 0) {
3118 		ret = reassemble_init();
3119 		if (ret != 0)
3120 			rte_exit(EXIT_FAILURE, "failed at reassemble init\n");
3121 	}
3122 
3123 	/* Replicate each context per socket */
3124 	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3125 		socket_id = rte_socket_id_by_idx(i);
3126 		if ((socket_ctx[socket_id].session_pool != NULL) &&
3127 			(socket_ctx[socket_id].sa_in == NULL) &&
3128 			(socket_ctx[socket_id].sa_out == NULL)) {
3129 			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
3130 				eh_conf->mode_params);
3131 			sp4_init(&socket_ctx[socket_id], socket_id);
3132 			sp6_init(&socket_ctx[socket_id], socket_id);
3133 			rt_init(&socket_ctx[socket_id], socket_id);
3134 		}
3135 	}
3136 
3137 	flow_init();
3138 
3139 	/* Get security context if available and only if dynamic field is
3140 	 * registered for fast path access.
3141 	 */
3142 	if (!rte_security_dynfield_is_registered())
3143 		goto skip_sec_ctx;
3144 
3145 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
3146 		for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
3147 			portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
3148 			lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
3149 				rte_eth_dev_get_sec_ctx(portid);
3150 		}
3151 	}
3152 skip_sec_ctx:
3153 
3154 	check_all_ports_link_status(enabled_port_mask);
3155 
3156 	if (stats_interval > 0)
3157 		rte_eal_alarm_set(stats_interval * US_PER_S,
3158 				print_stats_cb, NULL);
3159 	else
3160 		RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
3161 
3162 	/* launch per-lcore init on every lcore */
3163 	rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
3164 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
3165 		if (rte_eal_wait_lcore(lcore_id) < 0)
3166 			return -1;
3167 	}
3168 
3169 	/* Uninitialize eventmode components */
3170 	ret = eh_devs_uninit(eh_conf);
3171 	if (ret < 0)
3172 		rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
3173 
3174 	/* Free eventmode configuration memory */
3175 	eh_conf_uninit(eh_conf);
3176 
3177 	/* Destroy inbound and outbound sessions */
3178 	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3179 		socket_id = rte_socket_id_by_idx(i);
3180 		sessions_free(socket_ctx[socket_id].sa_in);
3181 		sessions_free(socket_ctx[socket_id].sa_out);
3182 	}
3183 
3184 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
3185 		printf("Closing cryptodev %d...", cdev_id);
3186 		rte_cryptodev_stop(cdev_id);
3187 		rte_cryptodev_close(cdev_id);
3188 		printf(" Done\n");
3189 	}
3190 
3191 	flow_print_counters();
3192 
3193 	RTE_ETH_FOREACH_DEV(portid) {
3194 		if ((enabled_port_mask & (1 << portid)) == 0)
3195 			continue;
3196 
3197 		printf("Closing port %d...", portid);
3198 		if (flow_info_tbl[portid].rx_def_flow) {
3199 			struct rte_flow_error err;
3200 
3201 			ret = rte_flow_destroy(portid,
3202 				flow_info_tbl[portid].rx_def_flow, &err);
3203 			if (ret)
3204 				RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
3205 					"for port %u, err msg: %s\n", portid,
3206 					err.message);
3207 		}
3208 		ret = rte_eth_dev_stop(portid);
3209 		if (ret != 0)
3210 			RTE_LOG(ERR, IPSEC,
3211 				"rte_eth_dev_stop: err=%s, port=%u\n",
3212 				rte_strerror(-ret), portid);
3213 
3214 		rte_eth_dev_close(portid);
3215 		printf(" Done\n");
3216 	}
3217 
3218 	/* clean up the EAL */
3219 	rte_eal_cleanup();
3220 	printf("Bye...\n");
3221 
3222 	return 0;
3223 }
3224