xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
13 #include <string.h>
14 #include <sys/queue.h>
15 #include <stdarg.h>
16 #include <errno.h>
17 #include <getopt.h>
18 
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_eal.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
36 #include <rte_mbuf.h>
37 #include <rte_acl.h>
38 #include <rte_lpm.h>
39 #include <rte_lpm6.h>
40 #include <rte_hash.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
44 #include <rte_ip.h>
45 #include <rte_ip_frag.h>
46 
47 #include "ipsec.h"
48 #include "parser.h"
49 
50 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
51 
52 #define MAX_JUMBO_PKT_LEN  9600
53 
54 #define MEMPOOL_CACHE_SIZE 256
55 
56 #define NB_MBUF	(32000)
57 
58 #define CDEV_QUEUE_DESC 2048
59 #define CDEV_MAP_ENTRIES 16384
60 #define CDEV_MP_NB_OBJS 1024
61 #define CDEV_MP_CACHE_SZ 64
62 #define MAX_QUEUE_PAIRS 1
63 
64 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
65 
66 #define NB_SOCKETS 4
67 
68 /* Configure how many packets ahead to prefetch, when reading packets */
69 #define PREFETCH_OFFSET	3
70 
71 #define MAX_RX_QUEUE_PER_LCORE 16
72 
73 #define MAX_LCORE_PARAMS 1024
74 
75 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
76 
77 /*
78  * Configurable number of RX/TX ring descriptors
79  */
80 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
81 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
82 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
83 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
84 
85 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
86 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
87 	(((uint64_t)((a) & 0xff) << 56) | \
88 	((uint64_t)((b) & 0xff) << 48) | \
89 	((uint64_t)((c) & 0xff) << 40) | \
90 	((uint64_t)((d) & 0xff) << 32) | \
91 	((uint64_t)((e) & 0xff) << 24) | \
92 	((uint64_t)((f) & 0xff) << 16) | \
93 	((uint64_t)((g) & 0xff) << 8)  | \
94 	((uint64_t)(h) & 0xff))
95 #else
96 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
97 	(((uint64_t)((h) & 0xff) << 56) | \
98 	((uint64_t)((g) & 0xff) << 48) | \
99 	((uint64_t)((f) & 0xff) << 40) | \
100 	((uint64_t)((e) & 0xff) << 32) | \
101 	((uint64_t)((d) & 0xff) << 24) | \
102 	((uint64_t)((c) & 0xff) << 16) | \
103 	((uint64_t)((b) & 0xff) << 8) | \
104 	((uint64_t)(a) & 0xff))
105 #endif
106 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
107 
108 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
109 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
110 		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
111 		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
112 		0, 0)
113 
114 #define	FRAG_TBL_BUCKET_ENTRIES	4
115 #define	FRAG_TTL_MS		(10 * MS_PER_S)
116 
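/*
 * Frame length for a given MTU: the L3 packet size plus the 14-byte
 * Ethernet header and the 4-byte CRC, e.g. MTU_TO_FRAMELEN(1500) == 1518.
 */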
117 #define MTU_TO_FRAMELEN(x)	((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
118 
119 /* port/source ethernet addr and destination ethernet addr */
120 struct ethaddr_info {
121 	uint64_t src, dst;
122 };
123 
124 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
125 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
126 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
127 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
128 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
129 };
130 
131 #define CMD_LINE_OPT_CONFIG		"config"
132 #define CMD_LINE_OPT_SINGLE_SA		"single-sa"
133 #define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
134 #define CMD_LINE_OPT_RX_OFFLOAD		"rxoffload"
135 #define CMD_LINE_OPT_TX_OFFLOAD		"txoffload"
136 #define CMD_LINE_OPT_REASSEMBLE		"reassemble"
137 #define CMD_LINE_OPT_MTU		"mtu"
138 
139 enum {
140 	/* long options mapped to a short option */
141 
142 	/* first long-only option value must be >= 256, so that we won't
143 	 * conflict with short options
144 	 */
145 	CMD_LINE_OPT_MIN_NUM = 256,
146 	CMD_LINE_OPT_CONFIG_NUM,
147 	CMD_LINE_OPT_SINGLE_SA_NUM,
148 	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
149 	CMD_LINE_OPT_RX_OFFLOAD_NUM,
150 	CMD_LINE_OPT_TX_OFFLOAD_NUM,
151 	CMD_LINE_OPT_REASSEMBLE_NUM,
152 	CMD_LINE_OPT_MTU_NUM,
153 };
154 
155 static const struct option lgopts[] = {
156 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
157 	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
158 	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
159 	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
160 	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
161 	{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
162 	{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
163 	{NULL, 0, 0, 0}
164 };
165 
166 /* mask of enabled ports */
167 static uint32_t enabled_port_mask;
168 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
169 static uint32_t unprotected_port_mask;
170 static int32_t promiscuous_on = 1;
171 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
172 static uint32_t nb_lcores;
173 static uint32_t single_sa;
174 static uint32_t single_sa_idx;
175 
176 /*
177  * RX/TX HW offload capabilities to enable/use on ethernet ports.
178  * By default all capabilities are enabled.
179  */
180 static uint64_t dev_rx_offload = UINT64_MAX;
181 static uint64_t dev_tx_offload = UINT64_MAX;
182 
183 /*
184  * global values that determine multi-seg policy
185  */
186 static uint32_t frag_tbl_sz;
187 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
188 static uint32_t mtu_size = RTE_ETHER_MTU;
189 
190 /* application wide librte_ipsec/SA parameters */
191 struct app_sa_prm app_sa_prm = {.enable = 0};
192 
193 struct lcore_rx_queue {
194 	uint16_t port_id;
195 	uint8_t queue_id;
196 } __rte_cache_aligned;
197 
198 struct lcore_params {
199 	uint16_t port_id;
200 	uint8_t queue_id;
201 	uint8_t lcore_id;
202 } __rte_cache_aligned;
203 
204 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
205 
206 static struct lcore_params *lcore_params;
207 static uint16_t nb_lcore_params;
208 
209 static struct rte_hash *cdev_map_in;
210 static struct rte_hash *cdev_map_out;
211 
212 struct buffer {
213 	uint16_t len;
214 	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
215 };
216 
217 struct lcore_conf {
218 	uint16_t nb_rx_queue;
219 	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
220 	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
221 	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
222 	struct ipsec_ctx inbound;
223 	struct ipsec_ctx outbound;
224 	struct rt_ctx *rt4_ctx;
225 	struct rt_ctx *rt6_ctx;
226 	struct {
227 		struct rte_ip_frag_tbl *tbl;
228 		struct rte_mempool *pool_dir;
229 		struct rte_mempool *pool_indir;
230 		struct rte_ip_frag_death_row dr;
231 	} frag;
232 } __rte_cache_aligned;
233 
234 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
235 
236 static struct rte_eth_conf port_conf = {
237 	.rxmode = {
238 		.mq_mode	= ETH_MQ_RX_RSS,
239 		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
240 		.split_hdr_size = 0,
241 		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
242 	},
243 	.rx_adv_conf = {
244 		.rss_conf = {
245 			.rss_key = NULL,
246 			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
247 				ETH_RSS_TCP | ETH_RSS_SCTP,
248 		},
249 	},
250 	.txmode = {
251 		.mq_mode = ETH_MQ_TX_NONE,
252 	},
253 };
254 
255 static struct socket_ctx socket_ctx[NB_SOCKETS];
256 
257 /*
258  * Determine if multi-segment support is required:
259  *  - either the frame buffer size is smaller than the MTU
260  *  - or reassembly support is requested
261  */
262 static int
263 multi_seg_required(void)
264 {
265 	return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
266 		frame_buf_size || frag_tbl_sz != 0);
267 }
268 
269 static inline void
270 adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
271 	uint32_t l2_len)
272 {
273 	uint32_t plen, trim;
274 
275 	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
276 	if (plen < m->pkt_len) {
277 		trim = m->pkt_len - plen;
278 		rte_pktmbuf_trim(m, trim);
279 	}
280 }
281 
282 static inline void
283 adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
284 	uint32_t l2_len)
285 {
286 	uint32_t plen, trim;
287 
288 	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
289 	if (plen < m->pkt_len) {
290 		trim = m->pkt_len - plen;
291 		rte_pktmbuf_trim(m, trim);
292 	}
293 }
294 
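/*
 * Classify one received packet: strip the Ethernet header, sort the mbuf
 * into the ipsec/ip4/ip6 traffic groups based on the EtherType and the
 * (possibly extension-chained) next protocol, and set l2_len/l3_len.
 * For packets already handled by inline crypto (PKT_RX_SEC_OFFLOAD), the
 * SA pointer registered as session userdata is saved in the mbuf private
 * area for the later SP/SA check.
 */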
295 static inline void
296 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
297 {
298 	const struct rte_ether_hdr *eth;
299 	const struct rte_ipv4_hdr *iph4;
300 	const struct rte_ipv6_hdr *iph6;
301 
302 	eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
303 	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
304 
305 		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
306 			RTE_ETHER_HDR_LEN);
307 		adjust_ipv4_pktlen(pkt, iph4, 0);
308 
309 		if (iph4->next_proto_id == IPPROTO_ESP)
310 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
311 		else {
312 			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
313 			t->ip4.pkts[(t->ip4.num)++] = pkt;
314 		}
315 		pkt->l2_len = 0;
316 		pkt->l3_len = sizeof(*iph4);
317 	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
318 		int next_proto;
319 		size_t l3len, ext_len;
320 		uint8_t *p;
321 
322 		/* get protocol type */
323 		iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
324 			RTE_ETHER_HDR_LEN);
325 		adjust_ipv6_pktlen(pkt, iph6, 0);
326 
327 		next_proto = iph6->proto;
328 
329 		/* determine l3 header size up to ESP extension */
330 		l3len = sizeof(struct ip6_hdr);
331 		p = rte_pktmbuf_mtod(pkt, uint8_t *);
332 		while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
333 			(next_proto = rte_ipv6_get_next_ext(p + l3len,
334 						next_proto, &ext_len)) >= 0)
335 			l3len += ext_len;
336 
337 		/* drop packet when IPv6 header exceeds first segment length */
338 		if (unlikely(l3len > pkt->data_len)) {
339 			rte_pktmbuf_free(pkt);
340 			return;
341 		}
342 
343 		if (next_proto == IPPROTO_ESP)
344 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
345 		else {
346 			t->ip6.data[t->ip6.num] = &iph6->proto;
347 			t->ip6.pkts[(t->ip6.num)++] = pkt;
348 		}
349 		pkt->l2_len = 0;
350 		pkt->l3_len = l3len;
351 	} else {
352 		/* Unknown/Unsupported type, drop the packet */
353 		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
354 			rte_be_to_cpu_16(eth->ether_type));
355 		rte_pktmbuf_free(pkt);
356 		return;
357 	}
358 
359 	/* Check if the packet has been processed inline. For inline protocol
360 	 * processed packets, the metadata in the mbuf can be used to identify
361 	 * the security processing done on the packet. The metadata will be
362 	 * used to retrieve the application registered userdata associated
363 	 * with the security session.
364 	 */
365 
366 	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
367 		struct ipsec_sa *sa;
368 		struct ipsec_mbuf_metadata *priv;
369 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
370 						rte_eth_dev_get_sec_ctx(
371 						pkt->port);
372 
373 		/* Retrieve the userdata registered. Here, the userdata
374 		 * registered is the SA pointer.
375 		 */
376 
377 		sa = (struct ipsec_sa *)
378 				rte_security_get_userdata(ctx, pkt->udata64);
379 
380 		if (sa == NULL) {
381 			/* userdata could not be retrieved */
382 			return;
383 		}
384 
385 		/* Save SA as priv member in mbuf. This will be used in the
386 		 * IPsec selector (SP-SA) check.
387 		 */
388 
389 		priv = get_priv(pkt);
390 		priv->sa = sa;
391 	}
392 }
393 
394 static inline void
395 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
396 		uint16_t nb_pkts)
397 {
398 	int32_t i;
399 
400 	t->ipsec.num = 0;
401 	t->ip4.num = 0;
402 	t->ip6.num = 0;
403 
404 	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
405 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
406 					void *));
407 		prepare_one_packet(pkts[i], t);
408 	}
409 	/* Process the remaining packets */
410 	for (; i < nb_pkts; i++)
411 		prepare_one_packet(pkts[i], t);
412 }
413 
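/*
 * Prepare one packet for transmission: prepend the Ethernet header, fill in
 * the EtherType and the source/destination MAC addresses from ethaddr_tbl,
 * set l2_len/l3_len, apply the configured per-port TX offload flags and
 * compute the IPv4 header checksum in software when the HW offload is not
 * enabled.
 */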
414 static inline void
415 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
416 		const struct lcore_conf *qconf)
417 {
418 	struct ip *ip;
419 	struct rte_ether_hdr *ethhdr;
420 
421 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
422 
423 	ethhdr = (struct rte_ether_hdr *)
424 		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
425 
426 	if (ip->ip_v == IPVERSION) {
427 		pkt->ol_flags |= qconf->outbound.ipv4_offloads;
428 		pkt->l3_len = sizeof(struct ip);
429 		pkt->l2_len = RTE_ETHER_HDR_LEN;
430 
431 		ip->ip_sum = 0;
432 
433 		/* calculate IPv4 cksum in SW */
434 		if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
435 			ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
436 
437 		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
438 	} else {
439 		pkt->ol_flags |= qconf->outbound.ipv6_offloads;
440 		pkt->l3_len = sizeof(struct ip6_hdr);
441 		pkt->l2_len = RTE_ETHER_HDR_LEN;
442 
443 		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
444 	}
445 
446 	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
447 			sizeof(struct rte_ether_addr));
448 	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
449 			sizeof(struct rte_ether_addr));
450 }
451 
452 static inline void
453 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
454 		const struct lcore_conf *qconf)
455 {
456 	int32_t i;
457 	const int32_t prefetch_offset = 2;
458 
459 	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
460 		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
461 		prepare_tx_pkt(pkts[i], port, qconf);
462 	}
463 	/* Process the remaining packets */
464 	for (; i < nb_pkts; i++)
465 		prepare_tx_pkt(pkts[i], port, qconf);
466 }
467 
468 /* Send burst of packets on an output interface */
469 static inline int32_t
470 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
471 {
472 	struct rte_mbuf **m_table;
473 	int32_t ret;
474 	uint16_t queueid;
475 
476 	queueid = qconf->tx_queue_id[port];
477 	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
478 
479 	prepare_tx_burst(m_table, n, port, qconf);
480 
481 	ret = rte_eth_tx_burst(port, queueid, m_table, n);
482 	if (unlikely(ret < n)) {
483 		do {
484 			rte_pktmbuf_free(m_table[ret]);
485 		} while (++ret < n);
486 	}
487 
488 	return 0;
489 }
490 
491 /*
492  * Helper function to fragment and queue for TX one packet.
493  */
494 static inline uint32_t
495 send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
496 	uint16_t port, uint8_t proto)
497 {
498 	struct buffer *tbl;
499 	uint32_t len, n;
500 	int32_t rc;
501 
502 	tbl =  qconf->tx_mbufs + port;
503 	len = tbl->len;
504 
505 	/* flush the TX buffer to make room for the new fragments, if needed */
506 	if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >=  RTE_DIM(tbl->m_table)) {
507 		send_burst(qconf, len, port);
508 		len = 0;
509 	}
510 
511 	n = RTE_DIM(tbl->m_table) - len;
512 
513 	if (proto == IPPROTO_IP)
514 		rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
515 			n, mtu_size, qconf->frag.pool_dir,
516 			qconf->frag.pool_indir);
517 	else
518 		rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
519 			n, mtu_size, qconf->frag.pool_dir,
520 			qconf->frag.pool_indir);
521 
522 	if (rc >= 0)
523 		len += rc;
524 	else
525 		RTE_LOG(ERR, IPSEC,
526 			"%s: failed to fragment packet with size %u, "
527 			"error code: %d\n",
528 			__func__, m->pkt_len, rte_errno);
529 
530 	rte_pktmbuf_free(m);
531 	return len;
532 }
533 
534 /* Enqueue a single packet, and send burst if queue is filled */
535 static inline int32_t
536 send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
537 {
538 	uint32_t lcore_id;
539 	uint16_t len;
540 	struct lcore_conf *qconf;
541 
542 	lcore_id = rte_lcore_id();
543 
544 	qconf = &lcore_conf[lcore_id];
545 	len = qconf->tx_mbufs[port].len;
546 
547 	if (m->pkt_len <= mtu_size) {
548 		qconf->tx_mbufs[port].m_table[len] = m;
549 		len++;
550 
551 	/* need to fragment the packet */
552 	} else if (frag_tbl_sz > 0)
553 		len = send_fragment_packet(qconf, m, port, proto);
554 	else
555 		rte_pktmbuf_free(m);
556 
557 	/* enough pkts to be sent */
558 	if (unlikely(len == MAX_PKT_BURST)) {
559 		send_burst(qconf, MAX_PKT_BURST, port);
560 		len = 0;
561 	}
562 
563 	qconf->tx_mbufs[port].len = len;
564 	return 0;
565 }
566 
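/*
 * Inbound SP check: classify packets against the inbound ACL (SP).
 * BYPASS packets are kept, DISCARD packets are dropped, and packets that
 * matched a PROTECT rule are verified against the SA database (SPI to SA
 * index check); the first 'lim' packets must additionally carry the
 * PKT_RX_SEC_OFFLOAD flag, i.e. have been IPsec-processed inline.
 * Accepted packets are compacted in place and ip->num is updated.
 */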
567 static inline void
568 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
569 		uint16_t lim)
570 {
571 	struct rte_mbuf *m;
572 	uint32_t i, j, res, sa_idx;
573 
574 	if (ip->num == 0 || sp == NULL)
575 		return;
576 
577 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
578 			ip->num, DEFAULT_MAX_CATEGORIES);
579 
580 	j = 0;
581 	for (i = 0; i < ip->num; i++) {
582 		m = ip->pkts[i];
583 		res = ip->res[i];
584 		if (res == BYPASS) {
585 			ip->pkts[j++] = m;
586 			continue;
587 		}
588 		if (res == DISCARD) {
589 			rte_pktmbuf_free(m);
590 			continue;
591 		}
592 
593 		/* Only check SPI match for processed IPSec packets */
594 		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
595 			rte_pktmbuf_free(m);
596 			continue;
597 		}
598 
599 		sa_idx = SPI2IDX(res);
600 		if (!inbound_sa_check(sa, m, sa_idx)) {
601 			rte_pktmbuf_free(m);
602 			continue;
603 		}
604 		ip->pkts[j++] = m;
605 	}
606 	ip->num = j;
607 }
608 
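/*
 * Split a burst of packets into the ip4 and ip6 traffic groups based on the
 * IP version field, storing a pointer to the next protocol field of each
 * header for the subsequent SP lookup. Packets of any other version are
 * dropped.
 */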
609 static void
610 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
611 {
612 	uint32_t i, n4, n6;
613 	struct ip *ip;
614 	struct rte_mbuf *m;
615 
616 	n4 = trf->ip4.num;
617 	n6 = trf->ip6.num;
618 
619 	for (i = 0; i < num; i++) {
620 
621 		m = mb[i];
622 		ip = rte_pktmbuf_mtod(m, struct ip *);
623 
624 		if (ip->ip_v == IPVERSION) {
625 			trf->ip4.pkts[n4] = m;
626 			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
627 					uint8_t *, offsetof(struct ip, ip_p));
628 			n4++;
629 		} else if (ip->ip_v == IP6_VERSION) {
630 			trf->ip6.pkts[n6] = m;
631 			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
632 					uint8_t *,
633 					offsetof(struct ip6_hdr, ip6_nxt));
634 			n6++;
635 		} else
636 			rte_pktmbuf_free(m);
637 	}
638 
639 	trf->ip4.num = n4;
640 	trf->ip6.num = n6;
641 }
642 
643 
644 static inline void
645 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
646 		struct ipsec_traffic *traffic)
647 {
648 	uint16_t nb_pkts_in, n_ip4, n_ip6;
649 
650 	n_ip4 = traffic->ip4.num;
651 	n_ip6 = traffic->ip6.num;
652 
653 	if (app_sa_prm.enable == 0) {
654 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
655 				traffic->ipsec.num, MAX_PKT_BURST);
656 		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
657 	} else {
658 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
659 			traffic->ipsec.saptr, traffic->ipsec.num);
660 		ipsec_process(ipsec_ctx, traffic);
661 	}
662 
663 	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
664 			n_ip4);
665 
666 	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
667 			n_ip6);
668 }
669 
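/*
 * Outbound SP check: classify packets against the outbound ACL (SP).
 * DISCARD packets are dropped, BYPASS packets stay in the ip list, and
 * PROTECT packets are moved to the ipsec list together with the SA index
 * derived from the matched SPI.
 */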
670 static inline void
671 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
672 		struct traffic_type *ipsec)
673 {
674 	struct rte_mbuf *m;
675 	uint32_t i, j, sa_idx;
676 
677 	if (ip->num == 0 || sp == NULL)
678 		return;
679 
680 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
681 			ip->num, DEFAULT_MAX_CATEGORIES);
682 
683 	j = 0;
684 	for (i = 0; i < ip->num; i++) {
685 		m = ip->pkts[i];
686 		sa_idx = SPI2IDX(ip->res[i]);
687 		if (ip->res[i] == DISCARD)
688 			rte_pktmbuf_free(m);
689 		else if (ip->res[i] == BYPASS)
690 			ip->pkts[j++] = m;
691 		else {
692 			ipsec->res[ipsec->num] = sa_idx;
693 			ipsec->pkts[ipsec->num++] = m;
694 		}
695 	}
696 	ip->num = j;
697 }
698 
699 static inline void
700 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
701 		struct ipsec_traffic *traffic)
702 {
703 	struct rte_mbuf *m;
704 	uint16_t idx, nb_pkts_out, i;
705 
706 	/* Drop any IPsec traffic from protected ports */
707 	for (i = 0; i < traffic->ipsec.num; i++)
708 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
709 
710 	traffic->ipsec.num = 0;
711 
712 	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
713 
714 	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
715 
716 	if (app_sa_prm.enable == 0) {
717 
718 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
719 				traffic->ipsec.res, traffic->ipsec.num,
720 				MAX_PKT_BURST);
721 
722 		for (i = 0; i < nb_pkts_out; i++) {
723 			m = traffic->ipsec.pkts[i];
724 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
725 			if (ip->ip_v == IPVERSION) {
726 				idx = traffic->ip4.num++;
727 				traffic->ip4.pkts[idx] = m;
728 			} else {
729 				idx = traffic->ip6.num++;
730 				traffic->ip6.pkts[idx] = m;
731 			}
732 		}
733 	} else {
734 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
735 			traffic->ipsec.saptr, traffic->ipsec.num);
736 		ipsec_process(ipsec_ctx, traffic);
737 	}
738 }
739 
740 static inline void
741 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
742 		struct ipsec_traffic *traffic)
743 {
744 	struct rte_mbuf *m;
745 	uint32_t nb_pkts_in, i, idx;
746 
747 	/* Drop any IPv4 traffic from unprotected ports */
748 	for (i = 0; i < traffic->ip4.num; i++)
749 		rte_pktmbuf_free(traffic->ip4.pkts[i]);
750 
751 	traffic->ip4.num = 0;
752 
753 	/* Drop any IPv6 traffic from unprotected ports */
754 	for (i = 0; i < traffic->ip6.num; i++)
755 		rte_pktmbuf_free(traffic->ip6.pkts[i]);
756 
757 	traffic->ip6.num = 0;
758 
759 	if (app_sa_prm.enable == 0) {
760 
761 		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
762 				traffic->ipsec.num, MAX_PKT_BURST);
763 
764 		for (i = 0; i < nb_pkts_in; i++) {
765 			m = traffic->ipsec.pkts[i];
766 			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
767 			if (ip->ip_v == IPVERSION) {
768 				idx = traffic->ip4.num++;
769 				traffic->ip4.pkts[idx] = m;
770 			} else {
771 				idx = traffic->ip6.num++;
772 				traffic->ip6.pkts[idx] = m;
773 			}
774 		}
775 	} else {
776 		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
777 			traffic->ipsec.saptr, traffic->ipsec.num);
778 		ipsec_process(ipsec_ctx, traffic);
779 	}
780 }
781 
782 static inline void
783 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
784 		struct ipsec_traffic *traffic)
785 {
786 	struct rte_mbuf *m;
787 	uint32_t nb_pkts_out, i, n;
788 	struct ip *ip;
789 
790 	/* Drop any IPsec traffic from protected ports */
791 	for (i = 0; i < traffic->ipsec.num; i++)
792 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
793 
794 	n = 0;
795 
796 	for (i = 0; i < traffic->ip4.num; i++) {
797 		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
798 		traffic->ipsec.res[n++] = single_sa_idx;
799 	}
800 
801 	for (i = 0; i < traffic->ip6.num; i++) {
802 		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
803 		traffic->ipsec.res[n++] = single_sa_idx;
804 	}
805 
806 	traffic->ip4.num = 0;
807 	traffic->ip6.num = 0;
808 	traffic->ipsec.num = n;
809 
810 	if (app_sa_prm.enable == 0) {
811 
812 		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
813 				traffic->ipsec.res, traffic->ipsec.num,
814 				MAX_PKT_BURST);
815 
816 		/* They all use the same SA (ip4 or ip6 tunnel) */
817 		m = traffic->ipsec.pkts[0];
818 		ip = rte_pktmbuf_mtod(m, struct ip *);
819 		if (ip->ip_v == IPVERSION) {
820 			traffic->ip4.num = nb_pkts_out;
821 			for (i = 0; i < nb_pkts_out; i++)
822 				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
823 		} else {
824 			traffic->ip6.num = nb_pkts_out;
825 			for (i = 0; i < nb_pkts_out; i++)
826 				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
827 		}
828 	} else {
829 		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
830 			traffic->ipsec.saptr, traffic->ipsec.num);
831 		ipsec_process(ipsec_ctx, traffic);
832 	}
833 }
834 
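/*
 * For inline-offloaded packets the output port is taken from the SA saved
 * in the mbuf private area instead of an LPM lookup. For IPv4 the port id
 * is returned with RTE_LPM_LOOKUP_SUCCESS set so the caller can treat it
 * like a regular LPM hit; if no SA was saved, a "lookup failed" value is
 * returned (-1 for IPv6, 0 for IPv4).
 */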
835 static inline int32_t
836 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
837 {
838 	struct ipsec_mbuf_metadata *priv;
839 	struct ipsec_sa *sa;
840 
841 	priv = get_priv(pkt);
842 
843 	sa = priv->sa;
844 	if (unlikely(sa == NULL)) {
845 		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
846 		goto fail;
847 	}
848 
849 	if (is_ipv6)
850 		return sa->portid;
851 
852 	/* else */
853 	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
854 
855 fail:
856 	if (is_ipv6)
857 		return -1;
858 
859 	/* else */
860 	return 0;
861 }
862 
863 static inline void
864 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
865 {
866 	uint32_t hop[MAX_PKT_BURST * 2];
867 	uint32_t dst_ip[MAX_PKT_BURST * 2];
868 	int32_t pkt_hop = 0;
869 	uint16_t i, offset;
870 	uint16_t lpm_pkts = 0;
871 
872 	if (nb_pkts == 0)
873 		return;
874 
875 	/* Need to do an LPM lookup for non-inline packets. Inline packets will
876 	 * have port ID in the SA
877 	 */
878 
879 	for (i = 0; i < nb_pkts; i++) {
880 		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
881 			/* Security offload not enabled. So an LPM lookup is
882 			 * required to get the hop
883 			 */
884 			offset = offsetof(struct ip, ip_dst);
885 			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
886 					uint32_t *, offset);
887 			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
888 			lpm_pkts++;
889 		}
890 	}
891 
892 	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
893 
894 	lpm_pkts = 0;
895 
896 	for (i = 0; i < nb_pkts; i++) {
897 		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
898 			/* Read hop from the SA */
899 			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
900 		} else {
901 			/* Need to use hop returned by lookup */
902 			pkt_hop = hop[lpm_pkts++];
903 		}
904 
905 		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
906 			rte_pktmbuf_free(pkts[i]);
907 			continue;
908 		}
909 		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
910 	}
911 }
912 
913 static inline void
914 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
915 {
916 	int32_t hop[MAX_PKT_BURST * 2];
917 	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
918 	uint8_t *ip6_dst;
919 	int32_t pkt_hop = 0;
920 	uint16_t i, offset;
921 	uint16_t lpm_pkts = 0;
922 
923 	if (nb_pkts == 0)
924 		return;
925 
926 	/* Need to do an LPM lookup for non-inline packets. Inline packets will
927 	 * have port ID in the SA
928 	 */
929 
930 	for (i = 0; i < nb_pkts; i++) {
931 		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
932 			/* Security offload not enabled. So an LPM lookup is
933 			 * required to get the hop
934 			 */
935 			offset = offsetof(struct ip6_hdr, ip6_dst);
936 			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
937 					offset);
938 			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
939 			lpm_pkts++;
940 		}
941 	}
942 
943 	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
944 			lpm_pkts);
945 
946 	lpm_pkts = 0;
947 
948 	for (i = 0; i < nb_pkts; i++) {
949 		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
950 			/* Read hop from the SA */
951 			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
952 		} else {
953 			/* Need to use hop returned by lookup */
954 			pkt_hop = hop[lpm_pkts++];
955 		}
956 
957 		if (pkt_hop == -1) {
958 			rte_pktmbuf_free(pkts[i]);
959 			continue;
960 		}
961 		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
962 	}
963 }
964 
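/*
 * Process one burst of received packets: classify them, run the inbound
 * IPsec path for unprotected ports or the outbound path for protected ports
 * (honouring single-sa mode), then route the resulting IPv4/IPv6 packets.
 */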
965 static inline void
966 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
967 		uint8_t nb_pkts, uint16_t portid)
968 {
969 	struct ipsec_traffic traffic;
970 
971 	prepare_traffic(pkts, &traffic, nb_pkts);
972 
973 	if (unlikely(single_sa)) {
974 		if (UNPROTECTED_PORT(portid))
975 			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
976 		else
977 			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
978 	} else {
979 		if (UNPROTECTED_PORT(portid))
980 			process_pkts_inbound(&qconf->inbound, &traffic);
981 		else
982 			process_pkts_outbound(&qconf->outbound, &traffic);
983 	}
984 
985 	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
986 	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
987 }
988 
989 static inline void
990 drain_tx_buffers(struct lcore_conf *qconf)
991 {
992 	struct buffer *buf;
993 	uint32_t portid;
994 
995 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
996 		buf = &qconf->tx_mbufs[portid];
997 		if (buf->len == 0)
998 			continue;
999 		send_burst(qconf, buf->len, portid);
1000 		buf->len = 0;
1001 	}
1002 }
1003 
1004 static inline void
1005 drain_crypto_buffers(struct lcore_conf *qconf)
1006 {
1007 	uint32_t i;
1008 	struct ipsec_ctx *ctx;
1009 
1010 	/* drain inbound buffers */
1011 	ctx = &qconf->inbound;
1012 	for (i = 0; i != ctx->nb_qps; i++) {
1013 		if (ctx->tbl[i].len != 0)
1014 			enqueue_cop_burst(ctx->tbl  + i);
1015 	}
1016 
1017 	/* drain outbound buffers */
1018 	ctx = &qconf->outbound;
1019 	for (i = 0; i != ctx->nb_qps; i++) {
1020 		if (ctx->tbl[i].len != 0)
1021 			enqueue_cop_burst(ctx->tbl  + i);
1022 	}
1023 }
1024 
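/*
 * Dequeue completed inbound crypto operations, split the resulting packets
 * by IP version, run the inbound SP check and route them.
 */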
1025 static void
1026 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
1027 		struct ipsec_ctx *ctx)
1028 {
1029 	uint32_t n;
1030 	struct ipsec_traffic trf;
1031 
1032 	if (app_sa_prm.enable == 0) {
1033 
1034 		/* dequeue packets from crypto-queue */
1035 		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1036 			RTE_DIM(trf.ipsec.pkts));
1037 
1038 		trf.ip4.num = 0;
1039 		trf.ip6.num = 0;
1040 
1041 		/* split traffic by ipv4-ipv6 */
1042 		split46_traffic(&trf, trf.ipsec.pkts, n);
1043 	} else
1044 		ipsec_cqp_process(ctx, &trf);
1045 
1046 	/* process ipv4 packets */
1047 	if (trf.ip4.num != 0) {
1048 		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
1049 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1050 	}
1051 
1052 	/* process ipv6 packets */
1053 	if (trf.ip6.num != 0) {
1054 		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
1055 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1056 	}
1057 }
1058 
1059 static void
1060 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
1061 		struct ipsec_ctx *ctx)
1062 {
1063 	uint32_t n;
1064 	struct ipsec_traffic trf;
1065 
1066 	if (app_sa_prm.enable == 0) {
1067 
1068 		/* dequeue packets from crypto-queue */
1069 		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1070 			RTE_DIM(trf.ipsec.pkts));
1071 
1072 		trf.ip4.num = 0;
1073 		trf.ip6.num = 0;
1074 
1075 		/* split traffic by ipv4-ipv6 */
1076 		split46_traffic(&trf, trf.ipsec.pkts, n);
1077 	} else
1078 		ipsec_cqp_process(ctx, &trf);
1079 
1080 	/* process ipv4 packets */
1081 	if (trf.ip4.num != 0)
1082 		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1083 
1084 	/* process ipv6 packets */
1085 	if (trf.ip6.num != 0)
1086 		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1087 }
1088 
1089 /* main processing loop */
1090 static int32_t
1091 main_loop(__attribute__((unused)) void *dummy)
1092 {
1093 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1094 	uint32_t lcore_id;
1095 	uint64_t prev_tsc, diff_tsc, cur_tsc;
1096 	int32_t i, nb_rx;
1097 	uint16_t portid;
1098 	uint8_t queueid;
1099 	struct lcore_conf *qconf;
1100 	int32_t socket_id;
1101 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1102 			/ US_PER_S * BURST_TX_DRAIN_US;
1103 	struct lcore_rx_queue *rxql;
1104 
1105 	prev_tsc = 0;
1106 	lcore_id = rte_lcore_id();
1107 	qconf = &lcore_conf[lcore_id];
1108 	rxql = qconf->rx_queue_list;
1109 	socket_id = rte_lcore_to_socket_id(lcore_id);
1110 
1111 	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
1112 	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
1113 	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
1114 	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
1115 	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
1116 	qconf->inbound.cdev_map = cdev_map_in;
1117 	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1118 	qconf->inbound.session_priv_pool =
1119 			socket_ctx[socket_id].session_priv_pool;
1120 	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1121 	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1122 	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1123 	qconf->outbound.cdev_map = cdev_map_out;
1124 	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1125 	qconf->outbound.session_priv_pool =
1126 			socket_ctx[socket_id].session_priv_pool;
1127 	qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
1128 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1129 
1130 	if (qconf->nb_rx_queue == 0) {
1131 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1132 			lcore_id);
1133 		return 0;
1134 	}
1135 
1136 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1137 
1138 	for (i = 0; i < qconf->nb_rx_queue; i++) {
1139 		portid = rxql[i].port_id;
1140 		queueid = rxql[i].queue_id;
1141 		RTE_LOG(INFO, IPSEC,
1142 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1143 			lcore_id, portid, queueid);
1144 	}
1145 
1146 	while (1) {
1147 		cur_tsc = rte_rdtsc();
1148 
1149 		/* TX queue buffer drain */
1150 		diff_tsc = cur_tsc - prev_tsc;
1151 
1152 		if (unlikely(diff_tsc > drain_tsc)) {
1153 			drain_tx_buffers(qconf);
1154 			drain_crypto_buffers(qconf);
1155 			prev_tsc = cur_tsc;
1156 		}
1157 
1158 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
1159 
1160 			/* Read packets from RX queues */
1161 			portid = rxql[i].port_id;
1162 			queueid = rxql[i].queue_id;
1163 			nb_rx = rte_eth_rx_burst(portid, queueid,
1164 					pkts, MAX_PKT_BURST);
1165 
1166 			if (nb_rx > 0)
1167 				process_pkts(qconf, pkts, nb_rx, portid);
1168 
1169 			/* dequeue and process completed crypto-ops */
1170 			if (UNPROTECTED_PORT(portid))
1171 				drain_inbound_crypto_queues(qconf,
1172 					&qconf->inbound);
1173 			else
1174 				drain_outbound_crypto_queues(qconf,
1175 					&qconf->outbound);
1176 		}
1177 	}
1178 }
1179 
1180 static int32_t
1181 check_params(void)
1182 {
1183 	uint8_t lcore;
1184 	uint16_t portid;
1185 	uint16_t i;
1186 	int32_t socket_id;
1187 
1188 	if (lcore_params == NULL) {
1189 		printf("Error: No port/queue/core mappings\n");
1190 		return -1;
1191 	}
1192 
1193 	for (i = 0; i < nb_lcore_params; ++i) {
1194 		lcore = lcore_params[i].lcore_id;
1195 		if (!rte_lcore_is_enabled(lcore)) {
1196 			printf("error: lcore %hhu is not enabled in "
1197 				"lcore mask\n", lcore);
1198 			return -1;
1199 		}
1200 		socket_id = rte_lcore_to_socket_id(lcore);
1201 		if (socket_id != 0 && numa_on == 0) {
1202 			printf("warning: lcore %hhu is on socket %d "
1203 				"with numa off\n",
1204 				lcore, socket_id);
1205 		}
1206 		portid = lcore_params[i].port_id;
1207 		if ((enabled_port_mask & (1 << portid)) == 0) {
1208 			printf("port %u is not enabled in port mask\n", portid);
1209 			return -1;
1210 		}
1211 		if (!rte_eth_dev_is_valid_port(portid)) {
1212 			printf("port %u is not present on the board\n", portid);
1213 			return -1;
1214 		}
1215 	}
1216 	return 0;
1217 }
1218 
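/*
 * Number of RX queues to configure for a port: the highest queue id
 * referenced for that port in the --config mapping, plus one.
 */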
1219 static uint8_t
1220 get_port_nb_rx_queues(const uint16_t port)
1221 {
1222 	int32_t queue = -1;
1223 	uint16_t i;
1224 
1225 	for (i = 0; i < nb_lcore_params; ++i) {
1226 		if (lcore_params[i].port_id == port &&
1227 				lcore_params[i].queue_id > queue)
1228 			queue = lcore_params[i].queue_id;
1229 	}
1230 	return (uint8_t)(++queue);
1231 }
1232 
1233 static int32_t
1234 init_lcore_rx_queues(void)
1235 {
1236 	uint16_t i, nb_rx_queue;
1237 	uint8_t lcore;
1238 
1239 	for (i = 0; i < nb_lcore_params; ++i) {
1240 		lcore = lcore_params[i].lcore_id;
1241 		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1242 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1243 			printf("error: too many queues (%u) for lcore: %u\n",
1244 					nb_rx_queue + 1, lcore);
1245 			return -1;
1246 		}
1247 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1248 			lcore_params[i].port_id;
1249 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1250 			lcore_params[i].queue_id;
1251 		lcore_conf[lcore].nb_rx_queue++;
1252 	}
1253 	return 0;
1254 }
1255 
1256 /* display usage */
1257 static void
1258 print_usage(const char *prgname)
1259 {
1260 	fprintf(stderr, "%s [EAL options] --"
1261 		" -p PORTMASK"
1262 		" [-P]"
1263 		" [-u PORTMASK]"
1264 		" [-j FRAMESIZE]"
1265 		" [-l]"
1266 		" [-w REPLAY_WINDOW_SIZE]"
1267 		" [-e]"
1268 		" [-a]"
1269 		" -f CONFIG_FILE"
1270 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
1271 		" [--single-sa SAIDX]"
1272 		" [--cryptodev_mask MASK]"
1273 		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1274 		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1275 		" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
1276 		" [--" CMD_LINE_OPT_MTU " MTU]"
1277 		"\n\n"
1278 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1279 		"  -P : Enable promiscuous mode\n"
1280 		"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1281 		"  -j FRAMESIZE: Data buffer size, minimum (and default)\n"
1282 		"     value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
1283 		"  -l enables code-path that uses librte_ipsec\n"
1284 		"  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1285 		"     size for each SA\n"
1286 		"  -e enables ESN\n"
1287 		"  -a enables SA SQN atomic behaviour\n"
1288 		"  -f CONFIG_FILE: Configuration file\n"
1289 		"  --config (port,queue,lcore): Rx queue configuration\n"
1290 		"  --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1291 		"                     bypassing the SP\n"
1292 		"  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1293 		"                         devices to configure\n"
1294 		"  --" CMD_LINE_OPT_RX_OFFLOAD
1295 		": bitmask of the RX HW offload capabilities to enable/use\n"
1296 		"                         (DEV_RX_OFFLOAD_*)\n"
1297 		"  --" CMD_LINE_OPT_TX_OFFLOAD
1298 		": bitmask of the TX HW offload capabilities to enable/use\n"
1299 		"                         (DEV_TX_OFFLOAD_*)\n"
1300 		"  --" CMD_LINE_OPT_REASSEMBLE " NUM"
1301 		": max number of entries in the reassemble (fragment) table\n"
1302 		"    (zero (default value) disables reassembly)\n"
1303 		"  --" CMD_LINE_OPT_MTU " MTU"
1304 		": MTU value on all ports (default value: 1500)\n"
1305 		"    outgoing packets with bigger size will be fragmented\n"
1306 		"    incoming packets with bigger size will be discarded\n"
1307 		"\n",
1308 		prgname);
1309 }
1310 
1311 static int
1312 parse_mask(const char *str, uint64_t *val)
1313 {
1314 	char *end;
1315 	unsigned long t;
1316 
1317 	errno = 0;
1318 	t = strtoul(str, &end, 0);
1319 	if (errno != 0 || end[0] != 0)
1320 		return -EINVAL;
1321 
1322 	*val = t;
1323 	return 0;
1324 }
1325 
1326 static int32_t
1327 parse_portmask(const char *portmask)
1328 {
1329 	char *end = NULL;
1330 	unsigned long pm;
1331 
1332 	/* parse hexadecimal string */
1333 	pm = strtoul(portmask, &end, 16);
1334 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1335 		return -1;
1336 
1337 	if ((pm == 0) && errno)
1338 		return -1;
1339 
1340 	return pm;
1341 }
1342 
1343 static int32_t
1344 parse_decimal(const char *str)
1345 {
1346 	char *end = NULL;
1347 	unsigned long num;
1348 
1349 	num = strtoul(str, &end, 10);
1350 	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
1351 		return -1;
1352 
1353 	return num;
1354 }
1355 
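/*
 * Parse the --config option: a list of (port,queue,lcore) triples, e.g.
 * --config="(0,0,1),(1,0,2)" assigns port 0/queue 0 to lcore 1 and
 * port 1/queue 0 to lcore 2. The parsed triples are stored in
 * lcore_params_array.
 */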
1356 static int32_t
1357 parse_config(const char *q_arg)
1358 {
1359 	char s[256];
1360 	const char *p, *p0 = q_arg;
1361 	char *end;
1362 	enum fieldnames {
1363 		FLD_PORT = 0,
1364 		FLD_QUEUE,
1365 		FLD_LCORE,
1366 		_NUM_FLD
1367 	};
1368 	unsigned long int_fld[_NUM_FLD];
1369 	char *str_fld[_NUM_FLD];
1370 	int32_t i;
1371 	uint32_t size;
1372 
1373 	nb_lcore_params = 0;
1374 
1375 	while ((p = strchr(p0, '(')) != NULL) {
1376 		++p;
1377 		p0 = strchr(p, ')');
1378 		if (p0 == NULL)
1379 			return -1;
1380 
1381 		size = p0 - p;
1382 		if (size >= sizeof(s))
1383 			return -1;
1384 
1385 		snprintf(s, sizeof(s), "%.*s", size, p);
1386 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1387 				_NUM_FLD)
1388 			return -1;
1389 		for (i = 0; i < _NUM_FLD; i++) {
1390 			errno = 0;
1391 			int_fld[i] = strtoul(str_fld[i], &end, 0);
1392 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1393 				return -1;
1394 		}
1395 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1396 			printf("exceeded max number of lcore params: %hu\n",
1397 				nb_lcore_params);
1398 			return -1;
1399 		}
1400 		lcore_params_array[nb_lcore_params].port_id =
1401 			(uint8_t)int_fld[FLD_PORT];
1402 		lcore_params_array[nb_lcore_params].queue_id =
1403 			(uint8_t)int_fld[FLD_QUEUE];
1404 		lcore_params_array[nb_lcore_params].lcore_id =
1405 			(uint8_t)int_fld[FLD_LCORE];
1406 		++nb_lcore_params;
1407 	}
1408 	lcore_params = lcore_params_array;
1409 	return 0;
1410 }
1411 
1412 static void
1413 print_app_sa_prm(const struct app_sa_prm *prm)
1414 {
1415 	printf("librte_ipsec usage: %s\n",
1416 		(prm->enable == 0) ? "disabled" : "enabled");
1417 
1418 	if (prm->enable == 0)
1419 		return;
1420 
1421 	printf("replay window size: %u\n", prm->window_size);
1422 	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1423 	printf("SA flags: %#" PRIx64 "\n", prm->flags);
1424 }
1425 
1426 static int32_t
1427 parse_args(int32_t argc, char **argv)
1428 {
1429 	int32_t opt, ret;
1430 	char **argvopt;
1431 	int32_t option_index;
1432 	char *prgname = argv[0];
1433 	int32_t f_present = 0;
1434 
1435 	argvopt = argv;
1436 
1437 	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
1438 				lgopts, &option_index)) != EOF) {
1439 
1440 		switch (opt) {
1441 		case 'p':
1442 			enabled_port_mask = parse_portmask(optarg);
1443 			if (enabled_port_mask == 0) {
1444 				printf("invalid portmask\n");
1445 				print_usage(prgname);
1446 				return -1;
1447 			}
1448 			break;
1449 		case 'P':
1450 			printf("Promiscuous mode selected\n");
1451 			promiscuous_on = 1;
1452 			break;
1453 		case 'u':
1454 			unprotected_port_mask = parse_portmask(optarg);
1455 			if (unprotected_port_mask == 0) {
1456 				printf("invalid unprotected portmask\n");
1457 				print_usage(prgname);
1458 				return -1;
1459 			}
1460 			break;
1461 		case 'f':
1462 			if (f_present == 1) {
1463 				printf("\"-f\" option present more than "
1464 					"once!\n");
1465 				print_usage(prgname);
1466 				return -1;
1467 			}
1468 			if (parse_cfg_file(optarg) < 0) {
1469 				printf("parsing file \"%s\" failed\n",
1470 					optarg);
1471 				print_usage(prgname);
1472 				return -1;
1473 			}
1474 			f_present = 1;
1475 			break;
1476 		case 'j':
1477 			ret = parse_decimal(optarg);
1478 			if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1479 					ret > UINT16_MAX) {
1480 				printf("Invalid frame buffer size value: %s\n",
1481 					optarg);
1482 				print_usage(prgname);
1483 				return -1;
1484 			}
1485 			frame_buf_size = ret;
1486 			printf("Custom frame buffer size %u\n", frame_buf_size);
1487 			break;
1488 		case 'l':
1489 			app_sa_prm.enable = 1;
1490 			break;
1491 		case 'w':
1492 			app_sa_prm.enable = 1;
1493 			app_sa_prm.window_size = parse_decimal(optarg);
1494 			break;
1495 		case 'e':
1496 			app_sa_prm.enable = 1;
1497 			app_sa_prm.enable_esn = 1;
1498 			break;
1499 		case 'a':
1500 			app_sa_prm.enable = 1;
1501 			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1502 			break;
1503 		case CMD_LINE_OPT_CONFIG_NUM:
1504 			ret = parse_config(optarg);
1505 			if (ret) {
1506 				printf("Invalid config\n");
1507 				print_usage(prgname);
1508 				return -1;
1509 			}
1510 			break;
1511 		case CMD_LINE_OPT_SINGLE_SA_NUM:
1512 			ret = parse_decimal(optarg);
1513 			if (ret == -1) {
1514 				printf("Invalid argument[sa_idx]\n");
1515 				print_usage(prgname);
1516 				return -1;
1517 			}
1518 
1519 			/* else */
1520 			single_sa = 1;
1521 			single_sa_idx = ret;
1522 			printf("Configured with single SA index %u\n",
1523 					single_sa_idx);
1524 			break;
1525 		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1526 			ret = parse_portmask(optarg);
1527 			if (ret == -1) {
1528 				printf("Invalid argument[portmask]\n");
1529 				print_usage(prgname);
1530 				return -1;
1531 			}
1532 
1533 			/* else */
1534 			enabled_cryptodev_mask = ret;
1535 			break;
1536 		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1537 			ret = parse_mask(optarg, &dev_rx_offload);
1538 			if (ret != 0) {
1539 				printf("Invalid argument for \'%s\': %s\n",
1540 					CMD_LINE_OPT_RX_OFFLOAD, optarg);
1541 				print_usage(prgname);
1542 				return -1;
1543 			}
1544 			break;
1545 		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1546 			ret = parse_mask(optarg, &dev_tx_offload);
1547 			if (ret != 0) {
1548 				printf("Invalid argument for \'%s\': %s\n",
1549 					CMD_LINE_OPT_TX_OFFLOAD, optarg);
1550 				print_usage(prgname);
1551 				return -1;
1552 			}
1553 			break;
1554 		case CMD_LINE_OPT_REASSEMBLE_NUM:
1555 			ret = parse_decimal(optarg);
1556 			if (ret < 0) {
1557 				printf("Invalid argument for \'%s\': %s\n",
1558 					CMD_LINE_OPT_REASSEMBLE, optarg);
1559 				print_usage(prgname);
1560 				return -1;
1561 			}
1562 			frag_tbl_sz = ret;
1563 			break;
1564 		case CMD_LINE_OPT_MTU_NUM:
1565 			ret = parse_decimal(optarg);
1566 			if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1567 				printf("Invalid argument for \'%s\': %s\n",
1568 					CMD_LINE_OPT_MTU, optarg);
1569 				print_usage(prgname);
1570 				return -1;
1571 			}
1572 			mtu_size = ret;
1573 			break;
1574 		default:
1575 			print_usage(prgname);
1576 			return -1;
1577 		}
1578 	}
1579 
1580 	if (f_present == 0) {
1581 		printf("Mandatory option \"-f\" not present\n");
1582 		return -1;
1583 	}
1584 
1585 	/* check whether we need to enable multi-seg support */
1586 	if (multi_seg_required()) {
1587 		/* legacy mode doesn't support multi-seg */
1588 		app_sa_prm.enable = 1;
1589 		printf("frame buf size: %u, mtu: %u, "
1590 			"number of reassemble entries: %u\n"
1591 			"multi-segment support is required\n",
1592 			frame_buf_size, mtu_size, frag_tbl_sz);
1593 	}
1594 
1595 	print_app_sa_prm(&app_sa_prm);
1596 
1597 	if (optind >= 0)
1598 		argv[optind-1] = prgname;
1599 
1600 	ret = optind-1;
1601 	optind = 1; /* reset getopt lib */
1602 	return ret;
1603 }
1604 
1605 static void
1606 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1607 {
1608 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1609 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1610 	printf("%s%s", name, buf);
1611 }
1612 
1613 /*
1614  * Update destination ethaddr for the port.
1615  */
1616 int
1617 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1618 {
1619 	if (port >= RTE_DIM(ethaddr_tbl))
1620 		return -EINVAL;
1621 
1622 	ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1623 	return 0;
1624 }
1625 
1626 /* Check the link status of all ports for up to 9s and print the final status */
1627 static void
1628 check_all_ports_link_status(uint32_t port_mask)
1629 {
1630 #define CHECK_INTERVAL 100 /* 100ms */
1631 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1632 	uint16_t portid;
1633 	uint8_t count, all_ports_up, print_flag = 0;
1634 	struct rte_eth_link link;
1635 	int ret;
1636 
1637 	printf("\nChecking link status");
1638 	fflush(stdout);
1639 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1640 		all_ports_up = 1;
1641 		RTE_ETH_FOREACH_DEV(portid) {
1642 			if ((port_mask & (1 << portid)) == 0)
1643 				continue;
1644 			memset(&link, 0, sizeof(link));
1645 			ret = rte_eth_link_get_nowait(portid, &link);
1646 			if (ret < 0) {
1647 				all_ports_up = 0;
1648 				if (print_flag == 1)
1649 					printf("Port %u link get failed: %s\n",
1650 						portid, rte_strerror(-ret));
1651 				continue;
1652 			}
1653 			/* print link status if flag set */
1654 			if (print_flag == 1) {
1655 				if (link.link_status)
1656 					printf(
1657 					"Port%d Link Up - speed %u Mbps -%s\n",
1658 						portid, link.link_speed,
1659 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1660 					("full-duplex") : ("half-duplex"));
1661 				else
1662 					printf("Port %d Link Down\n", portid);
1663 				continue;
1664 			}
1665 			/* clear all_ports_up flag if any link down */
1666 			if (link.link_status == ETH_LINK_DOWN) {
1667 				all_ports_up = 0;
1668 				break;
1669 			}
1670 		}
1671 		/* after finally printing all link status, get out */
1672 		if (print_flag == 1)
1673 			break;
1674 
1675 		if (all_ports_up == 0) {
1676 			printf(".");
1677 			fflush(stdout);
1678 			rte_delay_ms(CHECK_INTERVAL);
1679 		}
1680 
1681 		/* set the print_flag if all ports up or timeout */
1682 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1683 			print_flag = 1;
1684 			printf("done\n");
1685 		}
1686 	}
1687 }
1688 
1689 static int32_t
1690 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1691 		uint16_t qp, struct lcore_params *params,
1692 		struct ipsec_ctx *ipsec_ctx,
1693 		const struct rte_cryptodev_capabilities *cipher,
1694 		const struct rte_cryptodev_capabilities *auth,
1695 		const struct rte_cryptodev_capabilities *aead)
1696 {
1697 	int32_t ret = 0;
1698 	unsigned long i;
1699 	struct cdev_key key = { 0 };
1700 
1701 	key.lcore_id = params->lcore_id;
1702 	if (cipher)
1703 		key.cipher_algo = cipher->sym.cipher.algo;
1704 	if (auth)
1705 		key.auth_algo = auth->sym.auth.algo;
1706 	if (aead)
1707 		key.aead_algo = aead->sym.aead.algo;
1708 
1709 	ret = rte_hash_lookup(map, &key);
1710 	if (ret != -ENOENT)
1711 		return 0;
1712 
1713 	for (i = 0; i < ipsec_ctx->nb_qps; i++)
1714 		if (ipsec_ctx->tbl[i].id == cdev_id)
1715 			break;
1716 
1717 	if (i == ipsec_ctx->nb_qps) {
1718 		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1719 			printf("Maximum number of crypto devices assigned to "
1720 				"a core, increase MAX_QP_PER_LCORE value\n");
1721 			return 0;
1722 		}
1723 		ipsec_ctx->tbl[i].id = cdev_id;
1724 		ipsec_ctx->tbl[i].qp = qp;
1725 		ipsec_ctx->nb_qps++;
1726 		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1727 				"(cdev_id_qp %lu)\n", str, key.lcore_id,
1728 				cdev_id, qp, i);
1729 	}
1730 
1731 	ret = rte_hash_add_key_data(map, &key, (void *)i);
1732 	if (ret < 0) {
1733 		printf("Failed to insert cdev mapping for (lcore %u, "
1734 				"cdev %u, qp %u), errno %d\n",
1735 				key.lcore_id, ipsec_ctx->tbl[i].id,
1736 				ipsec_ctx->tbl[i].qp, ret);
1737 		return 0;
1738 	}
1739 
1740 	return 1;
1741 }
1742 
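/*
 * Register the (lcore, algorithm) -> (cdev, qp) mappings for one cryptodev
 * queue pair. The inbound or outbound map is chosen from the protection
 * status of the port in 'params'; only devices supporting symmetric
 * operation chaining are considered. Every supported AEAD capability and
 * every cipher/auth capability pair gets an entry.
 */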
1743 static int32_t
1744 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1745 		uint16_t qp, struct lcore_params *params)
1746 {
1747 	int32_t ret = 0;
1748 	const struct rte_cryptodev_capabilities *i, *j;
1749 	struct rte_hash *map;
1750 	struct lcore_conf *qconf;
1751 	struct ipsec_ctx *ipsec_ctx;
1752 	const char *str;
1753 
1754 	qconf = &lcore_conf[params->lcore_id];
1755 
1756 	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1757 		map = cdev_map_out;
1758 		ipsec_ctx = &qconf->outbound;
1759 		str = "Outbound";
1760 	} else {
1761 		map = cdev_map_in;
1762 		ipsec_ctx = &qconf->inbound;
1763 		str = "Inbound";
1764 	}
1765 
1766 	/* Require cryptodevs with symmetric operation chaining support */
1767 	if (!(dev_info->feature_flags &
1768 				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1769 		return ret;
1770 
1771 	for (i = dev_info->capabilities;
1772 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1773 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1774 			continue;
1775 
1776 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1777 			ret |= add_mapping(map, str, cdev_id, qp, params,
1778 					ipsec_ctx, NULL, NULL, i);
1779 			continue;
1780 		}
1781 
1782 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1783 			continue;
1784 
1785 		for (j = dev_info->capabilities;
1786 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1787 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1788 				continue;
1789 
1790 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1791 				continue;
1792 
1793 			ret |= add_mapping(map, str, cdev_id, qp, params,
1794 						ipsec_ctx, i, j, NULL);
1795 		}
1796 	}
1797 
1798 	return ret;
1799 }
1800 
1801 /* Check if the device is enabled by cryptodev_mask */
1802 static int
1803 check_cryptodev_mask(uint8_t cdev_id)
1804 {
1805 	if (enabled_cryptodev_mask & (1 << cdev_id))
1806 		return 0;
1807 
1808 	return -1;
1809 }
1810 
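/*
 * Create the inbound/outbound cdev mapping hash tables, distribute the
 * available cryptodev queue pairs over the lcores listed in the --config
 * mapping (round-robin), then configure, set up the queue pairs (using the
 * per-socket session pools) and start every cryptodev enabled in
 * --cryptodev_mask.
 */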
1811 static int32_t
1812 cryptodevs_init(void)
1813 {
1814 	struct rte_cryptodev_config dev_conf;
1815 	struct rte_cryptodev_qp_conf qp_conf;
1816 	uint16_t idx, max_nb_qps, qp, i;
1817 	int16_t cdev_id;
1818 	struct rte_hash_parameters params = { 0 };
1819 
1820 	const uint64_t mseg_flag = multi_seg_required() ?
1821 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1822 
1823 	params.entries = CDEV_MAP_ENTRIES;
1824 	params.key_len = sizeof(struct cdev_key);
1825 	params.hash_func = rte_jhash;
1826 	params.hash_func_init_val = 0;
1827 	params.socket_id = rte_socket_id();
1828 
1829 	params.name = "cdev_map_in";
1830 	cdev_map_in = rte_hash_create(&params);
1831 	if (cdev_map_in == NULL)
1832 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1833 				rte_errno);
1834 
1835 	params.name = "cdev_map_out";
1836 	cdev_map_out = rte_hash_create(&params);
1837 	if (cdev_map_out == NULL)
1838 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1839 				rte_errno);
1840 
1841 	printf("lcore/cryptodev/qp mappings:\n");
1842 
1843 	idx = 0;
1844 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1845 		struct rte_cryptodev_info cdev_info;
1846 
1847 		if (check_cryptodev_mask((uint8_t)cdev_id))
1848 			continue;
1849 
1850 		rte_cryptodev_info_get(cdev_id, &cdev_info);
1851 
1852 		if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1853 			rte_exit(EXIT_FAILURE,
1854 				"Device %hd does not support \'%s\' feature\n",
1855 				cdev_id,
1856 				rte_cryptodev_get_feature_name(mseg_flag));
1857 
1858 		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1859 			max_nb_qps = cdev_info.max_nb_queue_pairs;
1860 		else
1861 			max_nb_qps = nb_lcore_params;
1862 
1863 		qp = 0;
1864 		i = 0;
1865 		while (qp < max_nb_qps && i < nb_lcore_params) {
1866 			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1867 						&lcore_params[idx]))
1868 				qp++;
1869 			idx++;
1870 			idx = idx % nb_lcore_params;
1871 			i++;
1872 		}
1873 
1874 		if (qp == 0)
1875 			continue;
1876 
1877 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1878 		dev_conf.nb_queue_pairs = qp;
1879 		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1880 
1881 		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1882 		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
1883 			rte_exit(EXIT_FAILURE,
1884 				"Device does not support at least %u "
1885 				"sessions", CDEV_MP_NB_OBJS);
1886 
1887 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
1888 			rte_panic("Failed to initialize cryptodev %u\n",
1889 					cdev_id);
1890 
1891 		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1892 		qp_conf.mp_session =
1893 			socket_ctx[dev_conf.socket_id].session_pool;
1894 		qp_conf.mp_session_private =
1895 			socket_ctx[dev_conf.socket_id].session_priv_pool;
1896 		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1897 			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1898 					&qp_conf, dev_conf.socket_id))
1899 				rte_panic("Failed to setup queue %u for "
1900 						"cdev_id %u\n", qp, cdev_id);
1901 
1902 		if (rte_cryptodev_start(cdev_id))
1903 			rte_panic("Failed to start cryptodev %u\n",
1904 					cdev_id);
1905 	}
1906 
1907 	printf("\n");
1908 
1909 	return 0;
1910 }
1911 
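/*
 * Configure one ethernet port: constrain the RX/TX offloads to what the
 * user allowed and the device supports, derive the max frame length from
 * the configured MTU, adjust the RSS hash functions to the hardware, and
 * configure the device with one TX queue per lcore and the RX queues
 * referenced in the --config mapping.
 */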
1912 static void
1913 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1914 {
1915 	uint32_t frame_size;
1916 	struct rte_eth_dev_info dev_info;
1917 	struct rte_eth_txconf *txconf;
1918 	uint16_t nb_tx_queue, nb_rx_queue;
1919 	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1920 	int32_t ret, socket_id;
1921 	struct lcore_conf *qconf;
1922 	struct rte_ether_addr ethaddr;
1923 	struct rte_eth_conf local_port_conf = port_conf;
1924 
1925 	ret = rte_eth_dev_info_get(portid, &dev_info);
1926 	if (ret != 0)
1927 		rte_exit(EXIT_FAILURE,
1928 			"Error during getting device (port %u) info: %s\n",
1929 			portid, strerror(-ret));
1930 
1931 	/* limit the allowed HW offloads, as requested by the user */
1932 	dev_info.rx_offload_capa &= dev_rx_offload;
1933 	dev_info.tx_offload_capa &= dev_tx_offload;
1934 
1935 	printf("Configuring device port %u:\n", portid);
1936 
1937 	ret = rte_eth_macaddr_get(portid, &ethaddr);
1938 	if (ret != 0)
1939 		rte_exit(EXIT_FAILURE,
1940 			"Error getting MAC address (port %u): %s\n",
1941 			portid, rte_strerror(-ret));
1942 
1943 	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
1944 	print_ethaddr("Address: ", &ethaddr);
1945 	printf("\n");
1946 
1947 	nb_rx_queue = get_port_nb_rx_queues(portid);
1948 	nb_tx_queue = nb_lcores;
1949 
1950 	if (nb_rx_queue > dev_info.max_rx_queues)
1951 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1952 				"(max rx queue is %u)\n",
1953 				nb_rx_queue, dev_info.max_rx_queues);
1954 
1955 	if (nb_tx_queue > dev_info.max_tx_queues)
1956 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1957 				"(max tx queue is %u)\n",
1958 				nb_tx_queue, dev_info.max_tx_queues);
1959 
1960 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1961 			nb_rx_queue, nb_tx_queue);
1962 
1963 	frame_size = MTU_TO_FRAMELEN(mtu_size);
1964 	if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
1965 		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1966 	local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1967 
1968 	if (multi_seg_required()) {
1969 		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
1970 		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1971 	}
1972 
1973 	local_port_conf.rxmode.offloads |= req_rx_offloads;
1974 	local_port_conf.txmode.offloads |= req_tx_offloads;
1975 
1976 	/* Check that all required capabilities are supported */
1977 	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1978 			local_port_conf.rxmode.offloads)
1979 		rte_exit(EXIT_FAILURE,
1980 			"Error: port %u required RX offloads: 0x%" PRIx64
1981 			", available RX offloads: 0x%" PRIx64 "\n",
1982 			portid, local_port_conf.rxmode.offloads,
1983 			dev_info.rx_offload_capa);
1984 
1985 	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1986 			local_port_conf.txmode.offloads)
1987 		rte_exit(EXIT_FAILURE,
1988 			"Error: port %u required TX offloads: 0x%" PRIx64
1989 			", available TX offloads: 0x%" PRIx64 "\n",
1990 			portid, local_port_conf.txmode.offloads,
1991 			dev_info.tx_offload_capa);
1992 
1993 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1994 		local_port_conf.txmode.offloads |=
1995 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1996 
1997 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
1998 		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
1999 
2000 	printf("port %u configuring rx_offloads=0x%" PRIx64
2001 		", tx_offloads=0x%" PRIx64 "\n",
2002 		portid, local_port_conf.rxmode.offloads,
2003 		local_port_conf.txmode.offloads);
2004 
2005 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2006 		dev_info.flow_type_rss_offloads;
2007 	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2008 			port_conf.rx_adv_conf.rss_conf.rss_hf) {
2009 		printf("Port %u modified RSS hash function based on hardware support, "
2010 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
2011 			portid,
2012 			port_conf.rx_adv_conf.rss_conf.rss_hf,
2013 			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2014 	}
2015 
2016 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2017 			&local_port_conf);
2018 	if (ret < 0)
2019 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
2020 				"err=%d, port=%d\n", ret, portid);
2021 
2022 	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2023 	if (ret < 0)
2024 		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2025 				"err=%d, port=%d\n", ret, portid);
2026 
2027 	/* init one TX queue per lcore */
2028 	tx_queueid = 0;
2029 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2030 		if (rte_lcore_is_enabled(lcore_id) == 0)
2031 			continue;
2032 
2033 		if (numa_on)
2034 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2035 		else
2036 			socket_id = 0;
2037 
2038 		/* init TX queue */
2039 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2040 
2041 		txconf = &dev_info.default_txconf;
2042 		txconf->offloads = local_port_conf.txmode.offloads;
2043 
2044 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2045 				socket_id, txconf);
2046 		if (ret < 0)
2047 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2048 					"err=%d, port=%d\n", ret, portid);
2049 
2050 		qconf = &lcore_conf[lcore_id];
2051 		qconf->tx_queue_id[portid] = tx_queueid;
2052 
2053 		/* Pre-populate pkt offloads based on capabilities */
2054 		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
2055 		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
2056 		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
2057 			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
2058 
2059 		tx_queueid++;
2060 
2061 		/* init RX queues */
2062 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2063 			struct rte_eth_rxconf rxq_conf;
2064 
2065 			if (portid != qconf->rx_queue_list[queue].port_id)
2066 				continue;
2067 
2068 			rx_queueid = qconf->rx_queue_list[queue].queue_id;
2069 
2070 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2071 					socket_id);
2072 
2073 			rxq_conf = dev_info.default_rxconf;
2074 			rxq_conf.offloads = local_port_conf.rxmode.offloads;
2075 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2076 					nb_rxd,	socket_id, &rxq_conf,
2077 					socket_ctx[socket_id].mbuf_pool);
2078 			if (ret < 0)
2079 				rte_exit(EXIT_FAILURE,
2080 					"rte_eth_rx_queue_setup: err=%d, "
2081 					"port=%d\n", ret, portid);
2082 		}
2083 	}
2084 	printf("\n");
2085 }
2086 
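/*
 * Return the largest session size reported by any crypto device or
 * security-capable ethdev port; used to size the session mempools.
 */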
2087 static size_t
2088 max_session_size(void)
2089 {
2090 	size_t max_sz, sz;
2091 	void *sec_ctx;
2092 	int16_t cdev_id, port_id, n;
2093 
2094 	max_sz = 0;
2095 	n = rte_cryptodev_count();
2096 	for (cdev_id = 0; cdev_id != n; cdev_id++) {
2097 		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2098 		if (sz > max_sz)
2099 			max_sz = sz;
2100 		/*
2101 		 * If crypto device is security capable, need to check the
2102 		 * size of security session as well.
2103 		 */
2104 
2105 		/* Get security context of the crypto device */
2106 		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2107 		if (sec_ctx == NULL)
2108 			continue;
2109 
2110 		/* Get size of security session */
2111 		sz = rte_security_session_get_size(sec_ctx);
2112 		if (sz > max_sz)
2113 			max_sz = sz;
2114 	}
2115 
2116 	RTE_ETH_FOREACH_DEV(port_id) {
2117 		if ((enabled_port_mask & (1 << port_id)) == 0)
2118 			continue;
2119 
2120 		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2121 		if (sec_ctx == NULL)
2122 			continue;
2123 
2124 		sz = rte_security_session_get_size(sec_ctx);
2125 		if (sz > max_sz)
2126 			max_sz = sz;
2127 	}
2128 
2129 	return max_sz;
2130 }
2131 
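/* Create the per-socket pool of symmetric crypto session headers. */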
2132 static void
2133 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2134 {
2135 	char mp_name[RTE_MEMPOOL_NAMESIZE];
2136 	struct rte_mempool *sess_mp;
2137 
2138 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2139 			"sess_mp_%u", socket_id);
2140 	sess_mp = rte_cryptodev_sym_session_pool_create(
2141 			mp_name, CDEV_MP_NB_OBJS,
2142 			sess_sz, CDEV_MP_CACHE_SZ, 0,
2143 			socket_id);
2144 	ctx->session_pool = sess_mp;
2145 
2146 	if (ctx->session_pool == NULL)
2147 		rte_exit(EXIT_FAILURE,
2148 			"Cannot init session pool on socket %d\n", socket_id);
2149 	else
2150 		printf("Allocated session pool on socket %d\n", socket_id);
2151 }
2152 
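/* Create the per-socket pool holding driver-private session data. */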
2153 static void
2154 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2155 	size_t sess_sz)
2156 {
2157 	char mp_name[RTE_MEMPOOL_NAMESIZE];
2158 	struct rte_mempool *sess_mp;
2159 
2160 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2161 			"sess_mp_priv_%u", socket_id);
2162 	sess_mp = rte_mempool_create(mp_name,
2163 			CDEV_MP_NB_OBJS,
2164 			sess_sz,
2165 			CDEV_MP_CACHE_SZ,
2166 			0, NULL, NULL, NULL,
2167 			NULL, socket_id,
2168 			0);
2169 	ctx->session_priv_pool = sess_mp;
2170 
2171 	if (ctx->session_priv_pool == NULL)
2172 		rte_exit(EXIT_FAILURE,
2173 			"Cannot init session priv pool on socket %d\n",
2174 			socket_id);
2175 	else
2176 		printf("Allocated session priv pool on socket %d\n",
2177 			socket_id);
2178 }
2179 
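/*
 * Create the per-socket packet mbuf pool and, when multi-segment support
 * is required, an additional pool of indirect mbufs.
 */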
2180 static void
2181 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2182 {
2183 	char s[64];
2184 	int32_t ms;
2185 
2186 	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2187 	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2188 			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2189 			frame_buf_size, socket_id);
2190 
2191 	/*
2192 	 * if multi-segment support is enabled, then create a pool
2193 	 * for indirect mbufs.
2194 	 */
2195 	ms = multi_seg_required();
2196 	if (ms != 0) {
2197 		snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2198 		ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2199 			MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2200 	}
2201 
2202 	if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
2203 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2204 				socket_id);
2205 	else
2206 		printf("Allocated mbuf pool on socket %d\n", socket_id);
2207 }
2208 
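/* Handle an ESN overflow event raised by an inline IPsec capable device. */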
2209 static inline int
2210 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2211 {
2212 	struct ipsec_sa *sa;
2213 
2214 	/* For inline protocol processing, the metadata in the event will
2215 	 * uniquely identify the security session which raised the event.
2216 	 * Application would then need the userdata it had registered with the
2217 	 * security session to process the event.
2218 	 */
2219 
2220 	sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2221 
2222 	if (sa == NULL) {
2223 		/* userdata could not be retrieved */
2224 		return -1;
2225 	}
2226 
2227 	/* Sequence number overflow. The SA needs to be re-established */
2228 	RTE_SET_USED(sa);
2229 	return 0;
2230 }
2231 
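/* Ethdev callback for RTE_ETH_EVENT_IPSEC events raised by inline IPsec. */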
2232 static int
2233 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2234 		 void *param, void *ret_param)
2235 {
2236 	uint64_t md;
2237 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
2238 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2239 					rte_eth_dev_get_sec_ctx(port_id);
2240 
2241 	RTE_SET_USED(param);
2242 
2243 	if (type != RTE_ETH_EVENT_IPSEC)
2244 		return -1;
2245 
2246 	event_desc = ret_param;
2247 	if (event_desc == NULL) {
2248 		printf("Event descriptor not set\n");
2249 		return -1;
2250 	}
2251 
2252 	md = event_desc->metadata;
2253 
2254 	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2255 		return inline_ipsec_event_esn_overflow(ctx, md);
2256 	else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2257 		printf("Invalid IPsec event reported\n");
2258 		return -1;
2259 	}
2260 
2261 	return -1;
2262 }
2263 
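/*
 * RX callback that reassembles IPv4/IPv6 fragments; complete packets are
 * compacted to the front of the burst and their count is returned.
 */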
2264 static uint16_t
2265 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2266 	struct rte_mbuf *pkt[], uint16_t nb_pkts,
2267 	__rte_unused uint16_t max_pkts, void *user_param)
2268 {
2269 	uint64_t tm;
2270 	uint32_t i, k;
2271 	struct lcore_conf *lc;
2272 	struct rte_mbuf *mb;
2273 	struct rte_ether_hdr *eth;
2274 
2275 	lc = user_param;
2276 	k = 0;
2277 	tm = 0;
2278 
2279 	for (i = 0; i != nb_pkts; i++) {
2280 
2281 		mb = pkt[i];
2282 		eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2283 		if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2284 
2285 			struct rte_ipv4_hdr *iph;
2286 
2287 			iph = (struct rte_ipv4_hdr *)(eth + 1);
2288 			if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2289 
2290 				mb->l2_len = sizeof(*eth);
2291 				mb->l3_len = sizeof(*iph);
2292 				tm = (tm != 0) ? tm : rte_rdtsc();
2293 				mb = rte_ipv4_frag_reassemble_packet(
2294 					lc->frag.tbl, &lc->frag.dr,
2295 					mb, tm, iph);
2296 
2297 				if (mb != NULL) {
2298 					/* fix the IP checksum after reassembly. */
2299 					iph = rte_pktmbuf_mtod_offset(mb,
2300 						struct rte_ipv4_hdr *,
2301 						mb->l2_len);
2302 					iph->hdr_checksum = 0;
2303 					iph->hdr_checksum = rte_ipv4_cksum(iph);
2304 				}
2305 			}
2306 		} else if (eth->ether_type ==
2307 				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2308 
2309 			struct rte_ipv6_hdr *iph;
2310 			struct ipv6_extension_fragment *fh;
2311 
2312 			iph = (struct rte_ipv6_hdr *)(eth + 1);
2313 			fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2314 			if (fh != NULL) {
2315 				mb->l2_len = sizeof(*eth);
2316 				mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2317 					sizeof(*fh);
2318 				tm = (tm != 0) ? tm : rte_rdtsc();
2319 				mb = rte_ipv6_frag_reassemble_packet(
2320 					lc->frag.tbl, &lc->frag.dr,
2321 					mb, tm, iph, fh);
2322 				if (mb != NULL)
2323 					/* fix l3_len after reassembly. */
2324 					mb->l3_len = mb->l3_len - sizeof(*fh);
2325 			}
2326 		}
2327 
2328 		pkt[k] = mb;
2329 		k += (mb != NULL);
2330 	}
2331 
2332 	/* some fragments were encountered, drain death row */
2333 	if (tm != 0)
2334 		rte_ip_frag_free_death_row(&lc->frag.dr, 0);
2335 
2336 	return k;
2337 }
2338 
2339 
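/*
 * Per-lcore reassembly setup: create the fragment table and install the
 * reassembly RX callback on each RX queue handled by this lcore.
 */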
2340 static int
2341 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2342 {
2343 	int32_t sid;
2344 	uint32_t i;
2345 	uint64_t frag_cycles;
2346 	const struct lcore_rx_queue *rxq;
2347 	const struct rte_eth_rxtx_callback *cb;
2348 
2349 	/* create fragment table */
2350 	sid = rte_lcore_to_socket_id(cid);
2351 	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
2352 		MS_PER_S * FRAG_TTL_MS;
2353 
2354 	lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2355 		FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2356 	if (lc->frag.tbl == NULL) {
2357 		printf("%s(%u): failed to create fragment table of size: %u, "
2358 			"error code: %d\n",
2359 			__func__, cid, frag_tbl_sz, rte_errno);
2360 		return -ENOMEM;
2361 	}
2362 
2363 	/* setup reassemble RX callbacks for all queues */
2364 	for (i = 0; i != lc->nb_rx_queue; i++) {
2365 
2366 		rxq = lc->rx_queue_list + i;
2367 		cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2368 			rx_callback, lc);
2369 		if (cb == NULL) {
2370 			printf("%s(%u): failed to install RX callback for "
2371 				"portid=%u, queueid=%u, error code: %d\n",
2372 				__func__, cid,
2373 				rxq->port_id, rxq->queue_id, rte_errno);
2374 			return -ENOMEM;
2375 		}
2376 	}
2377 
2378 	return 0;
2379 }
2380 
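/* Set up fragment reassembly on every lcore listed in lcore_params. */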
2381 static int
2382 reassemble_init(void)
2383 {
2384 	int32_t rc;
2385 	uint32_t i, lc;
2386 
2387 	rc = 0;
2388 	for (i = 0; i != nb_lcore_params; i++) {
2389 		lc = lcore_params[i].lcore_id;
2390 		rc = reassemble_lcore_init(lcore_conf + lc, lc);
2391 		if (rc != 0)
2392 			break;
2393 	}
2394 
2395 	return rc;
2396 }
2397 
2398 int32_t
2399 main(int32_t argc, char **argv)
2400 {
2401 	int32_t ret;
2402 	uint32_t lcore_id;
2403 	uint32_t i;
2404 	uint8_t socket_id;
2405 	uint16_t portid;
2406 	uint64_t req_rx_offloads, req_tx_offloads;
2407 	size_t sess_sz;
2408 
2409 	/* init EAL */
2410 	ret = rte_eal_init(argc, argv);
2411 	if (ret < 0)
2412 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2413 	argc -= ret;
2414 	argv += ret;
2415 
2416 	/* parse application arguments (after the EAL ones) */
2417 	ret = parse_args(argc, argv);
2418 	if (ret < 0)
2419 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2420 
2421 	if ((unprotected_port_mask & enabled_port_mask) !=
2422 			unprotected_port_mask)
2423 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2424 				unprotected_port_mask);
2425 
2426 	if (check_params() < 0)
2427 		rte_exit(EXIT_FAILURE, "check_params failed\n");
2428 
2429 	ret = init_lcore_rx_queues();
2430 	if (ret < 0)
2431 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2432 
2433 	nb_lcores = rte_lcore_count();
2434 
2435 	sess_sz = max_session_size();
2436 
2437 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2438 		if (rte_lcore_is_enabled(lcore_id) == 0)
2439 			continue;
2440 
2441 		if (numa_on)
2442 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2443 		else
2444 			socket_id = 0;
2445 
2446 		/* mbuf_pool is initialised by the pool_init() function */
2447 		if (socket_ctx[socket_id].mbuf_pool)
2448 			continue;
2449 
2450 		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
2451 		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
2452 		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
2453 			sess_sz);
2454 	}
2455 
2456 	RTE_ETH_FOREACH_DEV(portid) {
2457 		if ((enabled_port_mask & (1 << portid)) == 0)
2458 			continue;
2459 
2460 		sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
2461 		port_init(portid, req_rx_offloads, req_tx_offloads);
2462 	}
2463 
2464 	cryptodevs_init();
2465 
2466 	/* start ports */
2467 	RTE_ETH_FOREACH_DEV(portid) {
2468 		if ((enabled_port_mask & (1 << portid)) == 0)
2469 			continue;
2470 
2471 		/*
2472 		 * Start device
2473 		 * note: device must be started before a flow rule
2474 		 * can be installed.
2475 		 */
2476 		ret = rte_eth_dev_start(portid);
2477 		if (ret < 0)
2478 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2479 					"err=%d, port=%d\n", ret, portid);
2480 		/*
2481 		 * If enabled, put device in promiscuous mode.
2482 		 * This allows IO forwarding mode to forward packets
2483 		 * to itself through 2 cross-connected ports of the
2484 		 * target machine.
2485 		 */
2486 		if (promiscuous_on) {
2487 			ret = rte_eth_promiscuous_enable(portid);
2488 			if (ret != 0)
2489 				rte_exit(EXIT_FAILURE,
2490 					"rte_eth_promiscuous_enable: err=%s, port=%d\n",
2491 					rte_strerror(-ret), portid);
2492 		}
2493 
2494 		rte_eth_dev_callback_register(portid,
2495 			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
2496 	}
2497 
2498 	/* fragment reassemble is enabled */
2499 	if (frag_tbl_sz != 0) {
2500 		ret = reassemble_init();
2501 		if (ret != 0)
2502 			rte_exit(EXIT_FAILURE, "failed at reassemble init\n");
2503 	}
2504 
2505 	/* Replicate each context per socket */
2506 	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
2507 		socket_id = rte_socket_id_by_idx(i);
2508 		if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
2509 			(socket_ctx[socket_id].sa_in == NULL) &&
2510 			(socket_ctx[socket_id].sa_out == NULL)) {
2511 			sa_init(&socket_ctx[socket_id], socket_id);
2512 			sp4_init(&socket_ctx[socket_id], socket_id);
2513 			sp6_init(&socket_ctx[socket_id], socket_id);
2514 			rt_init(&socket_ctx[socket_id], socket_id);
2515 		}
2516 	}
2517 
2518 	check_all_ports_link_status(enabled_port_mask);
2519 
2520 	/* launch per-lcore init on every lcore */
2521 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2522 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2523 		if (rte_eal_wait_lcore(lcore_id) < 0)
2524 			return -1;
2525 	}
2526 
2527 	return 0;
2528 }
2529