xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.c (revision c7f5dba7d4bb7971fac51755aad09b71b10cef90)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
13 #include <string.h>
14 #include <sys/queue.h>
15 #include <stdarg.h>
16 #include <errno.h>
17 #include <getopt.h>
18 
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_eal.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
36 #include <rte_mbuf.h>
37 #include <rte_acl.h>
38 #include <rte_lpm.h>
39 #include <rte_lpm6.h>
40 #include <rte_hash.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
44 
45 #include "ipsec.h"
46 #include "parser.h"
47 
48 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
49 
50 #define MAX_JUMBO_PKT_LEN  9600
51 
52 #define MEMPOOL_CACHE_SIZE 256
53 
54 #define NB_MBUF	(32000)
55 
56 #define CDEV_QUEUE_DESC 2048
57 #define CDEV_MAP_ENTRIES 16384
58 #define CDEV_MP_NB_OBJS 2048
59 #define CDEV_MP_CACHE_SZ 64
60 #define MAX_QUEUE_PAIRS 1
61 
62 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
63 
64 #define NB_SOCKETS 4
65 
66 /* Configure how many packets ahead to prefetch, when reading packets */
67 #define PREFETCH_OFFSET	3
68 
69 #define MAX_RX_QUEUE_PER_LCORE 16
70 
71 #define MAX_LCORE_PARAMS 1024
72 
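/*
 * Ports in the -u mask are "unprotected": traffic received on them is
 * expected to be IPsec traffic and is handled by the inbound (decrypt)
 * path, while traffic received on protected ports goes through the
 * outbound (encrypt) path.
 */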
73 #define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << (portid)))
74 
75 /*
76  * Configurable number of RX/TX ring descriptors
77  */
78 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
79 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
80 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
81 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
82 
83 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
84 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
85 	(((uint64_t)((a) & 0xff) << 56) | \
86 	((uint64_t)((b) & 0xff) << 48) | \
87 	((uint64_t)((c) & 0xff) << 40) | \
88 	((uint64_t)((d) & 0xff) << 32) | \
89 	((uint64_t)((e) & 0xff) << 24) | \
90 	((uint64_t)((f) & 0xff) << 16) | \
91 	((uint64_t)((g) & 0xff) << 8)  | \
92 	((uint64_t)(h) & 0xff))
93 #else
94 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
95 	(((uint64_t)((h) & 0xff) << 56) | \
96 	((uint64_t)((g) & 0xff) << 48) | \
97 	((uint64_t)((f) & 0xff) << 40) | \
98 	((uint64_t)((e) & 0xff) << 32) | \
99 	((uint64_t)((d) & 0xff) << 24) | \
100 	((uint64_t)((c) & 0xff) << 16) | \
101 	((uint64_t)((b) & 0xff) << 8) | \
102 	((uint64_t)(a) & 0xff))
103 #endif
104 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
105 
106 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
107 		addr.addr_bytes[0], addr.addr_bytes[1], \
108 		addr.addr_bytes[2], addr.addr_bytes[3], \
109 		addr.addr_bytes[4], addr.addr_bytes[5], \
110 		0, 0)
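/*
 * Worked example (either byte order): ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a)
 * yields a uint64_t whose in-memory bytes are 00 16 3e 7e 94 9a 00 00, i.e.
 * the six MAC bytes in wire order followed by two zero bytes, so the first
 * sizeof(struct ether_addr) bytes can be copied straight into an Ethernet
 * header (see prepare_tx_pkt()).
 */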
111 
112 /* port/source ethernet addr and destination ethernet addr */
113 struct ethaddr_info {
114 	uint64_t src, dst;
115 };
116 
117 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
118 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
119 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
120 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
121 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
122 };
123 
124 #define CMD_LINE_OPT_CONFIG		"config"
125 #define CMD_LINE_OPT_SINGLE_SA		"single-sa"
126 #define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
127 
128 enum {
129 	/* long options mapped to a short option */
130 
131 	/* the first long-only option value must be >= 256, so that it
132 	 * does not conflict with short options
133 	 */
134 	CMD_LINE_OPT_MIN_NUM = 256,
135 	CMD_LINE_OPT_CONFIG_NUM,
136 	CMD_LINE_OPT_SINGLE_SA_NUM,
137 	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
138 };
139 
140 static const struct option lgopts[] = {
141 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
142 	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
143 	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
144 	{NULL, 0, 0, 0}
145 };
146 
147 /* mask of enabled ports */
148 static uint32_t enabled_port_mask;
149 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
150 static uint32_t unprotected_port_mask;
151 static int32_t promiscuous_on = 1;
152 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
153 static uint32_t nb_lcores;
154 static uint32_t single_sa;
155 static uint32_t single_sa_idx;
156 static uint32_t frame_size;
157 
158 struct lcore_rx_queue {
159 	uint16_t port_id;
160 	uint8_t queue_id;
161 } __rte_cache_aligned;
162 
163 struct lcore_params {
164 	uint16_t port_id;
165 	uint8_t queue_id;
166 	uint8_t lcore_id;
167 } __rte_cache_aligned;
168 
169 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
170 
171 static struct lcore_params *lcore_params;
172 static uint16_t nb_lcore_params;
173 
174 static struct rte_hash *cdev_map_in;
175 static struct rte_hash *cdev_map_out;
176 
177 struct buffer {
178 	uint16_t len;
179 	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
180 };
181 
182 struct lcore_conf {
183 	uint16_t nb_rx_queue;
184 	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
185 	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
186 	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
187 	struct ipsec_ctx inbound;
188 	struct ipsec_ctx outbound;
189 	struct rt_ctx *rt4_ctx;
190 	struct rt_ctx *rt6_ctx;
191 } __rte_cache_aligned;
192 
193 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
194 
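/*
 * Default port configuration: RSS-distributed RX with checksum offload and
 * plain TX with IPv4 checksum and multi-segment offloads.  port_init() adds
 * the RX/TX security offloads and jumbo-frame settings per port when the
 * hardware supports them or when -j is given.
 */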
195 static struct rte_eth_conf port_conf = {
196 	.rxmode = {
197 		.mq_mode	= ETH_MQ_RX_RSS,
198 		.max_rx_pkt_len = ETHER_MAX_LEN,
199 		.split_hdr_size = 0,
200 		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
201 	},
202 	.rx_adv_conf = {
203 		.rss_conf = {
204 			.rss_key = NULL,
205 			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
206 				ETH_RSS_TCP | ETH_RSS_SCTP,
207 		},
208 	},
209 	.txmode = {
210 		.mq_mode = ETH_MQ_TX_NONE,
211 		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
212 			     DEV_TX_OFFLOAD_MULTI_SEGS),
213 	},
214 };
215 
216 static struct socket_ctx socket_ctx[NB_SOCKETS];
217 
218 struct traffic_type {
219 	const uint8_t *data[MAX_PKT_BURST * 2];
220 	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
221 	uint32_t res[MAX_PKT_BURST * 2];
222 	uint32_t num;
223 };
224 
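/*
 * Packets of a burst are classified into three buckets: "ipsec" for ESP
 * packets that still need crypto processing, and "ip4"/"ip6" for plain
 * packets that only need an SP lookup and routing.  data[] points at the
 * IP next-protocol field and is the buffer handed to rte_acl_classify().
 */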
225 struct ipsec_traffic {
226 	struct traffic_type ipsec;
227 	struct traffic_type ip4;
228 	struct traffic_type ip6;
229 };
230 
231 static inline void
232 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
233 {
234 	uint8_t *nlp;
235 	struct ether_hdr *eth;
236 
237 	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
238 	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
239 		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
240 		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
241 		if (*nlp == IPPROTO_ESP)
242 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
243 		else {
244 			t->ip4.data[t->ip4.num] = nlp;
245 			t->ip4.pkts[(t->ip4.num)++] = pkt;
246 		}
247 	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
248 		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
249 		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
250 		if (*nlp == IPPROTO_ESP)
251 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
252 		else {
253 			t->ip6.data[t->ip6.num] = nlp;
254 			t->ip6.pkts[(t->ip6.num)++] = pkt;
255 		}
256 	} else {
257 		/* Unknown/Unsupported type, drop the packet */
258 		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
259 		rte_pktmbuf_free(pkt);
260 		return;
261 	}
262 	/* Check if the packet has been processed inline. For inline protocol
263 	 * processed packets, the metadata in the mbuf can be used to identify
264 	 * the security processing done on the packet. The metadata will be
265 	 * used to retrieve the application registered userdata associated
266 	 * with the security session.
267 	 */
268 
269 	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
270 		struct ipsec_sa *sa;
271 		struct ipsec_mbuf_metadata *priv;
272 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
273 						rte_eth_dev_get_sec_ctx(
274 						pkt->port);
275 
276 		/* Retrieve the userdata registered. Here, the userdata
277 		 * registered is the SA pointer.
278 		 */
279 
280 		sa = (struct ipsec_sa *)
281 				rte_security_get_userdata(ctx, pkt->udata64);
282 
283 		if (sa == NULL) {
284 			/* userdata could not be retrieved */
285 			return;
286 		}
287 
288 		/* Save SA as priv member in mbuf. This will be used in the
289 	 * IPsec selector (SP-SA) check.
290 		 */
291 
292 		priv = get_priv(pkt);
293 		priv->sa = sa;
294 	}
295 }
296 
297 static inline void
298 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
299 		uint16_t nb_pkts)
300 {
301 	int32_t i;
302 
303 	t->ipsec.num = 0;
304 	t->ip4.num = 0;
305 	t->ip6.num = 0;
306 
307 	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
308 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
309 					void *));
310 		prepare_one_packet(pkts[i], t);
311 	}
312 	/* Process the remaining packets */
313 	for (; i < nb_pkts; i++)
314 		prepare_one_packet(pkts[i], t);
315 }
316 
317 static inline void
318 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
319 {
320 	struct ip *ip;
321 	struct ether_hdr *ethhdr;
322 
323 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
324 
325 	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
326 
327 	if (ip->ip_v == IPVERSION) {
328 		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
329 		pkt->l3_len = sizeof(struct ip);
330 		pkt->l2_len = ETHER_HDR_LEN;
331 
332 		ip->ip_sum = 0;
333 		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
334 	} else {
335 		pkt->ol_flags |= PKT_TX_IPV6;
336 		pkt->l3_len = sizeof(struct ip6_hdr);
337 		pkt->l2_len = ETHER_HDR_LEN;
338 
339 		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
340 	}
341 
342 	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
343 			sizeof(struct ether_addr));
344 	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
345 			sizeof(struct ether_addr));
346 }
347 
348 static inline void
349 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
350 {
351 	int32_t i;
352 	const int32_t prefetch_offset = 2;
353 
354 	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
355 		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
356 		prepare_tx_pkt(pkts[i], port);
357 	}
358 	/* Process the remaining packets */
359 	for (; i < nb_pkts; i++)
360 		prepare_tx_pkt(pkts[i], port);
361 }
362 
363 /* Send burst of packets on an output interface */
364 static inline int32_t
365 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
366 {
367 	struct rte_mbuf **m_table;
368 	int32_t ret;
369 	uint16_t queueid;
370 
371 	queueid = qconf->tx_queue_id[port];
372 	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
373 
374 	prepare_tx_burst(m_table, n, port);
375 
376 	ret = rte_eth_tx_burst(port, queueid, m_table, n);
377 	if (unlikely(ret < n)) {
378 		do {
379 			rte_pktmbuf_free(m_table[ret]);
380 		} while (++ret < n);
381 	}
382 
383 	return 0;
384 }
385 
386 /* Enqueue a single packet, and send burst if queue is filled */
387 static inline int32_t
388 send_single_packet(struct rte_mbuf *m, uint16_t port)
389 {
390 	uint32_t lcore_id;
391 	uint16_t len;
392 	struct lcore_conf *qconf;
393 
394 	lcore_id = rte_lcore_id();
395 
396 	qconf = &lcore_conf[lcore_id];
397 	len = qconf->tx_mbufs[port].len;
398 	qconf->tx_mbufs[port].m_table[len] = m;
399 	len++;
400 
401 	/* enough pkts to be sent */
402 	if (unlikely(len == MAX_PKT_BURST)) {
403 		send_burst(qconf, MAX_PKT_BURST, port);
404 		len = 0;
405 	}
406 
407 	qconf->tx_mbufs[port].len = len;
408 	return 0;
409 }
410 
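/*
 * Inbound SP (ACL) check.  A verdict is BYPASS, DISCARD or PROTECT with an
 * SA index.  The first "lim" packets arrived as clear text, so for them a
 * PROTECT verdict is only honoured if the packet was processed by inline
 * IPsec (PKT_RX_SEC_OFFLOAD); all surviving PROTECT packets are then
 * verified against the SA index of the matching rule via inbound_sa_check().
 */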
411 static inline void
412 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
413 		uint16_t lim)
414 {
415 	struct rte_mbuf *m;
416 	uint32_t i, j, res, sa_idx;
417 
418 	if (ip->num == 0 || sp == NULL)
419 		return;
420 
421 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
422 			ip->num, DEFAULT_MAX_CATEGORIES);
423 
424 	j = 0;
425 	for (i = 0; i < ip->num; i++) {
426 		m = ip->pkts[i];
427 		res = ip->res[i];
428 		if (res & BYPASS) {
429 			ip->pkts[j++] = m;
430 			continue;
431 		}
432 		if (res & DISCARD) {
433 			rte_pktmbuf_free(m);
434 			continue;
435 		}
436 
437 		/* Only check SPI match for processed IPSec packets */
438 		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
439 			rte_pktmbuf_free(m);
440 			continue;
441 		}
442 
443 		sa_idx = ip->res[i] & PROTECT_MASK;
444 		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
445 				!inbound_sa_check(sa, m, sa_idx)) {
446 			rte_pktmbuf_free(m);
447 			continue;
448 		}
449 		ip->pkts[j++] = m;
450 	}
451 	ip->num = j;
452 }
453 
454 static inline void
455 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
456 		struct ipsec_traffic *traffic)
457 {
458 	struct rte_mbuf *m;
459 	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
460 
461 	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
462 			traffic->ipsec.num, MAX_PKT_BURST);
463 
464 	n_ip4 = traffic->ip4.num;
465 	n_ip6 = traffic->ip6.num;
466 
467 	/* SP/ACL Inbound check ipsec and ip4 */
468 	for (i = 0; i < nb_pkts_in; i++) {
469 		m = traffic->ipsec.pkts[i];
470 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
471 		if (ip->ip_v == IPVERSION) {
472 			idx = traffic->ip4.num++;
473 			traffic->ip4.pkts[idx] = m;
474 			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
475 					uint8_t *, offsetof(struct ip, ip_p));
476 		} else if (ip->ip_v == IP6_VERSION) {
477 			idx = traffic->ip6.num++;
478 			traffic->ip6.pkts[idx] = m;
479 			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
480 					uint8_t *,
481 					offsetof(struct ip6_hdr, ip6_nxt));
482 		} else
483 			rte_pktmbuf_free(m);
484 	}
485 
486 	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
487 			n_ip4);
488 
489 	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
490 			n_ip6);
491 }
492 
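/*
 * Outbound SP (ACL) check: DISCARD drops the packet, BYPASS forwards it in
 * clear text, and PROTECT queues it on the "ipsec" list together with the
 * SA index taken from the matching rule.
 */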
493 static inline void
494 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
495 		struct traffic_type *ipsec)
496 {
497 	struct rte_mbuf *m;
498 	uint32_t i, j, sa_idx;
499 
500 	if (ip->num == 0 || sp == NULL)
501 		return;
502 
503 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
504 			ip->num, DEFAULT_MAX_CATEGORIES);
505 
506 	j = 0;
507 	for (i = 0; i < ip->num; i++) {
508 		m = ip->pkts[i];
509 		sa_idx = ip->res[i] & PROTECT_MASK;
510 		if (ip->res[i] & DISCARD)
511 			rte_pktmbuf_free(m);
512 		else if (ip->res[i] & BYPASS)
513 			ip->pkts[j++] = m;
514 		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
515 			ipsec->res[ipsec->num] = sa_idx;
516 			ipsec->pkts[ipsec->num++] = m;
517 		} else /* invalid SA idx */
518 			rte_pktmbuf_free(m);
519 	}
520 	ip->num = j;
521 }
522 
523 static inline void
524 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
525 		struct ipsec_traffic *traffic)
526 {
527 	struct rte_mbuf *m;
528 	uint16_t idx, nb_pkts_out, i;
529 
530 	/* Drop any IPsec traffic from protected ports */
531 	for (i = 0; i < traffic->ipsec.num; i++)
532 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
533 
534 	traffic->ipsec.num = 0;
535 
536 	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
537 
538 	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
539 
540 	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
541 			traffic->ipsec.res, traffic->ipsec.num,
542 			MAX_PKT_BURST);
543 
544 	for (i = 0; i < nb_pkts_out; i++) {
545 		m = traffic->ipsec.pkts[i];
546 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
547 		if (ip->ip_v == IPVERSION) {
548 			idx = traffic->ip4.num++;
549 			traffic->ip4.pkts[idx] = m;
550 		} else {
551 			idx = traffic->ip6.num++;
552 			traffic->ip6.pkts[idx] = m;
553 		}
554 	}
555 }
556 
557 static inline void
558 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
559 		struct ipsec_traffic *traffic)
560 {
561 	struct rte_mbuf *m;
562 	uint32_t nb_pkts_in, i, idx;
563 
564 	/* Drop any IPv4 traffic from unprotected ports */
565 	for (i = 0; i < traffic->ip4.num; i++)
566 		rte_pktmbuf_free(traffic->ip4.pkts[i]);
567 
568 	traffic->ip4.num = 0;
569 
570 	/* Drop any IPv6 traffic from unprotected ports */
571 	for (i = 0; i < traffic->ip6.num; i++)
572 		rte_pktmbuf_free(traffic->ip6.pkts[i]);
573 
574 	traffic->ip6.num = 0;
575 
576 	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
577 			traffic->ipsec.num, MAX_PKT_BURST);
578 
579 	for (i = 0; i < nb_pkts_in; i++) {
580 		m = traffic->ipsec.pkts[i];
581 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
582 		if (ip->ip_v == IPVERSION) {
583 			idx = traffic->ip4.num++;
584 			traffic->ip4.pkts[idx] = m;
585 		} else {
586 			idx = traffic->ip6.num++;
587 			traffic->ip6.pkts[idx] = m;
588 		}
589 	}
590 }
591 
592 static inline void
593 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
594 		struct ipsec_traffic *traffic)
595 {
596 	struct rte_mbuf *m;
597 	uint32_t nb_pkts_out, i;
598 	struct ip *ip;
599 
600 	/* Drop any IPsec traffic from protected ports */
601 	for (i = 0; i < traffic->ipsec.num; i++)
602 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
603 
604 	traffic->ipsec.num = 0;
605 
606 	for (i = 0; i < traffic->ip4.num; i++)
607 		traffic->ip4.res[i] = single_sa_idx;
608 
609 	for (i = 0; i < traffic->ip6.num; i++)
610 		traffic->ip6.res[i] = single_sa_idx;
611 
612 	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
613 			traffic->ip4.res, traffic->ip4.num,
614 			MAX_PKT_BURST);
615 
616 	/* They all use the same SA (ip4 or ip6 tunnel) */
617 	m = traffic->ipsec.pkts[i];
618 	ip = rte_pktmbuf_mtod(m, struct ip *);
619 	if (ip->ip_v == IPVERSION)
620 		traffic->ip4.num = nb_pkts_out;
621 	else
622 		traffic->ip6.num = nb_pkts_out;
623 }
624 
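/*
 * For packets already handled by inline crypto the next hop is taken from
 * the SA; everything else goes through the LPM lookup.  In both cases the
 * egress port id sits in the low byte of the hop value: IPv4 marks a valid
 * hop with RTE_LPM_LOOKUP_SUCCESS, IPv6 signals "no route" with -1.
 */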
625 static inline int32_t
626 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
627 {
628 	struct ipsec_mbuf_metadata *priv;
629 	struct ipsec_sa *sa;
630 
631 	priv = get_priv(pkt);
632 
633 	sa = priv->sa;
634 	if (unlikely(sa == NULL)) {
635 		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
636 		goto fail;
637 	}
638 
639 	if (is_ipv6)
640 		return sa->portid;
641 
642 	/* else */
643 	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
644 
645 fail:
646 	if (is_ipv6)
647 		return -1;
648 
649 	/* else */
650 	return 0;
651 }
652 
653 static inline void
654 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
655 {
656 	uint32_t hop[MAX_PKT_BURST * 2];
657 	uint32_t dst_ip[MAX_PKT_BURST * 2];
658 	int32_t pkt_hop = 0;
659 	uint16_t i, offset;
660 	uint16_t lpm_pkts = 0;
661 
662 	if (nb_pkts == 0)
663 		return;
664 
665 	/* Need to do an LPM lookup for non-inline packets. Inline packets will
666 	 * have port ID in the SA
667 	 */
668 
669 	for (i = 0; i < nb_pkts; i++) {
670 		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
671 			/* Security offload not enabled. So an LPM lookup is
672 			 * required to get the hop
673 			 */
674 			offset = offsetof(struct ip, ip_dst);
675 			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
676 					uint32_t *, offset);
677 			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
678 			lpm_pkts++;
679 		}
680 	}
681 
682 	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
683 
684 	lpm_pkts = 0;
685 
686 	for (i = 0; i < nb_pkts; i++) {
687 		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
688 			/* Read hop from the SA */
689 			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
690 		} else {
691 			/* Need to use hop returned by lookup */
692 			pkt_hop = hop[lpm_pkts++];
693 		}
694 
695 		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
696 			rte_pktmbuf_free(pkts[i]);
697 			continue;
698 		}
699 		send_single_packet(pkts[i], pkt_hop & 0xff);
700 	}
701 }
702 
703 static inline void
704 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
705 {
706 	int32_t hop[MAX_PKT_BURST * 2];
707 	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
708 	uint8_t *ip6_dst;
709 	int32_t pkt_hop = 0;
710 	uint16_t i, offset;
711 	uint16_t lpm_pkts = 0;
712 
713 	if (nb_pkts == 0)
714 		return;
715 
716 	/* Need to do an LPM lookup for non-inline packets. Inline packets will
717 	 * have port ID in the SA
718 	 */
719 
720 	for (i = 0; i < nb_pkts; i++) {
721 		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
722 			/* Security offload not enabled. So an LPM lookup is
723 			 * required to get the hop
724 			 */
725 			offset = offsetof(struct ip6_hdr, ip6_dst);
726 			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
727 					offset);
728 			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
729 			lpm_pkts++;
730 		}
731 	}
732 
733 	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
734 			lpm_pkts);
735 
736 	lpm_pkts = 0;
737 
738 	for (i = 0; i < nb_pkts; i++) {
739 		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
740 			/* Read hop from the SA */
741 			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
742 		} else {
743 			/* Need to use hop returned by lookup */
744 			pkt_hop = hop[lpm_pkts++];
745 		}
746 
747 		if (pkt_hop == -1) {
748 			rte_pktmbuf_free(pkts[i]);
749 			continue;
750 		}
751 		send_single_packet(pkts[i], pkt_hop & 0xff);
752 	}
753 }
754 
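/*
 * Per-burst dispatch: traffic received on unprotected ports takes the
 * inbound (decrypt) path, traffic from protected ports takes the outbound
 * (encrypt) path.  With --single-sa the SP (ACL) stage is skipped and every
 * packet uses the configured SA index.
 */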
755 static inline void
756 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
757 		uint8_t nb_pkts, uint16_t portid)
758 {
759 	struct ipsec_traffic traffic;
760 
761 	prepare_traffic(pkts, &traffic, nb_pkts);
762 
763 	if (unlikely(single_sa)) {
764 		if (UNPROTECTED_PORT(portid))
765 			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
766 		else
767 			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
768 	} else {
769 		if (UNPROTECTED_PORT(portid))
770 			process_pkts_inbound(&qconf->inbound, &traffic);
771 		else
772 			process_pkts_outbound(&qconf->outbound, &traffic);
773 	}
774 
775 	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
776 	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
777 }
778 
779 static inline void
780 drain_buffers(struct lcore_conf *qconf)
781 {
782 	struct buffer *buf;
783 	uint32_t portid;
784 
785 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
786 		buf = &qconf->tx_mbufs[portid];
787 		if (buf->len == 0)
788 			continue;
789 		send_burst(qconf, buf->len, portid);
790 		buf->len = 0;
791 	}
792 }
793 
794 /* main processing loop */
795 static int32_t
796 main_loop(__attribute__((unused)) void *dummy)
797 {
798 	struct rte_mbuf *pkts[MAX_PKT_BURST];
799 	uint32_t lcore_id;
800 	uint64_t prev_tsc, diff_tsc, cur_tsc;
801 	int32_t i, nb_rx;
802 	uint16_t portid;
803 	uint8_t queueid;
804 	struct lcore_conf *qconf;
805 	int32_t socket_id;
806 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
807 			/ US_PER_S * BURST_TX_DRAIN_US;
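	/* e.g. with a 2 GHz TSC this works out to roughly 2000 cycles per us,
	 * i.e. ~200000 cycles between TX buffer drains.
	 */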
808 	struct lcore_rx_queue *rxql;
809 
810 	prev_tsc = 0;
811 	lcore_id = rte_lcore_id();
812 	qconf = &lcore_conf[lcore_id];
813 	rxql = qconf->rx_queue_list;
814 	socket_id = rte_lcore_to_socket_id(lcore_id);
815 
816 	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
817 	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
818 	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
819 	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
820 	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
821 	qconf->inbound.cdev_map = cdev_map_in;
822 	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
823 	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
824 	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
825 	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
826 	qconf->outbound.cdev_map = cdev_map_out;
827 	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
828 
829 	if (qconf->nb_rx_queue == 0) {
830 		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
831 		return 0;
832 	}
833 
834 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
835 
836 	for (i = 0; i < qconf->nb_rx_queue; i++) {
837 		portid = rxql[i].port_id;
838 		queueid = rxql[i].queue_id;
839 		RTE_LOG(INFO, IPSEC,
840 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
841 			lcore_id, portid, queueid);
842 	}
843 
844 	while (1) {
845 		cur_tsc = rte_rdtsc();
846 
847 		/* TX queue buffer drain */
848 		diff_tsc = cur_tsc - prev_tsc;
849 
850 		if (unlikely(diff_tsc > drain_tsc)) {
851 			drain_buffers(qconf);
852 			prev_tsc = cur_tsc;
853 		}
854 
855 		/* Read packet from RX queues */
856 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
857 			portid = rxql[i].port_id;
858 			queueid = rxql[i].queue_id;
859 			nb_rx = rte_eth_rx_burst(portid, queueid,
860 					pkts, MAX_PKT_BURST);
861 
862 			if (nb_rx > 0)
863 				process_pkts(qconf, pkts, nb_rx, portid);
864 		}
865 	}
866 }
867 
868 static int32_t
869 check_params(void)
870 {
871 	uint8_t lcore;
872 	uint16_t portid;
873 	uint16_t i;
874 	int32_t socket_id;
875 
876 	if (lcore_params == NULL) {
877 		printf("Error: No port/queue/core mappings\n");
878 		return -1;
879 	}
880 
881 	for (i = 0; i < nb_lcore_params; ++i) {
882 		lcore = lcore_params[i].lcore_id;
883 		if (!rte_lcore_is_enabled(lcore)) {
884 			printf("error: lcore %hhu is not enabled in "
885 				"lcore mask\n", lcore);
886 			return -1;
887 		}
888 		socket_id = rte_lcore_to_socket_id(lcore);
889 		if (socket_id != 0 && numa_on == 0) {
890 			printf("warning: lcore %hhu is on socket %d "
891 				"with numa off\n",
892 				lcore, socket_id);
893 		}
894 		portid = lcore_params[i].port_id;
895 		if ((enabled_port_mask & (1 << portid)) == 0) {
896 			printf("port %u is not enabled in port mask\n", portid);
897 			return -1;
898 		}
899 		if (!rte_eth_dev_is_valid_port(portid)) {
900 			printf("port %u is not present on the board\n", portid);
901 			return -1;
902 		}
903 	}
904 	return 0;
905 }
906 
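/*
 * A port's RX queue count is the highest queue id referenced for that port
 * in the --config list, plus one.
 */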
907 static uint8_t
908 get_port_nb_rx_queues(const uint16_t port)
909 {
910 	int32_t queue = -1;
911 	uint16_t i;
912 
913 	for (i = 0; i < nb_lcore_params; ++i) {
914 		if (lcore_params[i].port_id == port &&
915 				lcore_params[i].queue_id > queue)
916 			queue = lcore_params[i].queue_id;
917 	}
918 	return (uint8_t)(++queue);
919 }
920 
921 static int32_t
922 init_lcore_rx_queues(void)
923 {
924 	uint16_t i, nb_rx_queue;
925 	uint8_t lcore;
926 
927 	for (i = 0; i < nb_lcore_params; ++i) {
928 		lcore = lcore_params[i].lcore_id;
929 		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
930 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
931 			printf("error: too many queues (%u) for lcore: %u\n",
932 					nb_rx_queue + 1, lcore);
933 			return -1;
934 		}
935 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
936 			lcore_params[i].port_id;
937 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
938 			lcore_params[i].queue_id;
939 		lcore_conf[lcore].nb_rx_queue++;
940 	}
941 	return 0;
942 }
943 
944 /* display usage */
945 static void
946 print_usage(const char *prgname)
947 {
948 	fprintf(stderr, "%s [EAL options] --"
949 		" -p PORTMASK"
950 		" [-P]"
951 		" [-u PORTMASK]"
952 		" [-j FRAMESIZE]"
953 		" -f CONFIG_FILE"
954 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
955 		" [--single-sa SAIDX]"
956 		" [--cryptodev_mask MASK]"
957 		"\n\n"
958 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
959 		"  -P : Enable promiscuous mode\n"
960 		"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
961 		"  -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
962 		"                packet size\n"
963 		"  -f CONFIG_FILE: Configuration file\n"
964 		"  --config (port,queue,lcore): Rx queue configuration\n"
965 		"  --single-sa SAIDX: Use single SA index for outbound traffic,\n"
966 		"                     bypassing the SP\n"
967 		"  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
968 		"                         devices to configure\n"
969 		"\n",
970 		prgname);
971 }
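/*
 * Illustrative invocation (adjust the EAL options, masks, mappings and the
 * configuration file to the actual setup):
 *
 *   ./ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 -- \
 *           -p 0xf -P -u 0x3 \
 *           --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" \
 *           -f /path/to/config_file
 */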
972 
973 static int32_t
974 parse_portmask(const char *portmask)
975 {
976 	char *end = NULL;
977 	unsigned long pm;
978 
979 	/* parse hexadecimal string */
980 	pm = strtoul(portmask, &end, 16);
981 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
982 		return -1;
983 
984 	if ((pm == 0) && errno)
985 		return -1;
986 
987 	return pm;
988 }
989 
990 static int32_t
991 parse_decimal(const char *str)
992 {
993 	char *end = NULL;
994 	unsigned long num;
995 
996 	num = strtoul(str, &end, 10);
997 	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
998 		return -1;
999 
1000 	return num;
1001 }
1002 
1003 static int32_t
1004 parse_config(const char *q_arg)
1005 {
1006 	char s[256];
1007 	const char *p, *p0 = q_arg;
1008 	char *end;
1009 	enum fieldnames {
1010 		FLD_PORT = 0,
1011 		FLD_QUEUE,
1012 		FLD_LCORE,
1013 		_NUM_FLD
1014 	};
1015 	unsigned long int_fld[_NUM_FLD];
1016 	char *str_fld[_NUM_FLD];
1017 	int32_t i;
1018 	uint32_t size;
1019 
1020 	nb_lcore_params = 0;
1021 
1022 	while ((p = strchr(p0, '(')) != NULL) {
1023 		++p;
1024 		p0 = strchr(p, ')');
1025 		if (p0 == NULL)
1026 			return -1;
1027 
1028 		size = p0 - p;
1029 		if (size >= sizeof(s))
1030 			return -1;
1031 
1032 		snprintf(s, sizeof(s), "%.*s", size, p);
1033 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1034 				_NUM_FLD)
1035 			return -1;
1036 		for (i = 0; i < _NUM_FLD; i++) {
1037 			errno = 0;
1038 			int_fld[i] = strtoul(str_fld[i], &end, 0);
1039 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1040 				return -1;
1041 		}
1042 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1043 			printf("exceeded max number of lcore params: %hu\n",
1044 				nb_lcore_params);
1045 			return -1;
1046 		}
1047 		lcore_params_array[nb_lcore_params].port_id =
1048 			(uint8_t)int_fld[FLD_PORT];
1049 		lcore_params_array[nb_lcore_params].queue_id =
1050 			(uint8_t)int_fld[FLD_QUEUE];
1051 		lcore_params_array[nb_lcore_params].lcore_id =
1052 			(uint8_t)int_fld[FLD_LCORE];
1053 		++nb_lcore_params;
1054 	}
1055 	lcore_params = lcore_params_array;
1056 	return 0;
1057 }
1058 
1059 static int32_t
1060 parse_args(int32_t argc, char **argv)
1061 {
1062 	int32_t opt, ret;
1063 	char **argvopt;
1064 	int32_t option_index;
1065 	char *prgname = argv[0];
1066 	int32_t f_present = 0;
1067 
1068 	argvopt = argv;
1069 
1070 	while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
1071 				lgopts, &option_index)) != EOF) {
1072 
1073 		switch (opt) {
1074 		case 'p':
1075 			enabled_port_mask = parse_portmask(optarg);
1076 			if (enabled_port_mask == 0) {
1077 				printf("invalid portmask\n");
1078 				print_usage(prgname);
1079 				return -1;
1080 			}
1081 			break;
1082 		case 'P':
1083 			printf("Promiscuous mode selected\n");
1084 			promiscuous_on = 1;
1085 			break;
1086 		case 'u':
1087 			unprotected_port_mask = parse_portmask(optarg);
1088 			if (unprotected_port_mask == 0) {
1089 				printf("invalid unprotected portmask\n");
1090 				print_usage(prgname);
1091 				return -1;
1092 			}
1093 			break;
1094 		case 'f':
1095 			if (f_present == 1) {
1096 				printf("\"-f\" option present more than "
1097 					"once!\n");
1098 				print_usage(prgname);
1099 				return -1;
1100 			}
1101 			if (parse_cfg_file(optarg) < 0) {
1102 				printf("parsing file \"%s\" failed\n",
1103 					optarg);
1104 				print_usage(prgname);
1105 				return -1;
1106 			}
1107 			f_present = 1;
1108 			break;
1109 		case 'j':
1110 			{
1111 				int32_t size = parse_decimal(optarg);
1112 				if (size <= 1518) {
1113 					printf("Invalid jumbo frame size\n");
1114 					if (size < 0) {
1115 						print_usage(prgname);
1116 						return -1;
1117 					}
1118 					printf("Using default value 9000\n");
1119 					frame_size = 9000;
1120 				} else {
1121 					frame_size = size;
1122 				}
1123 			}
1124 			printf("Enabled jumbo frames size %u\n", frame_size);
1125 			break;
1126 		case CMD_LINE_OPT_CONFIG_NUM:
1127 			ret = parse_config(optarg);
1128 			if (ret) {
1129 				printf("Invalid config\n");
1130 				print_usage(prgname);
1131 				return -1;
1132 			}
1133 			break;
1134 		case CMD_LINE_OPT_SINGLE_SA_NUM:
1135 			ret = parse_decimal(optarg);
1136 			if (ret == -1) {
1137 				printf("Invalid argument[sa_idx]\n");
1138 				print_usage(prgname);
1139 				return -1;
1140 			}
1141 
1142 			/* else */
1143 			single_sa = 1;
1144 			single_sa_idx = ret;
1145 			printf("Configured with single SA index %u\n",
1146 					single_sa_idx);
1147 			break;
1148 		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1149 			ret = parse_portmask(optarg);
1150 			if (ret == -1) {
1151 				printf("Invalid argument[portmask]\n");
1152 				print_usage(prgname);
1153 				return -1;
1154 			}
1155 
1156 			/* else */
1157 			enabled_cryptodev_mask = ret;
1158 			break;
1159 		default:
1160 			print_usage(prgname);
1161 			return -1;
1162 		}
1163 	}
1164 
1165 	if (f_present == 0) {
1166 		printf("Mandatory option \"-f\" not present\n");
1167 		return -1;
1168 	}
1169 
1170 	if (optind >= 0)
1171 		argv[optind-1] = prgname;
1172 
1173 	ret = optind-1;
1174 	optind = 1; /* reset getopt lib */
1175 	return ret;
1176 }
1177 
1178 static void
1179 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1180 {
1181 	char buf[ETHER_ADDR_FMT_SIZE];
1182 	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1183 	printf("%s%s", name, buf);
1184 }
1185 
1186 /* Check the link status of all ports, waiting up to 9s, and print the result */
1187 static void
1188 check_all_ports_link_status(uint32_t port_mask)
1189 {
1190 #define CHECK_INTERVAL 100 /* 100ms */
1191 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1192 	uint16_t portid;
1193 	uint8_t count, all_ports_up, print_flag = 0;
1194 	struct rte_eth_link link;
1195 
1196 	printf("\nChecking link status");
1197 	fflush(stdout);
1198 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1199 		all_ports_up = 1;
1200 		RTE_ETH_FOREACH_DEV(portid) {
1201 			if ((port_mask & (1 << portid)) == 0)
1202 				continue;
1203 			memset(&link, 0, sizeof(link));
1204 			rte_eth_link_get_nowait(portid, &link);
1205 			/* print link status if flag set */
1206 			if (print_flag == 1) {
1207 				if (link.link_status)
1208 					printf(
1209 					"Port%d Link Up - speed %u Mbps -%s\n",
1210 						portid, link.link_speed,
1211 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1212 					("full-duplex") : ("half-duplex"));
1213 				else
1214 					printf("Port %d Link Down\n", portid);
1215 				continue;
1216 			}
1217 			/* clear all_ports_up flag if any link down */
1218 			if (link.link_status == ETH_LINK_DOWN) {
1219 				all_ports_up = 0;
1220 				break;
1221 			}
1222 		}
1223 		/* after finally printing all link status, get out */
1224 		if (print_flag == 1)
1225 			break;
1226 
1227 		if (all_ports_up == 0) {
1228 			printf(".");
1229 			fflush(stdout);
1230 			rte_delay_ms(CHECK_INTERVAL);
1231 		}
1232 
1233 		/* set the print_flag if all ports up or timeout */
1234 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1235 			print_flag = 1;
1236 			printf("done\n");
1237 		}
1238 	}
1239 }
1240 
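/*
 * cdev_map_in/cdev_map_out map a (lcore, cipher, auth, AEAD algorithm) key
 * to an index into that lcore's ipsec_ctx->tbl[], which records the
 * cryptodev id and queue pair the lcore uses for matching SAs.
 */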
1241 static int32_t
1242 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1243 		uint16_t qp, struct lcore_params *params,
1244 		struct ipsec_ctx *ipsec_ctx,
1245 		const struct rte_cryptodev_capabilities *cipher,
1246 		const struct rte_cryptodev_capabilities *auth,
1247 		const struct rte_cryptodev_capabilities *aead)
1248 {
1249 	int32_t ret = 0;
1250 	unsigned long i;
1251 	struct cdev_key key = { 0 };
1252 
1253 	key.lcore_id = params->lcore_id;
1254 	if (cipher)
1255 		key.cipher_algo = cipher->sym.cipher.algo;
1256 	if (auth)
1257 		key.auth_algo = auth->sym.auth.algo;
1258 	if (aead)
1259 		key.aead_algo = aead->sym.aead.algo;
1260 
1261 	ret = rte_hash_lookup(map, &key);
1262 	if (ret != -ENOENT)
1263 		return 0;
1264 
1265 	for (i = 0; i < ipsec_ctx->nb_qps; i++)
1266 		if (ipsec_ctx->tbl[i].id == cdev_id)
1267 			break;
1268 
1269 	if (i == ipsec_ctx->nb_qps) {
1270 		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1271 			printf("Maximum number of crypto devices assigned to "
1272 				"a core, increase MAX_QP_PER_LCORE value\n");
1273 			return 0;
1274 		}
1275 		ipsec_ctx->tbl[i].id = cdev_id;
1276 		ipsec_ctx->tbl[i].qp = qp;
1277 		ipsec_ctx->nb_qps++;
1278 		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1279 				"(cdev_id_qp %lu)\n", str, key.lcore_id,
1280 				cdev_id, qp, i);
1281 	}
1282 
1283 	ret = rte_hash_add_key_data(map, &key, (void *)i);
1284 	if (ret < 0) {
1285 		printf("Faled to insert cdev mapping for (lcore %u, "
1286 				"cdev %u, qp %u), errno %d\n",
1287 				key.lcore_id, ipsec_ctx->tbl[i].id,
1288 				ipsec_ctx->tbl[i].qp, ret);
1289 		return 0;
1290 	}
1291 
1292 	return 1;
1293 }
1294 
1295 static int32_t
1296 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1297 		uint16_t qp, struct lcore_params *params)
1298 {
1299 	int32_t ret = 0;
1300 	const struct rte_cryptodev_capabilities *i, *j;
1301 	struct rte_hash *map;
1302 	struct lcore_conf *qconf;
1303 	struct ipsec_ctx *ipsec_ctx;
1304 	const char *str;
1305 
1306 	qconf = &lcore_conf[params->lcore_id];
1307 
1308 	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1309 		map = cdev_map_out;
1310 		ipsec_ctx = &qconf->outbound;
1311 		str = "Outbound";
1312 	} else {
1313 		map = cdev_map_in;
1314 		ipsec_ctx = &qconf->inbound;
1315 		str = "Inbound";
1316 	}
1317 
1318 	/* Require cryptodevs that support operation chaining */
1319 	if (!(dev_info->feature_flags &
1320 				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1321 		return ret;
1322 
1323 	for (i = dev_info->capabilities;
1324 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1325 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1326 			continue;
1327 
1328 		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1329 			ret |= add_mapping(map, str, cdev_id, qp, params,
1330 					ipsec_ctx, NULL, NULL, i);
1331 			continue;
1332 		}
1333 
1334 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1335 			continue;
1336 
1337 		for (j = dev_info->capabilities;
1338 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1339 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1340 				continue;
1341 
1342 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1343 				continue;
1344 
1345 			ret |= add_mapping(map, str, cdev_id, qp, params,
1346 						ipsec_ctx, i, j, NULL);
1347 		}
1348 	}
1349 
1350 	return ret;
1351 }
1352 
1353 /* Check if the device is enabled by cryptodev_mask */
1354 static int
1355 check_cryptodev_mask(uint8_t cdev_id)
1356 {
1357 	if (enabled_cryptodev_mask & (UINT64_C(1) << cdev_id))
1358 		return 0;
1359 
1360 	return -1;
1361 }
1362 
1363 static int32_t
1364 cryptodevs_init(void)
1365 {
1366 	struct rte_cryptodev_config dev_conf;
1367 	struct rte_cryptodev_qp_conf qp_conf;
1368 	uint16_t idx, max_nb_qps, qp, i;
1369 	int16_t cdev_id, port_id;
1370 	struct rte_hash_parameters params = { 0 };
1371 
1372 	params.entries = CDEV_MAP_ENTRIES;
1373 	params.key_len = sizeof(struct cdev_key);
1374 	params.hash_func = rte_jhash;
1375 	params.hash_func_init_val = 0;
1376 	params.socket_id = rte_socket_id();
1377 
1378 	params.name = "cdev_map_in";
1379 	cdev_map_in = rte_hash_create(&params);
1380 	if (cdev_map_in == NULL)
1381 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1382 				rte_errno);
1383 
1384 	params.name = "cdev_map_out";
1385 	cdev_map_out = rte_hash_create(&params);
1386 	if (cdev_map_out == NULL)
1387 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1388 				rte_errno);
1389 
1390 	printf("lcore/cryptodev/qp mappings:\n");
1391 
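	/*
	 * The per-socket session mempool element size must cover the largest
	 * private session size reported by any crypto device or any
	 * security-capable ethdev, so scan them all first.
	 */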
1392 	uint32_t max_sess_sz = 0, sess_sz;
1393 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1394 		void *sec_ctx;
1395 
1396 		/* Get crypto priv session size */
1397 		sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
1398 		if (sess_sz > max_sess_sz)
1399 			max_sess_sz = sess_sz;
1400 
1401 		/*
1402 		 * If crypto device is security capable, need to check the
1403 		 * size of security session as well.
1404 		 */
1405 
1406 		/* Get security context of the crypto device */
1407 		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1408 		if (sec_ctx == NULL)
1409 			continue;
1410 
1411 		/* Get size of security session */
1412 		sess_sz = rte_security_session_get_size(sec_ctx);
1413 		if (sess_sz > max_sess_sz)
1414 			max_sess_sz = sess_sz;
1415 	}
1416 	RTE_ETH_FOREACH_DEV(port_id) {
1417 		void *sec_ctx;
1418 
1419 		if ((enabled_port_mask & (1 << port_id)) == 0)
1420 			continue;
1421 
1422 		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
1423 		if (sec_ctx == NULL)
1424 			continue;
1425 
1426 		sess_sz = rte_security_session_get_size(sec_ctx);
1427 		if (sess_sz > max_sess_sz)
1428 			max_sess_sz = sess_sz;
1429 	}
1430 
1431 	idx = 0;
1432 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1433 		struct rte_cryptodev_info cdev_info;
1434 
1435 		if (check_cryptodev_mask((uint8_t)cdev_id))
1436 			continue;
1437 
1438 		rte_cryptodev_info_get(cdev_id, &cdev_info);
1439 
1440 		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1441 			max_nb_qps = cdev_info.max_nb_queue_pairs;
1442 		else
1443 			max_nb_qps = nb_lcore_params;
1444 
1445 		qp = 0;
1446 		i = 0;
1447 		while (qp < max_nb_qps && i < nb_lcore_params) {
1448 			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1449 						&lcore_params[idx]))
1450 				qp++;
1451 			idx++;
1452 			idx = idx % nb_lcore_params;
1453 			i++;
1454 		}
1455 
1456 		if (qp == 0)
1457 			continue;
1458 
1459 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1460 		dev_conf.nb_queue_pairs = qp;
1461 
1462 		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1463 		if (dev_max_sess != 0 && dev_max_sess < (CDEV_MP_NB_OBJS / 2))
1464 			rte_exit(EXIT_FAILURE,
1465 				"Device does not support at least %u "
1466 				"sessions", CDEV_MP_NB_OBJS / 2);
1467 
1468 		if (!socket_ctx[dev_conf.socket_id].session_pool) {
1469 			char mp_name[RTE_MEMPOOL_NAMESIZE];
1470 			struct rte_mempool *sess_mp;
1471 
1472 			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1473 					"sess_mp_%u", dev_conf.socket_id);
1474 			sess_mp = rte_mempool_create(mp_name,
1475 					CDEV_MP_NB_OBJS,
1476 					max_sess_sz,
1477 					CDEV_MP_CACHE_SZ,
1478 					0, NULL, NULL, NULL,
1479 					NULL, dev_conf.socket_id,
1480 					0);
1481 			if (sess_mp == NULL)
1482 				rte_exit(EXIT_FAILURE,
1483 					"Cannot create session pool on socket %d\n",
1484 					dev_conf.socket_id);
1485 			else
1486 				printf("Allocated session pool on socket %d\n",
1487 					dev_conf.socket_id);
1488 			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
1489 		}
1490 
1491 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
1492 			rte_panic("Failed to initialize cryptodev %u\n",
1493 					cdev_id);
1494 
1495 		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1496 		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1497 			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1498 					&qp_conf, dev_conf.socket_id,
1499 					socket_ctx[dev_conf.socket_id].session_pool))
1500 				rte_panic("Failed to setup queue %u for "
1501 						"cdev_id %u\n", qp, cdev_id);
1502 
1503 		if (rte_cryptodev_start(cdev_id))
1504 			rte_panic("Failed to start cryptodev %u\n",
1505 					cdev_id);
1506 	}
1507 
1508 	/* create session pools for eth devices that implement security */
1509 	RTE_ETH_FOREACH_DEV(port_id) {
1510 		if ((enabled_port_mask & (1 << port_id)) &&
1511 				rte_eth_dev_get_sec_ctx(port_id)) {
1512 			int socket_id = rte_eth_dev_socket_id(port_id);
1513 
1514 			if (!socket_ctx[socket_id].session_pool) {
1515 				char mp_name[RTE_MEMPOOL_NAMESIZE];
1516 				struct rte_mempool *sess_mp;
1517 
1518 				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1519 						"sess_mp_%u", socket_id);
1520 				sess_mp = rte_mempool_create(mp_name,
1521 						CDEV_MP_NB_OBJS,
1522 						max_sess_sz,
1523 						CDEV_MP_CACHE_SZ,
1524 						0, NULL, NULL, NULL,
1525 						NULL, socket_id,
1526 						0);
1527 				if (sess_mp == NULL)
1528 					rte_exit(EXIT_FAILURE,
1529 						"Cannot create session pool "
1530 						"on socket %d\n", socket_id);
1531 				else
1532 					printf("Allocated session pool "
1533 						"on socket %d\n", socket_id);
1534 				socket_ctx[socket_id].session_pool = sess_mp;
1535 			}
1536 		}
1537 	}
1538 
1539 
1540 	printf("\n");
1541 
1542 	return 0;
1543 }
1544 
1545 static void
1546 port_init(uint16_t portid)
1547 {
1548 	struct rte_eth_dev_info dev_info;
1549 	struct rte_eth_txconf *txconf;
1550 	uint16_t nb_tx_queue, nb_rx_queue;
1551 	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1552 	int32_t ret, socket_id;
1553 	struct lcore_conf *qconf;
1554 	struct ether_addr ethaddr;
1555 	struct rte_eth_conf local_port_conf = port_conf;
1556 
1557 	rte_eth_dev_info_get(portid, &dev_info);
1558 
1559 	printf("Configuring device port %u:\n", portid);
1560 
1561 	rte_eth_macaddr_get(portid, &ethaddr);
1562 	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
1563 	print_ethaddr("Address: ", &ethaddr);
1564 	printf("\n");
1565 
1566 	nb_rx_queue = get_port_nb_rx_queues(portid);
1567 	nb_tx_queue = nb_lcores;
1568 
1569 	if (nb_rx_queue > dev_info.max_rx_queues)
1570 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1571 				"(max rx queue is %u)\n",
1572 				nb_rx_queue, dev_info.max_rx_queues);
1573 
1574 	if (nb_tx_queue > dev_info.max_tx_queues)
1575 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1576 				"(max tx queue is %u)\n",
1577 				nb_tx_queue, dev_info.max_tx_queues);
1578 
1579 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1580 			nb_rx_queue, nb_tx_queue);
1581 
1582 	if (frame_size) {
1583 		local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1584 		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1585 	}
1586 
1587 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
1588 		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
1589 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
1590 		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
1591 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1592 		local_port_conf.txmode.offloads |=
1593 			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1594 
1595 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1596 		dev_info.flow_type_rss_offloads;
1597 	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1598 			port_conf.rx_adv_conf.rss_conf.rss_hf) {
1599 		printf("Port %u modified RSS hash function based on hardware support,"
1600 			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
1601 			portid,
1602 			port_conf.rx_adv_conf.rss_conf.rss_hf,
1603 			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1604 	}
1605 
1606 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1607 			&local_port_conf);
1608 	if (ret < 0)
1609 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
1610 				"err=%d, port=%d\n", ret, portid);
1611 
1612 	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1613 	if (ret < 0)
1614 		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1615 				"err=%d, port=%d\n", ret, portid);
1616 
1617 	/* init one TX queue per lcore */
1618 	tx_queueid = 0;
1619 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1620 		if (rte_lcore_is_enabled(lcore_id) == 0)
1621 			continue;
1622 
1623 		if (numa_on)
1624 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1625 		else
1626 			socket_id = 0;
1627 
1628 		/* init TX queue */
1629 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1630 
1631 		txconf = &dev_info.default_txconf;
1632 		txconf->offloads = local_port_conf.txmode.offloads;
1633 
1634 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1635 				socket_id, txconf);
1636 		if (ret < 0)
1637 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1638 					"err=%d, port=%d\n", ret, portid);
1639 
1640 		qconf = &lcore_conf[lcore_id];
1641 		qconf->tx_queue_id[portid] = tx_queueid;
1642 		tx_queueid++;
1643 
1644 		/* init RX queues */
1645 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1646 			struct rte_eth_rxconf rxq_conf;
1647 
1648 			if (portid != qconf->rx_queue_list[queue].port_id)
1649 				continue;
1650 
1651 			rx_queueid = qconf->rx_queue_list[queue].queue_id;
1652 
1653 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1654 					socket_id);
1655 
1656 			rxq_conf = dev_info.default_rxconf;
1657 			rxq_conf.offloads = local_port_conf.rxmode.offloads;
1658 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
1659 					nb_rxd,	socket_id, &rxq_conf,
1660 					socket_ctx[socket_id].mbuf_pool);
1661 			if (ret < 0)
1662 				rte_exit(EXIT_FAILURE,
1663 					"rte_eth_rx_queue_setup: err=%d, "
1664 					"port=%d\n", ret, portid);
1665 		}
1666 	}
1667 	printf("\n");
1668 }
1669 
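/*
 * One mbuf pool per NUMA socket; the data room is sized from -j FRAMESIZE
 * when jumbo frames are enabled, otherwise RTE_MBUF_DEFAULT_BUF_SIZE, and
 * the private area holds the per-packet IPsec metadata.
 */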
1670 static void
1671 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
1672 {
1673 	char s[64];
1674 	uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
1675 			RTE_MBUF_DEFAULT_BUF_SIZE;
1676 
1677 
1678 	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
1679 	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
1680 			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
1681 			buff_size,
1682 			socket_id);
1683 	if (ctx->mbuf_pool == NULL)
1684 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
1685 				socket_id);
1686 	else
1687 		printf("Allocated mbuf pool on socket %d\n", socket_id);
1688 }
1689 
1690 static inline int
1691 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
1692 {
1693 	struct ipsec_sa *sa;
1694 
1695 	/* For inline protocol processing, the metadata in the event will
1696 	 * uniquely identify the security session which raised the event.
1697 	 * Application would then need the userdata it had registered with the
1698 	 * security session to process the event.
1699 	 */
1700 
1701 	sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
1702 
1703 	if (sa == NULL) {
1704 		/* userdata could not be retrieved */
1705 		return -1;
1706 	}
1707 
1708 	/* Sequence number overflow; the SA needs to be re-established */
1709 	RTE_SET_USED(sa);
1710 	return 0;
1711 }
1712 
1713 static int
1714 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
1715 		 void *param, void *ret_param)
1716 {
1717 	uint64_t md;
1718 	struct rte_eth_event_ipsec_desc *event_desc = NULL;
1719 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
1720 					rte_eth_dev_get_sec_ctx(port_id);
1721 
1722 	RTE_SET_USED(param);
1723 
1724 	if (type != RTE_ETH_EVENT_IPSEC)
1725 		return -1;
1726 
1727 	event_desc = ret_param;
1728 	if (event_desc == NULL) {
1729 		printf("Event descriptor not set\n");
1730 		return -1;
1731 	}
1732 
1733 	md = event_desc->metadata;
1734 
1735 	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
1736 		return inline_ipsec_event_esn_overflow(ctx, md);
1737 	else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
1738 		printf("Invalid IPsec event reported\n");
1739 		return -1;
1740 	}
1741 
1742 	return -1;
1743 }
1744 
1745 int32_t
1746 main(int32_t argc, char **argv)
1747 {
1748 	int32_t ret;
1749 	uint32_t lcore_id;
1750 	uint8_t socket_id;
1751 	uint16_t portid;
1752 
1753 	/* init EAL */
1754 	ret = rte_eal_init(argc, argv);
1755 	if (ret < 0)
1756 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1757 	argc -= ret;
1758 	argv += ret;
1759 
1760 	/* parse application arguments (after the EAL ones) */
1761 	ret = parse_args(argc, argv);
1762 	if (ret < 0)
1763 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
1764 
1765 	if ((unprotected_port_mask & enabled_port_mask) !=
1766 			unprotected_port_mask)
1767 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
1768 				unprotected_port_mask);
1769 
1770 	if (check_params() < 0)
1771 		rte_exit(EXIT_FAILURE, "check_params failed\n");
1772 
1773 	ret = init_lcore_rx_queues();
1774 	if (ret < 0)
1775 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1776 
1777 	nb_lcores = rte_lcore_count();
1778 
1779 	/* Replicate each context per socket */
1780 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1781 		if (rte_lcore_is_enabled(lcore_id) == 0)
1782 			continue;
1783 
1784 		if (numa_on)
1785 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1786 		else
1787 			socket_id = 0;
1788 
1789 		if (socket_ctx[socket_id].mbuf_pool)
1790 			continue;
1791 
1792 		sa_init(&socket_ctx[socket_id], socket_id);
1793 
1794 		sp4_init(&socket_ctx[socket_id], socket_id);
1795 
1796 		sp6_init(&socket_ctx[socket_id], socket_id);
1797 
1798 		rt_init(&socket_ctx[socket_id], socket_id);
1799 
1800 		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
1801 	}
1802 
1803 	RTE_ETH_FOREACH_DEV(portid) {
1804 		if ((enabled_port_mask & (1 << portid)) == 0)
1805 			continue;
1806 
1807 		port_init(portid);
1808 	}
1809 
1810 	cryptodevs_init();
1811 
1812 	/* start ports */
1813 	RTE_ETH_FOREACH_DEV(portid) {
1814 		if ((enabled_port_mask & (1 << portid)) == 0)
1815 			continue;
1816 
1817 		/* Start device */
1818 		ret = rte_eth_dev_start(portid);
1819 		if (ret < 0)
1820 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1821 					"err=%d, port=%d\n", ret, portid);
1822 		/*
1823 		 * If enabled, put device in promiscuous mode.
1824 		 * This allows IO forwarding mode to forward packets
1825 		 * to itself through 2 cross-connected ports of the
1826 		 * target machine.
1827 		 */
1828 		if (promiscuous_on)
1829 			rte_eth_promiscuous_enable(portid);
1830 
1831 		rte_eth_dev_callback_register(portid,
1832 			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
1833 	}
1834 
1835 	check_all_ports_link_status(enabled_port_mask);
1836 
1837 	/* launch per-lcore init on every lcore */
1838 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1839 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1840 		if (rte_eal_wait_lcore(lcore_id) < 0)
1841 			return -1;
1842 	}
1843 
1844 	return 0;
1845 }
1846