1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2021 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17 
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_ip.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
43 
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
46 
47 #include "l3fwd.h"
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
50 
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
53 
54 #define MAX_LCORE_PARAMS 1024
55 
56 /* Static global variables used within this file. */
57 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
58 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
59 
60 /** Ports set in promiscuous mode off by default. */
61 static int promiscuous_on;
62 
63 /* Select Longest Prefix Match, Exact Match or Forwarding Information Base. */
64 enum L3FWD_LOOKUP_MODE {
65 	L3FWD_LOOKUP_DEFAULT,
66 	L3FWD_LOOKUP_LPM,
67 	L3FWD_LOOKUP_EM,
68 	L3FWD_LOOKUP_FIB
69 };
70 static enum L3FWD_LOOKUP_MODE lookup_mode;
71 
72 /* Global variables. */
73 
74 static int numa_on = 1; /**< NUMA is enabled by default. */
75 static int parse_ptype; /**< Parse packet type using rx callback;
76 			 * disabled by default. */
77 static int per_port_pool; /**< Use separate buffer pools per port;
78 			  * disabled by default. */
79 
80 volatile bool force_quit;
81 
82 /* ethernet addresses of ports */
83 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
84 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
85 
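/*
 * val_eth[port] caches the pre-built Ethernet addresses for each port:
 * the destination MAC in bytes 0-5 and the port's own (source) MAC in
 * bytes 6-11, so the forwarding paths can write both in one copy.
 * See parse_eth_dest() and l3fwd_poll_resource_setup().
 */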
86 xmm_t val_eth[RTE_MAX_ETHPORTS];
87 
88 /* mask of enabled ports */
89 uint32_t enabled_port_mask;
90 
91 /* Used only in exact match mode. */
92 int ipv6; /**< ipv6 is false by default. */
93 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
94 
95 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
96 
97 struct lcore_params {
98 	uint16_t port_id;
99 	uint8_t queue_id;
100 	uint8_t lcore_id;
101 } __rte_cache_aligned;
102 
103 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
104 static struct lcore_params lcore_params_array_default[] = {
105 	{0, 0, 2},
106 	{0, 1, 2},
107 	{0, 2, 2},
108 	{1, 0, 2},
109 	{1, 1, 2},
110 	{1, 2, 2},
111 	{2, 0, 2},
112 	{3, 0, 3},
113 	{3, 1, 3},
114 };
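/*
 * For illustration, the default table above is equivalent to running with
 * --config="(0,0,2),(0,1,2),(0,2,2),(1,0,2),(1,1,2),(1,2,2),(2,0,2),(3,0,3),(3,1,3)",
 * i.e. lcore 2 polls queues 0-2 of ports 0 and 1 plus queue 0 of port 2,
 * and lcore 3 polls queues 0 and 1 of port 3.
 */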
115 
116 static struct lcore_params * lcore_params = lcore_params_array_default;
117 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
118 				sizeof(lcore_params_array_default[0]);
119 
120 static struct rte_eth_conf port_conf = {
121 	.rxmode = {
122 		.mq_mode = RTE_ETH_MQ_RX_RSS,
123 		.split_hdr_size = 0,
124 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
125 	},
126 	.rx_adv_conf = {
127 		.rss_conf = {
128 			.rss_key = NULL,
129 			.rss_hf = RTE_ETH_RSS_IP,
130 		},
131 	},
132 	.txmode = {
133 		.mq_mode = RTE_ETH_MQ_TX_NONE,
134 	},
135 };
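/*
 * These are only the requested defaults: per port, the RSS hash mask is
 * intersected with dev_info.flow_type_rss_offloads, mq_mode falls back to
 * RTE_ETH_MQ_RX_NONE on single-queue devices, MBUF_FAST_FREE is added when
 * supported and multi-segment TX is requested for jumbo MTUs (see
 * l3fwd_poll_resource_setup() and config_port_max_pkt_len()).
 */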
136 
137 static uint32_t max_pkt_len;
138 
139 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
140 static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
141 static uint8_t lkp_per_socket[NB_SOCKETS];
142 
143 struct l3fwd_lkp_mode {
144 	void  (*setup)(int);
145 	int   (*check_ptype)(int);
146 	rte_rx_callback_fn cb_parse_ptype;
147 	int   (*main_loop)(void *);
148 	void* (*get_ipv4_lookup_struct)(int);
149 	void* (*get_ipv6_lookup_struct)(int);
150 };
151 
152 static struct l3fwd_lkp_mode l3fwd_lkp;
153 
154 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
155 	.setup                  = setup_hash,
156 	.check_ptype		= em_check_ptype,
157 	.cb_parse_ptype		= em_cb_parse_ptype,
158 	.main_loop              = em_main_loop,
159 	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
160 	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
161 };
162 
163 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
164 	.setup                  = setup_lpm,
165 	.check_ptype		= lpm_check_ptype,
166 	.cb_parse_ptype		= lpm_cb_parse_ptype,
167 	.main_loop              = lpm_main_loop,
168 	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
169 	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
170 };
171 
172 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
173 	.setup                  = setup_fib,
174 	.check_ptype            = lpm_check_ptype,
175 	.cb_parse_ptype         = lpm_cb_parse_ptype,
176 	.main_loop              = fib_main_loop,
177 	.get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
178 	.get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
179 };
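/*
 * FIB mode reuses the LPM packet-type check and parse callbacks above,
 * since both operate on plain IPv4/IPv6 headers; only the table setup,
 * lookup structures and main loop differ.
 */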
180 
181 /*
182  * 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
183  * 198.18.{0-7}.0/24 = Port {0-7}
184  */
185 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
186 	{RTE_IPV4(198, 18, 0, 0), 24, 0},
187 	{RTE_IPV4(198, 18, 1, 0), 24, 1},
188 	{RTE_IPV4(198, 18, 2, 0), 24, 2},
189 	{RTE_IPV4(198, 18, 3, 0), 24, 3},
190 	{RTE_IPV4(198, 18, 4, 0), 24, 4},
191 	{RTE_IPV4(198, 18, 5, 0), 24, 5},
192 	{RTE_IPV4(198, 18, 6, 0), 24, 6},
193 	{RTE_IPV4(198, 18, 7, 0), 24, 7},
194 };
195 
196 /*
197  * 2001:200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC5180).
198  * 2001:200:0:{0-7}::/64 = Port {0-7}
199  */
200 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
201 	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
202 	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
203 	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
204 	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
205 	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
206 	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
207 	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
208 	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
209 };
210 
211 /*
212  * Setup lookup methods for forwarding.
213  * Currently exact match, longest prefix match and forwarding information
214  * base are supported.
215  */
216 static void
217 setup_l3fwd_lookup_tables(void)
218 {
219 	/* Setup HASH lookup functions. */
220 	if (lookup_mode == L3FWD_LOOKUP_EM)
221 		l3fwd_lkp = l3fwd_em_lkp;
222 	/* Setup FIB lookup functions. */
223 	else if (lookup_mode == L3FWD_LOOKUP_FIB)
224 		l3fwd_lkp = l3fwd_fib_lkp;
225 	/* Setup LPM lookup functions. */
226 	else
227 		l3fwd_lkp = l3fwd_lpm_lkp;
228 }
229 
230 static int
231 check_lcore_params(void)
232 {
233 	uint8_t queue, lcore;
234 	uint16_t i;
235 	int socketid;
236 
237 	for (i = 0; i < nb_lcore_params; ++i) {
238 		queue = lcore_params[i].queue_id;
239 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
240 			printf("invalid queue number: %hhu\n", queue);
241 			return -1;
242 		}
243 		lcore = lcore_params[i].lcore_id;
244 		if (!rte_lcore_is_enabled(lcore)) {
245 			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
246 			return -1;
247 		}
248 		socketid = rte_lcore_to_socket_id(lcore);
249 		if (socketid != 0 && numa_on == 0) {
250 			printf("warning: lcore %hhu is on socket %d with numa off\n",
251 				lcore, socketid);
252 		}
253 	}
254 	return 0;
255 }
256 
257 static int
258 check_port_config(void)
259 {
260 	uint16_t portid;
261 	uint16_t i;
262 
263 	for (i = 0; i < nb_lcore_params; ++i) {
264 		portid = lcore_params[i].port_id;
265 		if ((enabled_port_mask & (1 << portid)) == 0) {
266 			printf("port %u is not enabled in port mask\n", portid);
267 			return -1;
268 		}
269 		if (!rte_eth_dev_is_valid_port(portid)) {
270 			printf("port %u is not present on the board\n", portid);
271 			return -1;
272 		}
273 	}
274 	return 0;
275 }
276 
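/*
 * Return the number of RX queues to set up for @port, derived from the
 * --config entries. Queue ids for a port must start at 0 and be
 * contiguous; otherwise the application aborts.
 */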
277 static uint8_t
278 get_port_n_rx_queues(const uint16_t port)
279 {
280 	int queue = -1;
281 	uint16_t i;
282 
283 	for (i = 0; i < nb_lcore_params; ++i) {
284 		if (lcore_params[i].port_id == port) {
285 			if (lcore_params[i].queue_id == queue+1)
286 				queue = lcore_params[i].queue_id;
287 			else
288 				rte_exit(EXIT_FAILURE, "queue ids of port %d must be"
289 						" sequential and start with 0\n",
290 						lcore_params[i].port_id);
291 		}
292 	}
293 	return (uint8_t)(++queue);
294 }
295 
296 static int
297 init_lcore_rx_queues(void)
298 {
299 	uint16_t i, nb_rx_queue;
300 	uint8_t lcore;
301 
302 	for (i = 0; i < nb_lcore_params; ++i) {
303 		lcore = lcore_params[i].lcore_id;
304 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
305 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
306 			printf("error: too many queues (%u) for lcore: %u\n",
307 				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
308 			return -1;
309 		} else {
310 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
311 				lcore_params[i].port_id;
312 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
313 				lcore_params[i].queue_id;
314 			lcore_conf[lcore].n_rx_queue++;
315 		}
316 	}
317 	return 0;
318 }
319 
320 /* display usage */
321 static void
322 print_usage(const char *prgname)
323 {
324 	fprintf(stderr, "%s [EAL options] --"
325 		" -p PORTMASK"
326 		" [-P]"
327 		" [--lookup]"
328 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
329 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
330 		" [--max-pkt-len PKTLEN]"
331 		" [--no-numa]"
332 		" [--hash-entry-num]"
333 		" [--ipv6]"
334 		" [--parse-ptype]"
335 		" [--per-port-pool]"
336 		" [--mode]"
337 		" [--eventq-sched]"
338 		" [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
339 		" [-E]"
340 		" [-L]\n\n"
341 
342 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
343 		"  -P : Enable promiscuous mode\n"
344 		"  --lookup: Select the lookup method\n"
345 		"            Default: lpm\n"
346 		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
347 		"  --config (port,queue,lcore): Rx queue configuration\n"
348 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
349 		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
350 		"  --no-numa: Disable numa awareness\n"
351 		"  --hash-entry-num: Specify the hash entry number in hexadecimal to be set up\n"
352 		"  --ipv6: Set if running IPv6 packets\n"
353 		"  --parse-ptype: Set to use software to analyze packet type\n"
354 		"  --per-port-pool: Use separate buffer pool per port\n"
355 		"  --mode: Packet transfer mode for I/O, poll or eventdev\n"
356 		"          Default mode = poll\n"
357 		"  --eventq-sched: Event queue synchronization method\n"
358 		"                  ordered, atomic or parallel.\n"
359 		"                  Default: atomic\n"
360 		"                  Valid only if --mode=eventdev\n"
361 		"  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
362 		"                    Default: 1\n"
363 		"                    Valid only if --mode=eventdev\n"
364 		"  --event-vector:  Enable event vectorization.\n"
365 		"  --event-vector-size: Max vector size if event vectorization is enabled.\n"
366 		"  --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
367 		"  -E : Enable exact match (legacy flag, please use --lookup=em instead)\n"
368 		"  -L : Enable longest prefix match (legacy flag, please use --lookup=lpm instead)\n\n",
369 		prgname);
370 }
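/*
 * An illustrative invocation (binary name, core list and device setup
 * depend on the local build and platform):
 *   ./dpdk-l3fwd -l 1-2 -n 4 -- -p 0x3 -P --config="(0,0,1),(1,0,2)"
 * forwards between ports 0 and 1, with lcore 1 polling port 0 queue 0 and
 * lcore 2 polling port 1 queue 0, both ports in promiscuous mode.
 */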
371 
372 static int
373 parse_max_pkt_len(const char *pktlen)
374 {
375 	char *end = NULL;
376 	unsigned long len;
377 
378 	/* parse decimal string */
379 	len = strtoul(pktlen, &end, 10);
380 	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
381 		return -1;
382 
383 	if (len == 0)
384 		return -1;
385 
386 	return len;
387 }
388 
389 static int
390 parse_portmask(const char *portmask)
391 {
392 	char *end = NULL;
393 	unsigned long pm;
394 
395 	/* parse hexadecimal string */
396 	pm = strtoul(portmask, &end, 16);
397 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
398 		return 0;
399 
400 	return pm;
401 }
402 
403 static int
404 parse_hash_entry_number(const char *hash_entry_num)
405 {
406 	char *end = NULL;
407 	unsigned long hash_en;
408 	/* parse hexadecimal string */
409 	hash_en = strtoul(hash_entry_num, &end, 16);
410 	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
411 		return -1;
412 
413 	if (hash_en == 0)
414 		return -1;
415 
416 	return hash_en;
417 }
418 
419 static int
420 parse_config(const char *q_arg)
421 {
422 	char s[256];
423 	const char *p, *p0 = q_arg;
424 	char *end;
425 	enum fieldnames {
426 		FLD_PORT = 0,
427 		FLD_QUEUE,
428 		FLD_LCORE,
429 		_NUM_FLD
430 	};
431 	unsigned long int_fld[_NUM_FLD];
432 	char *str_fld[_NUM_FLD];
433 	int i;
434 	unsigned size;
435 
436 	nb_lcore_params = 0;
437 
438 	while ((p = strchr(p0,'(')) != NULL) {
439 		++p;
440 		if((p0 = strchr(p,')')) == NULL)
441 			return -1;
442 
443 		size = p0 - p;
444 		if(size >= sizeof(s))
445 			return -1;
446 
447 		snprintf(s, sizeof(s), "%.*s", size, p);
448 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
449 			return -1;
450 		for (i = 0; i < _NUM_FLD; i++){
451 			errno = 0;
452 			int_fld[i] = strtoul(str_fld[i], &end, 0);
453 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
454 				return -1;
455 		}
456 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
457 			printf("exceeded max number of lcore params: %hu\n",
458 				nb_lcore_params);
459 			return -1;
460 		}
461 		lcore_params_array[nb_lcore_params].port_id =
462 			(uint8_t)int_fld[FLD_PORT];
463 		lcore_params_array[nb_lcore_params].queue_id =
464 			(uint8_t)int_fld[FLD_QUEUE];
465 		lcore_params_array[nb_lcore_params].lcore_id =
466 			(uint8_t)int_fld[FLD_LCORE];
467 		++nb_lcore_params;
468 	}
469 	lcore_params = lcore_params_array;
470 	return 0;
471 }
472 
473 static void
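/*
 * Parse an --eth-dest option of the form "<port>,<MAC>", e.g.
 * --eth-dest=0,11:22:33:44:55:66 makes port 0 send forwarded packets to
 * that destination MAC instead of the default 02:00:00:00:00:<port>.
 */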
474 parse_eth_dest(const char *optarg)
475 {
476 	uint16_t portid;
477 	char *port_end;
478 	uint8_t c, *dest, peer_addr[6];
479 
480 	errno = 0;
481 	portid = strtoul(optarg, &port_end, 10);
482 	if (errno != 0 || port_end == optarg || *port_end++ != ',')
483 		rte_exit(EXIT_FAILURE,
484 		"Invalid eth-dest: %s", optarg);
485 	if (portid >= RTE_MAX_ETHPORTS)
486 		rte_exit(EXIT_FAILURE,
487 		"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
488 		portid, RTE_MAX_ETHPORTS);
489 
490 	if (cmdline_parse_etheraddr(NULL, port_end,
491 		&peer_addr, sizeof(peer_addr)) < 0)
492 		rte_exit(EXIT_FAILURE,
493 		"Invalid ethernet address: %s\n",
494 		port_end);
495 	dest = (uint8_t *)&dest_eth_addr[portid];
496 	for (c = 0; c < 6; c++)
497 		dest[c] = peer_addr[c];
498 	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
499 }
500 
501 static void
502 parse_mode(const char *optarg)
503 {
504 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
505 
506 	if (!strcmp(optarg, "poll"))
507 		evt_rsrc->enabled = false;
508 	else if (!strcmp(optarg, "eventdev"))
509 		evt_rsrc->enabled = true;
510 }
511 
512 static void
513 parse_eventq_sched(const char *optarg)
514 {
515 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
516 
517 	if (!strcmp(optarg, "ordered"))
518 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
519 	if (!strcmp(optarg, "atomic"))
520 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
521 	if (!strcmp(optarg, "parallel"))
522 		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
523 }
524 
525 static void
526 parse_event_eth_rx_queues(const char *eth_rx_queues)
527 {
528 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
529 	char *end = NULL;
530 	uint8_t num_eth_rx_queues;
531 
532 	/* parse decimal string */
533 	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
534 	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
535 		return;
536 
537 	if (num_eth_rx_queues == 0)
538 		return;
539 
540 	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
541 }
542 
543 static int
544 parse_lookup(const char *optarg)
545 {
546 	if (!strcmp(optarg, "em"))
547 		lookup_mode = L3FWD_LOOKUP_EM;
548 	else if (!strcmp(optarg, "lpm"))
549 		lookup_mode = L3FWD_LOOKUP_LPM;
550 	else if (!strcmp(optarg, "fib"))
551 		lookup_mode = L3FWD_LOOKUP_FIB;
552 	else {
553 		fprintf(stderr, "Invalid lookup option! Accepted options: em, lpm, fib\n");
554 		return -1;
555 	}
556 	return 0;
557 }
558 
559 #define MAX_JUMBO_PKT_LEN  9600
560 
561 static const char short_options[] =
562 	"p:"  /* portmask */
563 	"P"   /* promiscuous */
564 	"L"   /* legacy enable long prefix match */
565 	"E"   /* legacy enable exact match */
566 	;
567 
568 #define CMD_LINE_OPT_CONFIG "config"
569 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
570 #define CMD_LINE_OPT_NO_NUMA "no-numa"
571 #define CMD_LINE_OPT_IPV6 "ipv6"
572 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
573 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
574 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
575 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
576 #define CMD_LINE_OPT_MODE "mode"
577 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
578 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
579 #define CMD_LINE_OPT_LOOKUP "lookup"
580 #define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
581 #define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
582 #define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
583 
584 enum {
585 	/* long options mapped to a short option */
586 
587 	/* first long-only option value must be >= 256, so that we won't
588 	 * conflict with short options */
589 	CMD_LINE_OPT_MIN_NUM = 256,
590 	CMD_LINE_OPT_CONFIG_NUM,
591 	CMD_LINE_OPT_ETH_DEST_NUM,
592 	CMD_LINE_OPT_NO_NUMA_NUM,
593 	CMD_LINE_OPT_IPV6_NUM,
594 	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
595 	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
596 	CMD_LINE_OPT_PARSE_PTYPE_NUM,
597 	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
598 	CMD_LINE_OPT_MODE_NUM,
599 	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
600 	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
601 	CMD_LINE_OPT_LOOKUP_NUM,
602 	CMD_LINE_OPT_ENABLE_VECTOR_NUM,
603 	CMD_LINE_OPT_VECTOR_SIZE_NUM,
604 	CMD_LINE_OPT_VECTOR_TMO_NS_NUM
605 };
606 
607 static const struct option lgopts[] = {
608 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
609 	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
610 	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
611 	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
612 	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
613 	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
614 	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
615 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
616 	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
617 	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
618 	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
619 					CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
620 	{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
621 	{CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
622 	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
623 	{CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
624 	{NULL, 0, 0, 0}
625 };
626 
627 /*
628  * This expression is used to calculate the number of mbufs needed
629  * depending on user input, taking into account memory for the RX and
630  * TX hardware rings, the per-lcore mempool cache and the per-port,
631  * per-lcore burst buffers (MAX_PKT_BURST). RTE_MAX is used to ensure
632  * that NB_MBUF never goes below a minimum value of 8192.
633  */
634 #define NB_MBUF(nports) RTE_MAX(	\
635 	(nports*nb_rx_queue*nb_rxd +		\
636 	nports*nb_lcores*MAX_PKT_BURST +	\
637 	nports*n_tx_queue*nb_txd +		\
638 	nb_lcores*MEMPOOL_CACHE_SIZE),		\
639 	(unsigned)8192)
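/*
 * Worked example (illustrative values only): with 4 ports, one RX and one
 * TX queue per port, 1024 descriptors per ring, 4 lcores, MAX_PKT_BURST of
 * 32 and MEMPOOL_CACHE_SIZE of 256, this evaluates to
 *   4*1*1024 + 4*4*32 + 4*1*1024 + 4*256 = 9728 mbufs,
 * which is above the 8192 floor enforced by RTE_MAX.
 */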
640 
641 /* Parse the argument given in the command line of the application */
642 static int
643 parse_args(int argc, char **argv)
644 {
645 	int opt, ret;
646 	char **argvopt;
647 	int option_index;
648 	char *prgname = argv[0];
649 	uint8_t lcore_params = 0;
650 	uint8_t eventq_sched = 0;
651 	uint8_t eth_rx_q = 0;
652 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
653 
654 	argvopt = argv;
655 
656 	/* Parse options; error and usage messages go to stderr. */
657 	while ((opt = getopt_long(argc, argvopt, short_options,
658 				lgopts, &option_index)) != EOF) {
659 
660 		switch (opt) {
661 		/* portmask */
662 		case 'p':
663 			enabled_port_mask = parse_portmask(optarg);
664 			if (enabled_port_mask == 0) {
665 				fprintf(stderr, "Invalid portmask\n");
666 				print_usage(prgname);
667 				return -1;
668 			}
669 			break;
670 
671 		case 'P':
672 			promiscuous_on = 1;
673 			break;
674 
675 		case 'E':
676 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
677 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
678 				return -1;
679 			}
680 			lookup_mode = L3FWD_LOOKUP_EM;
681 			break;
682 
683 		case 'L':
684 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
685 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
686 				return -1;
687 			}
688 			lookup_mode = L3FWD_LOOKUP_LPM;
689 			break;
690 
691 		/* long options */
692 		case CMD_LINE_OPT_CONFIG_NUM:
693 			ret = parse_config(optarg);
694 			if (ret) {
695 				fprintf(stderr, "Invalid config\n");
696 				print_usage(prgname);
697 				return -1;
698 			}
699 			lcore_params = 1;
700 			break;
701 
702 		case CMD_LINE_OPT_ETH_DEST_NUM:
703 			parse_eth_dest(optarg);
704 			break;
705 
706 		case CMD_LINE_OPT_NO_NUMA_NUM:
707 			numa_on = 0;
708 			break;
709 
710 		case CMD_LINE_OPT_IPV6_NUM:
711 			ipv6 = 1;
712 			break;
713 
714 		case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
715 			max_pkt_len = parse_max_pkt_len(optarg);
716 			break;
717 
718 		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
719 			ret = parse_hash_entry_number(optarg);
720 			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
721 				hash_entry_number = ret;
722 			} else {
723 				fprintf(stderr, "invalid hash entry number\n");
724 				print_usage(prgname);
725 				return -1;
726 			}
727 			break;
728 
729 		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
730 			printf("soft parse-ptype is enabled\n");
731 			parse_ptype = 1;
732 			break;
733 
734 		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
735 			printf("per port buffer pool is enabled\n");
736 			per_port_pool = 1;
737 			break;
738 
739 		case CMD_LINE_OPT_MODE_NUM:
740 			parse_mode(optarg);
741 			break;
742 
743 		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
744 			parse_eventq_sched(optarg);
745 			eventq_sched = 1;
746 			break;
747 
748 		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
749 			parse_event_eth_rx_queues(optarg);
750 			eth_rx_q = 1;
751 			break;
752 
753 		case CMD_LINE_OPT_LOOKUP_NUM:
754 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
755 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
756 				return -1;
757 			}
758 			ret = parse_lookup(optarg);
759 			/*
760 			 * If parse_lookup was passed an invalid lookup type
761 			 * then return -1. Error log included within
762 			 * parse_lookup for simplicity.
763 			 */
764 			if (ret)
765 				return -1;
766 			break;
767 
768 		case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
769 			printf("event vectorization is enabled\n");
770 			evt_rsrc->vector_enabled = 1;
771 			break;
772 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
773 			evt_rsrc->vector_size = strtol(optarg, NULL, 10);
774 			break;
775 		case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
776 			evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
777 			break;
778 		default:
779 			print_usage(prgname);
780 			return -1;
781 		}
782 	}
783 
784 	if (evt_rsrc->enabled && lcore_params) {
785 		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
786 		return -1;
787 	}
788 
789 	if (!evt_rsrc->enabled && eth_rx_q) {
790 		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
791 		return -1;
792 	}
793 
794 	if (!evt_rsrc->enabled && eventq_sched) {
795 		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
796 		return -1;
797 	}
798 
799 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
800 		evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
801 		fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
802 			evt_rsrc->vector_size);
803 	}
804 
805 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
806 		evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
807 		fprintf(stderr,
808 			"vector timeout set to default (%" PRIu64 " ns)\n",
809 			evt_rsrc->vector_tmo_ns);
810 	}
811 
812 	/*
813 	 * If nothing is selected, pick longest-prefix match
814 	 * as the default.
815 	 */
816 	if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
817 		fprintf(stderr, "No lookup mode (LPM, EM or FIB) selected, defaulting to LPM\n");
818 		lookup_mode = L3FWD_LOOKUP_LPM;
819 	}
820 
821 	/*
822 	 * The ipv6 and hash-entry-num flags are valid only for
823 	 * exact match; reset them to their defaults for
824 	 * longest-prefix match.
825 	 */
826 	if (lookup_mode == L3FWD_LOOKUP_LPM) {
827 		ipv6 = 0;
828 		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
829 	}
830 
831 	if (optind >= 0)
832 		argv[optind-1] = prgname;
833 
834 	ret = optind-1;
835 	optind = 1; /* reset getopt lib */
836 	return ret;
837 }
838 
839 static void
840 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
841 {
842 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
843 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
844 	printf("%s%s", name, buf);
845 }
846 
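/*
 * Create one mbuf pool per (port, socket) pair and, when event
 * vectorization is enabled, one event-vector pool per port. When
 * --per-port-pool is not given, the caller passes portid 0 so a single
 * per-socket pool is shared by all ports. The lookup tables are also set
 * up here, once per socket.
 */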
847 int
848 init_mem(uint16_t portid, unsigned int nb_mbuf)
849 {
850 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
851 	struct lcore_conf *qconf;
852 	int socketid;
853 	unsigned lcore_id;
854 	char s[64];
855 
856 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
857 		if (rte_lcore_is_enabled(lcore_id) == 0)
858 			continue;
859 
860 		if (numa_on)
861 			socketid = rte_lcore_to_socket_id(lcore_id);
862 		else
863 			socketid = 0;
864 
865 		if (socketid >= NB_SOCKETS) {
866 			rte_exit(EXIT_FAILURE,
867 				"Socket %d of lcore %u is out of range %d\n",
868 				socketid, lcore_id, NB_SOCKETS);
869 		}
870 
871 		if (pktmbuf_pool[portid][socketid] == NULL) {
872 			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
873 				 portid, socketid);
874 			pktmbuf_pool[portid][socketid] =
875 				rte_pktmbuf_pool_create(s, nb_mbuf,
876 					MEMPOOL_CACHE_SIZE, 0,
877 					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
878 			if (pktmbuf_pool[portid][socketid] == NULL)
879 				rte_exit(EXIT_FAILURE,
880 					"Cannot init mbuf pool on socket %d\n",
881 					socketid);
882 			else
883 				printf("Allocated mbuf pool on socket %d\n",
884 					socketid);
885 
886 			/* Setup LPM, EM (i.e. hash) or FIB, but only once per
887 			 * available socket.
888 			 */
889 			if (!lkp_per_socket[socketid]) {
890 				l3fwd_lkp.setup(socketid);
891 				lkp_per_socket[socketid] = 1;
892 			}
893 		}
894 
895 		if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
896 			unsigned int nb_vec;
897 
898 			nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
899 				 evt_rsrc->vector_size;
900 			snprintf(s, sizeof(s), "vector_pool_%d", portid);
901 			vector_pool[portid] = rte_event_vector_pool_create(
902 				s, nb_vec, 0, evt_rsrc->vector_size, socketid);
903 			if (vector_pool[portid] == NULL)
904 				rte_exit(EXIT_FAILURE,
905 					 "Failed to create vector pool for port %d\n",
906 					 portid);
907 			else
908 				printf("Allocated vector pool for port %d\n",
909 				       portid);
910 		}
911 
912 		qconf = &lcore_conf[lcore_id];
913 		qconf->ipv4_lookup_struct =
914 			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
915 		qconf->ipv6_lookup_struct =
916 			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
917 	}
918 	return 0;
919 }
920 
921 /* Check the link status of all ports for up to 9s, then print the final status */
922 static void
923 check_all_ports_link_status(uint32_t port_mask)
924 {
925 #define CHECK_INTERVAL 100 /* 100ms */
926 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
927 	uint16_t portid;
928 	uint8_t count, all_ports_up, print_flag = 0;
929 	struct rte_eth_link link;
930 	int ret;
931 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
932 
933 	printf("\nChecking link status");
934 	fflush(stdout);
935 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
936 		if (force_quit)
937 			return;
938 		all_ports_up = 1;
939 		RTE_ETH_FOREACH_DEV(portid) {
940 			if (force_quit)
941 				return;
942 			if ((port_mask & (1 << portid)) == 0)
943 				continue;
944 			memset(&link, 0, sizeof(link));
945 			ret = rte_eth_link_get_nowait(portid, &link);
946 			if (ret < 0) {
947 				all_ports_up = 0;
948 				if (print_flag == 1)
949 					printf("Port %u link get failed: %s\n",
950 						portid, rte_strerror(-ret));
951 				continue;
952 			}
953 			/* print link status if flag set */
954 			if (print_flag == 1) {
955 				rte_eth_link_to_str(link_status_text,
956 					sizeof(link_status_text), &link);
957 				printf("Port %d %s\n", portid,
958 				       link_status_text);
959 				continue;
960 			}
961 			/* clear all_ports_up flag if any link down */
962 			if (link.link_status == RTE_ETH_LINK_DOWN) {
963 				all_ports_up = 0;
964 				break;
965 			}
966 		}
967 		/* after finally printing all link status, get out */
968 		if (print_flag == 1)
969 			break;
970 
971 		if (all_ports_up == 0) {
972 			printf(".");
973 			fflush(stdout);
974 			rte_delay_ms(CHECK_INTERVAL);
975 		}
976 
977 		/* set the print_flag if all ports up or timeout */
978 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
979 			print_flag = 1;
980 			printf("done\n");
981 		}
982 	}
983 }
984 
985 static void
986 signal_handler(int signum)
987 {
988 	if (signum == SIGINT || signum == SIGTERM) {
989 		printf("\n\nSignal %d received, preparing to exit...\n",
990 				signum);
991 		force_quit = true;
992 	}
993 }
994 
995 static int
996 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
997 {
998 		printf("Port %d: packet type will be parsed in software\n", portid);
999 		printf("Port %d: softly parse packet type info\n", portid);
1000 		if (rte_eth_add_rx_callback(portid, queueid,
1001 					    l3fwd_lkp.cb_parse_ptype,
1002 					    NULL))
1003 			return 1;
1004 
1005 		printf("Failed to add rx callback: port=%d\n", portid);
1006 		return 0;
1007 	}
1008 
1009 	if (l3fwd_lkp.check_ptype(portid))
1010 		return 1;
1011 
1012 	printf("port %d cannot parse packet type, please add --%s\n",
1013 	       portid, CMD_LINE_OPT_PARSE_PTYPE);
1014 	return 0;
1015 }
1016 
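/*
 * Derive the L2 overhead (header, CRC and any device-specific extras) from
 * the gap the driver reports between max_rx_pktlen and max_mtu, falling
 * back to a plain Ethernet header plus CRC when no maximum MTU is
 * reported.
 */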
1017 static uint32_t
1018 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1019 {
1020 	uint32_t overhead_len;
1021 
1022 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1023 		overhead_len = max_rx_pktlen - max_mtu;
1024 	else
1025 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1026 
1027 	return overhead_len;
1028 }
1029 
1030 static int
1031 config_port_max_pkt_len(struct rte_eth_conf *conf,
1032 		struct rte_eth_dev_info *dev_info)
1033 {
1034 	uint32_t overhead_len;
1035 
1036 	if (max_pkt_len == 0)
1037 		return 0;
1038 
1039 	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
1040 		return -1;
1041 
1042 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1043 			dev_info->max_mtu);
1044 	conf->rxmode.mtu = max_pkt_len - overhead_len;
1045 
1046 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
1047 		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1048 
1049 	return 0;
1050 }
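/*
 * For example (illustrative numbers): --max-pkt-len 9000 on a device with
 * the standard 18-byte Ethernet overhead (header + CRC) yields an MTU of
 * 8982 and, since that exceeds RTE_ETHER_MTU, the multi-segment TX offload
 * is requested.
 */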
1051 
1052 static void
1053 l3fwd_poll_resource_setup(void)
1054 {
1055 	uint8_t nb_rx_queue, queue, socketid;
1056 	struct rte_eth_dev_info dev_info;
1057 	uint32_t n_tx_queue, nb_lcores;
1058 	struct rte_eth_txconf *txconf;
1059 	struct lcore_conf *qconf;
1060 	uint16_t queueid, portid;
1061 	unsigned int nb_ports;
1062 	unsigned int lcore_id;
1063 	int ret;
1064 
1065 	if (check_lcore_params() < 0)
1066 		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1067 
1068 	ret = init_lcore_rx_queues();
1069 	if (ret < 0)
1070 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1071 
1072 	nb_ports = rte_eth_dev_count_avail();
1073 
1074 	if (check_port_config() < 0)
1075 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1076 
1077 	nb_lcores = rte_lcore_count();
1078 
1079 	/* initialize all ports */
1080 	RTE_ETH_FOREACH_DEV(portid) {
1081 		struct rte_eth_conf local_port_conf = port_conf;
1082 
1083 		/* skip ports that are not enabled */
1084 		if ((enabled_port_mask & (1 << portid)) == 0) {
1085 			printf("\nSkipping disabled port %d\n", portid);
1086 			continue;
1087 		}
1088 
1089 		/* init port */
1090 		printf("Initializing port %d ... ", portid );
1091 		fflush(stdout);
1092 
1093 		nb_rx_queue = get_port_n_rx_queues(portid);
1094 		n_tx_queue = nb_lcores;
1095 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1096 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1097 		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1098 			nb_rx_queue, (unsigned)n_tx_queue );
1099 
1100 		ret = rte_eth_dev_info_get(portid, &dev_info);
1101 		if (ret != 0)
1102 			rte_exit(EXIT_FAILURE,
1103 				"Error during getting device (port %u) info: %s\n",
1104 				portid, strerror(-ret));
1105 
1106 		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
1107 		if (ret != 0)
1108 			rte_exit(EXIT_FAILURE,
1109 				"Invalid max packet length: %u (port %u)\n",
1110 				max_pkt_len, portid);
1111 
1112 		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
1113 			local_port_conf.txmode.offloads |=
1114 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1115 
1116 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1117 			dev_info.flow_type_rss_offloads;
1118 
1119 		if (dev_info.max_rx_queues == 1)
1120 			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
1121 
1122 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1123 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
1124 			printf("Port %u modified RSS hash function based on hardware support,"
1125 				" requested:%#"PRIx64" configured:%#"PRIx64"\n",
1126 				portid,
1127 				port_conf.rx_adv_conf.rss_conf.rss_hf,
1128 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1129 		}
1130 
1131 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
1132 					(uint16_t)n_tx_queue, &local_port_conf);
1133 		if (ret < 0)
1134 			rte_exit(EXIT_FAILURE,
1135 				"Cannot configure device: err=%d, port=%d\n",
1136 				ret, portid);
1137 
1138 		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1139 						       &nb_txd);
1140 		if (ret < 0)
1141 			rte_exit(EXIT_FAILURE,
1142 				 "Cannot adjust number of descriptors: err=%d, "
1143 				 "port=%d\n", ret, portid);
1144 
1145 		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1146 		if (ret < 0)
1147 			rte_exit(EXIT_FAILURE,
1148 				 "Cannot get MAC address: err=%d, port=%d\n",
1149 				 ret, portid);
1150 
1151 		print_ethaddr(" Address:", &ports_eth_addr[portid]);
1152 		printf(", ");
1153 		print_ethaddr("Destination:",
1154 			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
1155 		printf(", ");
1156 
1157 		/*
1158 		 * prepare src MACs for each port.
1159 		 */
1160 		rte_ether_addr_copy(&ports_eth_addr[portid],
1161 			(struct rte_ether_addr *)(val_eth + portid) + 1);
1162 
1163 		/* init memory */
1164 		if (!per_port_pool) {
1165 			/* portid = 0; this is *not* signifying the first port,
1166 			 * rather, it signifies that portid is ignored.
1167 			 */
1168 			ret = init_mem(0, NB_MBUF(nb_ports));
1169 		} else {
1170 			ret = init_mem(portid, NB_MBUF(1));
1171 		}
1172 		if (ret < 0)
1173 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
1174 
1175 		/* init one TX queue per couple (lcore,port) */
1176 		queueid = 0;
1177 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1178 			if (rte_lcore_is_enabled(lcore_id) == 0)
1179 				continue;
1180 
1181 			if (numa_on)
1182 				socketid =
1183 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1184 			else
1185 				socketid = 0;
1186 
1187 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1188 			fflush(stdout);
1189 
1190 			txconf = &dev_info.default_txconf;
1191 			txconf->offloads = local_port_conf.txmode.offloads;
1192 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1193 						     socketid, txconf);
1194 			if (ret < 0)
1195 				rte_exit(EXIT_FAILURE,
1196 					"rte_eth_tx_queue_setup: err=%d, "
1197 					"port=%d\n", ret, portid);
1198 
1199 			qconf = &lcore_conf[lcore_id];
1200 			qconf->tx_queue_id[portid] = queueid;
1201 			queueid++;
1202 
1203 			qconf->tx_port_id[qconf->n_tx_port] = portid;
1204 			qconf->n_tx_port++;
1205 		}
1206 		printf("\n");
1207 	}
1208 
1209 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1210 		if (rte_lcore_is_enabled(lcore_id) == 0)
1211 			continue;
1212 		qconf = &lcore_conf[lcore_id];
1213 		printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1214 		fflush(stdout);
1215 		/* init RX queues */
1216 		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1217 			struct rte_eth_rxconf rxq_conf;
1218 
1219 			portid = qconf->rx_queue_list[queue].port_id;
1220 			queueid = qconf->rx_queue_list[queue].queue_id;
1221 
1222 			if (numa_on)
1223 				socketid =
1224 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1225 			else
1226 				socketid = 0;
1227 
1228 			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1229 			fflush(stdout);
1230 
1231 			ret = rte_eth_dev_info_get(portid, &dev_info);
1232 			if (ret != 0)
1233 				rte_exit(EXIT_FAILURE,
1234 					"Error during getting device (port %u) info: %s\n",
1235 					portid, strerror(-ret));
1236 
1237 			rxq_conf = dev_info.default_rxconf;
1238 			rxq_conf.offloads = port_conf.rxmode.offloads;
1239 			if (!per_port_pool)
1240 				ret = rte_eth_rx_queue_setup(portid, queueid,
1241 						nb_rxd, socketid,
1242 						&rxq_conf,
1243 						pktmbuf_pool[0][socketid]);
1244 			else
1245 				ret = rte_eth_rx_queue_setup(portid, queueid,
1246 						nb_rxd, socketid,
1247 						&rxq_conf,
1248 						pktmbuf_pool[portid][socketid]);
1249 			if (ret < 0)
1250 				rte_exit(EXIT_FAILURE,
1251 				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
1252 				ret, portid);
1253 		}
1254 	}
1255 }
1256 
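/*
 * Map @service_id to the service lcore currently running the fewest
 * services (clearing the default mappings first) and start that lcore.
 * Returns -ENOENT if no service lcores are available.
 */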
1257 static inline int
1258 l3fwd_service_enable(uint32_t service_id)
1259 {
1260 	uint8_t min_service_count = UINT8_MAX;
1261 	uint32_t slcore_array[RTE_MAX_LCORE];
1262 	unsigned int slcore = 0;
1263 	uint8_t service_count;
1264 	int32_t slcore_count;
1265 
1266 	if (!rte_service_lcore_count())
1267 		return -ENOENT;
1268 
1269 	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1270 	if (slcore_count < 0)
1271 		return -ENOENT;
1272 	/* Get the core which has least number of services running. */
1273 	while (slcore_count--) {
1274 		/* Reset default mapping */
1275 		if (rte_service_map_lcore_set(service_id,
1276 				slcore_array[slcore_count], 0) != 0)
1277 			return -ENOENT;
1278 		service_count = rte_service_lcore_count_services(
1279 				slcore_array[slcore_count]);
1280 		if (service_count < min_service_count) {
1281 			slcore = slcore_array[slcore_count];
1282 			min_service_count = service_count;
1283 		}
1284 	}
1285 	if (rte_service_map_lcore_set(service_id, slcore, 1))
1286 		return -ENOENT;
1287 	rte_service_lcore_start(slcore);
1288 
1289 	return 0;
1290 }
1291 
1292 static void
1293 l3fwd_event_service_setup(void)
1294 {
1295 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1296 	struct rte_event_dev_info evdev_info;
1297 	uint32_t service_id, caps;
1298 	int ret, i;
1299 
1300 	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1301 	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1302 		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1303 				&service_id);
1304 		if (ret != -ESRCH && ret != 0)
1305 			rte_exit(EXIT_FAILURE,
1306 				 "Error in starting eventdev service\n");
1307 		l3fwd_service_enable(service_id);
1308 	}
1309 
1310 	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1311 		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1312 				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1313 		if (ret < 0)
1314 			rte_exit(EXIT_FAILURE,
1315 				 "Failed to get Rx adapter[%d] caps\n",
1316 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1317 		ret = rte_event_eth_rx_adapter_service_id_get(
1318 				evt_rsrc->event_d_id,
1319 				&service_id);
1320 		if (ret != -ESRCH && ret != 0)
1321 			rte_exit(EXIT_FAILURE,
1322 				 "Error in starting Rx adapter[%d] service\n",
1323 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1324 		l3fwd_service_enable(service_id);
1325 	}
1326 
1327 	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1328 		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1329 				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1330 		if (ret < 0)
1331 			rte_exit(EXIT_FAILURE,
1332 				 "Failed to get Tx adapter[%d] caps\n",
1333 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1334 		ret = rte_event_eth_tx_adapter_service_id_get(
1335 				evt_rsrc->event_d_id,
1336 				&service_id);
1337 		if (ret != -ESRCH && ret != 0)
1338 			rte_exit(EXIT_FAILURE,
1339 				 "Error in starting Tx adapter[%d] service\n",
1340 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1341 		l3fwd_service_enable(service_id);
1342 	}
1343 }
1344 
1345 int
1346 main(int argc, char **argv)
1347 {
1348 	struct l3fwd_event_resources *evt_rsrc;
1349 	struct lcore_conf *qconf;
1350 	uint16_t queueid, portid;
1351 	unsigned int lcore_id;
1352 	uint8_t queue;
1353 	int i, ret;
1354 
1355 	/* init EAL */
1356 	ret = rte_eal_init(argc, argv);
1357 	if (ret < 0)
1358 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1359 	argc -= ret;
1360 	argv += ret;
1361 
1362 	force_quit = false;
1363 	signal(SIGINT, signal_handler);
1364 	signal(SIGTERM, signal_handler);
1365 
1366 	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1367 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1368 		dest_eth_addr[portid] =
1369 			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1370 		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1371 	}
1372 
1373 	evt_rsrc = l3fwd_get_eventdev_rsrc();
1374 	/* parse application arguments (after the EAL ones) */
1375 	ret = parse_args(argc, argv);
1376 	if (ret < 0)
1377 		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1378 
1379 	/* Setup function pointers for lookup method. */
1380 	setup_l3fwd_lookup_tables();
1381 
1382 	evt_rsrc->per_port_pool = per_port_pool;
1383 	evt_rsrc->pkt_pool = pktmbuf_pool;
1384 	evt_rsrc->vec_pool = vector_pool;
1385 	evt_rsrc->port_mask = enabled_port_mask;
1386 	/* Configure eventdev parameters if user has requested */
1387 	if (evt_rsrc->enabled) {
1388 		l3fwd_event_resource_setup(&port_conf);
1389 		if (lookup_mode == L3FWD_LOOKUP_EM)
1390 			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1391 		else if (lookup_mode == L3FWD_LOOKUP_FIB)
1392 			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1393 		else
1394 			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1395 		l3fwd_event_service_setup();
1396 	} else
1397 		l3fwd_poll_resource_setup();
1398 
1399 	/* start ports */
1400 	RTE_ETH_FOREACH_DEV(portid) {
1401 		if ((enabled_port_mask & (1 << portid)) == 0) {
1402 			continue;
1403 		}
1404 		/* Start device */
1405 		ret = rte_eth_dev_start(portid);
1406 		if (ret < 0)
1407 			rte_exit(EXIT_FAILURE,
1408 				"rte_eth_dev_start: err=%d, port=%d\n",
1409 				ret, portid);
1410 
1411 		/*
1412 		 * If enabled, put device in promiscuous mode.
1413 		 * This allows IO forwarding mode to forward packets
1414 		 * to itself through 2 cross-connected ports of the
1415 		 * target machine.
1416 		 */
1417 		if (promiscuous_on) {
1418 			ret = rte_eth_promiscuous_enable(portid);
1419 			if (ret != 0)
1420 				rte_exit(EXIT_FAILURE,
1421 					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
1422 					rte_strerror(-ret), portid);
1423 		}
1424 	}
1425 
1426 	printf("\n");
1427 
1428 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1429 		if (rte_lcore_is_enabled(lcore_id) == 0)
1430 			continue;
1431 		qconf = &lcore_conf[lcore_id];
1432 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1433 			portid = qconf->rx_queue_list[queue].port_id;
1434 			queueid = qconf->rx_queue_list[queue].queue_id;
1435 			if (prepare_ptype_parser(portid, queueid) == 0)
1436 				rte_exit(EXIT_FAILURE, "ptype check fails\n");
1437 		}
1438 	}
1439 
1440 	check_all_ports_link_status(enabled_port_mask);
1441 
1442 	ret = 0;
1443 	/* launch per-lcore init on every lcore */
1444 	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1445 	if (evt_rsrc->enabled) {
1446 		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1447 			rte_event_eth_rx_adapter_stop(
1448 					evt_rsrc->rx_adptr.rx_adptr[i]);
1449 		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1450 			rte_event_eth_tx_adapter_stop(
1451 					evt_rsrc->tx_adptr.tx_adptr[i]);
1452 
1453 		RTE_ETH_FOREACH_DEV(portid) {
1454 			if ((enabled_port_mask & (1 << portid)) == 0)
1455 				continue;
1456 			ret = rte_eth_dev_stop(portid);
1457 			if (ret != 0)
1458 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1459 				       ret, portid);
1460 		}
1461 
1462 		rte_eal_mp_wait_lcore();
1463 		RTE_ETH_FOREACH_DEV(portid) {
1464 			if ((enabled_port_mask & (1 << portid)) == 0)
1465 				continue;
1466 			rte_eth_dev_close(portid);
1467 		}
1468 
1469 		rte_event_dev_stop(evt_rsrc->event_d_id);
1470 		rte_event_dev_close(evt_rsrc->event_d_id);
1471 
1472 	} else {
1473 		rte_eal_mp_wait_lcore();
1474 
1475 		RTE_ETH_FOREACH_DEV(portid) {
1476 			if ((enabled_port_mask & (1 << portid)) == 0)
1477 				continue;
1478 			printf("Closing port %d...", portid);
1479 			ret = rte_eth_dev_stop(portid);
1480 			if (ret != 0)
1481 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1482 				       ret, portid);
1483 			rte_eth_dev_close(portid);
1484 			printf(" Done\n");
1485 		}
1486 	}
1487 
1488 	/* clean up the EAL */
1489 	rte_eal_cleanup();
1490 
1491 	printf("Bye...\n");
1492 
1493 	return ret;
1494 }
1495