xref: /dpdk/examples/l3fwd/main.c (revision 59f3a8acbcdbafeebe816a26d76dfb06e6450f31)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2021 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17 
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_ip.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
43 
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
46 
47 #include "l3fwd.h"
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
50 
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
53 
54 #define MAX_LCORE_PARAMS 1024
55 
56 /* Static global variables used within this file. */
57 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
58 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
59 
60 /* Promiscuous mode is off on all ports by default. */
61 static int promiscuous_on;
62 
63 /* Select Longest-Prefix match, Exact match or Forwarding Information Base. */
64 enum L3FWD_LOOKUP_MODE {
65 	L3FWD_LOOKUP_DEFAULT,
66 	L3FWD_LOOKUP_LPM,
67 	L3FWD_LOOKUP_EM,
68 	L3FWD_LOOKUP_FIB
69 };
70 static enum L3FWD_LOOKUP_MODE lookup_mode;
71 
72 /* Global variables. */
73 
74 static int numa_on = 1; /**< NUMA is enabled by default. */
75 static int parse_ptype; /**< Parse packet type using rx callback;
76 			  * disabled by default */
77 static int per_port_pool; /**< Use separate buffer pools per port;
78 			    * disabled by default */
79 
80 volatile bool force_quit;
81 
82 /* ethernet addresses of ports */
83 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
84 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
85 
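/* Per-port destination (bytes 0-5) and source (bytes 6-11) MAC addresses
 * packed together for Ethernet header rewriting in the forwarding loops.
 */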
86 xmm_t val_eth[RTE_MAX_ETHPORTS];
87 
88 /* mask of enabled ports */
89 uint32_t enabled_port_mask;
90 
91 /* Used only in exact match mode. */
92 int ipv6; /**< ipv6 is false by default. */
93 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
94 
95 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
96 
97 struct lcore_params {
98 	uint16_t port_id;
99 	uint8_t queue_id;
100 	uint8_t lcore_id;
101 } __rte_cache_aligned;
102 
103 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
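/* Default (port, queue, lcore) mapping used when no --config option is given. */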
104 static struct lcore_params lcore_params_array_default[] = {
105 	{0, 0, 2},
106 	{0, 1, 2},
107 	{0, 2, 2},
108 	{1, 0, 2},
109 	{1, 1, 2},
110 	{1, 2, 2},
111 	{2, 0, 2},
112 	{3, 0, 3},
113 	{3, 1, 3},
114 };
115 
116 static struct lcore_params *lcore_params = lcore_params_array_default;
117 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
118 				sizeof(lcore_params_array_default[0]);
119 
120 static struct rte_eth_conf port_conf = {
121 	.rxmode = {
122 		.mq_mode = ETH_MQ_RX_RSS,
123 		.split_hdr_size = 0,
124 		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
125 	},
126 	.rx_adv_conf = {
127 		.rss_conf = {
128 			.rss_key = NULL,
129 			.rss_hf = ETH_RSS_IP,
130 		},
131 	},
132 	.txmode = {
133 		.mq_mode = ETH_MQ_TX_NONE,
134 	},
135 };
136 
137 static uint32_t max_pkt_len;
138 
139 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
140 static uint8_t lkp_per_socket[NB_SOCKETS];
141 
142 struct l3fwd_lkp_mode {
143 	void  (*setup)(int);
144 	int   (*check_ptype)(int);
145 	rte_rx_callback_fn cb_parse_ptype;
146 	int   (*main_loop)(void *);
147 	void* (*get_ipv4_lookup_struct)(int);
148 	void* (*get_ipv6_lookup_struct)(int);
149 };
150 
151 static struct l3fwd_lkp_mode l3fwd_lkp;
152 
153 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
154 	.setup                  = setup_hash,
155 	.check_ptype		= em_check_ptype,
156 	.cb_parse_ptype		= em_cb_parse_ptype,
157 	.main_loop              = em_main_loop,
158 	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
159 	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
160 };
161 
162 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
163 	.setup                  = setup_lpm,
164 	.check_ptype		= lpm_check_ptype,
165 	.cb_parse_ptype		= lpm_cb_parse_ptype,
166 	.main_loop              = lpm_main_loop,
167 	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
168 	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
169 };
170 
171 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
172 	.setup                  = setup_fib,
173 	.check_ptype            = lpm_check_ptype,
174 	.cb_parse_ptype         = lpm_cb_parse_ptype,
175 	.main_loop              = fib_main_loop,
176 	.get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
177 	.get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
178 };
179 
180 /*
181  * 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
182  * 198.18.{0-7}.0/24 = Port {0-7}
183  */
184 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
185 	{RTE_IPV4(198, 18, 0, 0), 24, 0},
186 	{RTE_IPV4(198, 18, 1, 0), 24, 1},
187 	{RTE_IPV4(198, 18, 2, 0), 24, 2},
188 	{RTE_IPV4(198, 18, 3, 0), 24, 3},
189 	{RTE_IPV4(198, 18, 4, 0), 24, 4},
190 	{RTE_IPV4(198, 18, 5, 0), 24, 5},
191 	{RTE_IPV4(198, 18, 6, 0), 24, 6},
192 	{RTE_IPV4(198, 18, 7, 0), 24, 7},
193 };
194 
195 /*
196  * 2001:200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC5180).
197  * 2001:200:0:{0-7}::/64 = Port {0-7}
198  */
199 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
200 	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
201 	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
202 	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
203 	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
204 	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
205 	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
206 	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
207 	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
208 };
209 
210 /*
211  * Set up the lookup methods for forwarding.
212  * Currently exact-match, longest-prefix-match and forwarding information
213  * base are supported.
214  */
215 static void
216 setup_l3fwd_lookup_tables(void)
217 {
218 	/* Setup HASH lookup functions. */
219 	if (lookup_mode == L3FWD_LOOKUP_EM)
220 		l3fwd_lkp = l3fwd_em_lkp;
221 	/* Setup FIB lookup functions. */
222 	else if (lookup_mode == L3FWD_LOOKUP_FIB)
223 		l3fwd_lkp = l3fwd_fib_lkp;
224 	/* Setup LPM lookup functions. */
225 	else
226 		l3fwd_lkp = l3fwd_lpm_lkp;
227 }
228 
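/*
 * Validate the (port, queue, lcore) tuples: queue ids must be in range and
 * lcores must be enabled; warn if an lcore sits on a non-zero NUMA socket
 * while NUMA awareness is off.
 */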
229 static int
230 check_lcore_params(void)
231 {
232 	uint8_t queue, lcore;
233 	uint16_t i;
234 	int socketid;
235 
236 	for (i = 0; i < nb_lcore_params; ++i) {
237 		queue = lcore_params[i].queue_id;
238 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
239 			printf("invalid queue number: %hhu\n", queue);
240 			return -1;
241 		}
242 		lcore = lcore_params[i].lcore_id;
243 		if (!rte_lcore_is_enabled(lcore)) {
244 			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
245 			return -1;
246 		}
247 		socketid = rte_lcore_to_socket_id(lcore);
248 		if ((socketid != 0) && (numa_on == 0)) {
249 			printf("warning: lcore %hhu is on socket %d with numa off\n",
250 				lcore, socketid);
251 		}
252 	}
253 	return 0;
254 }
255 
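/* Verify that every port referenced by the lcore config is enabled in the
 * port mask and present on the system.
 */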
256 static int
257 check_port_config(void)
258 {
259 	uint16_t portid;
260 	uint16_t i;
261 
262 	for (i = 0; i < nb_lcore_params; ++i) {
263 		portid = lcore_params[i].port_id;
264 		if ((enabled_port_mask & (1 << portid)) == 0) {
265 			printf("port %u is not enabled in port mask\n", portid);
266 			return -1;
267 		}
268 		if (!rte_eth_dev_is_valid_port(portid)) {
269 			printf("port %u is not present on the board\n", portid);
270 			return -1;
271 		}
272 	}
273 	return 0;
274 }
275 
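/* Return the number of Rx queues configured for a port; queue ids must be
 * contiguous and start at 0.
 */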
276 static uint8_t
277 get_port_n_rx_queues(const uint16_t port)
278 {
279 	int queue = -1;
280 	uint16_t i;
281 
282 	for (i = 0; i < nb_lcore_params; ++i) {
283 		if (lcore_params[i].port_id == port) {
284 			if (lcore_params[i].queue_id == queue+1)
285 				queue = lcore_params[i].queue_id;
286 			else
287 				rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
288 						" in sequence and must start with 0\n",
289 						lcore_params[i].port_id);
290 		}
291 	}
292 	return (uint8_t)(++queue);
293 }
294 
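/* Distribute the configured (port, queue) pairs into each lcore's rx_queue_list. */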
295 static int
296 init_lcore_rx_queues(void)
297 {
298 	uint16_t i, nb_rx_queue;
299 	uint8_t lcore;
300 
301 	for (i = 0; i < nb_lcore_params; ++i) {
302 		lcore = lcore_params[i].lcore_id;
303 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
304 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
305 			printf("error: too many queues (%u) for lcore: %u\n",
306 				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
307 			return -1;
308 		} else {
309 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
310 				lcore_params[i].port_id;
311 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
312 				lcore_params[i].queue_id;
313 			lcore_conf[lcore].n_rx_queue++;
314 		}
315 	}
316 	return 0;
317 }
318 
319 /* display usage */
320 static void
321 print_usage(const char *prgname)
322 {
323 	fprintf(stderr, "%s [EAL options] --"
324 		" -p PORTMASK"
325 		" [-P]"
326 		" [--lookup]"
327 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
328 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
329 		" [--max-pkt-len PKTLEN]"
330 		" [--no-numa]"
331 		" [--hash-entry-num]"
332 		" [--ipv6]"
333 		" [--parse-ptype]"
334 		" [--per-port-pool]"
335 		" [--mode]"
336 		" [--eventq-sched]"
337 		" [-E]"
338 		" [-L]\n\n"
339 
340 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
341 		"  -P : Enable promiscuous mode\n"
342 		"  --lookup: Select the lookup method\n"
343 		"            Default: lpm\n"
344 		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
345 		"  --config (port,queue,lcore): Rx queue configuration\n"
346 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
347 		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
348 		"  --no-numa: Disable numa awareness\n"
349 		"  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
350 		"  --ipv6: Set if running ipv6 packets\n"
351 		"  --parse-ptype: Set to use software to analyze packet type\n"
352 		"  --per-port-pool: Use separate buffer pool per port\n"
353 		"  --mode: Packet transfer mode for I/O, poll or eventdev\n"
354 		"          Default mode = poll\n"
355 		"  --eventq-sched: Event queue synchronization method\n"
356 		"                  ordered, atomic or parallel.\n"
357 		"                  Default: atomic\n"
358 		"                  Valid only if --mode=eventdev\n"
359 		"  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
360 		"                    Default: 1\n"
361 		"                    Valid only if --mode=eventdev\n"
362 		"  -E : Enable exact match (legacy flag; please use --lookup=em instead)\n"
363 		"  -L : Enable longest prefix match (legacy flag; please use --lookup=lpm instead)\n\n",
364 		prgname);
365 }
366 
367 static int
368 parse_max_pkt_len(const char *pktlen)
369 {
370 	char *end = NULL;
371 	unsigned long len;
372 
373 	/* parse decimal string */
374 	len = strtoul(pktlen, &end, 10);
375 	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
376 		return -1;
377 
378 	if (len == 0)
379 		return -1;
380 
381 	return len;
382 }
383 
384 static int
385 parse_portmask(const char *portmask)
386 {
387 	char *end = NULL;
388 	unsigned long pm;
389 
390 	/* parse hexadecimal string */
391 	pm = strtoul(portmask, &end, 16);
392 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
393 		return 0;
394 
395 	return pm;
396 }
397 
398 static int
399 parse_hash_entry_number(const char *hash_entry_num)
400 {
401 	char *end = NULL;
402 	unsigned long hash_en;
403 	/* parse hexadecimal string */
404 	hash_en = strtoul(hash_entry_num, &end, 16);
405 	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
406 		return -1;
407 
408 	if (hash_en == 0)
409 		return -1;
410 
411 	return hash_en;
412 }
413 
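/*
 * Parse the --config option, e.g. --config="(0,0,1),(0,1,2)" maps port 0
 * queue 0 to lcore 1 and port 0 queue 1 to lcore 2.
 */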
414 static int
415 parse_config(const char *q_arg)
416 {
417 	char s[256];
418 	const char *p, *p0 = q_arg;
419 	char *end;
420 	enum fieldnames {
421 		FLD_PORT = 0,
422 		FLD_QUEUE,
423 		FLD_LCORE,
424 		_NUM_FLD
425 	};
426 	unsigned long int_fld[_NUM_FLD];
427 	char *str_fld[_NUM_FLD];
428 	int i;
429 	unsigned size;
430 
431 	nb_lcore_params = 0;
432 
433 	while ((p = strchr(p0, '(')) != NULL) {
434 		++p;
435 		if ((p0 = strchr(p, ')')) == NULL)
436 			return -1;
437 
438 		size = p0 - p;
439 		if (size >= sizeof(s))
440 			return -1;
441 
442 		snprintf(s, sizeof(s), "%.*s", size, p);
443 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
444 			return -1;
445 		for (i = 0; i < _NUM_FLD; i++) {
446 			errno = 0;
447 			int_fld[i] = strtoul(str_fld[i], &end, 0);
448 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
449 				return -1;
450 		}
451 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
452 			printf("exceeded max number of lcore params: %hu\n",
453 				nb_lcore_params);
454 			return -1;
455 		}
456 		lcore_params_array[nb_lcore_params].port_id =
457 			(uint8_t)int_fld[FLD_PORT];
458 		lcore_params_array[nb_lcore_params].queue_id =
459 			(uint8_t)int_fld[FLD_QUEUE];
460 		lcore_params_array[nb_lcore_params].lcore_id =
461 			(uint8_t)int_fld[FLD_LCORE];
462 		++nb_lcore_params;
463 	}
464 	lcore_params = lcore_params_array;
465 	return 0;
466 }
467 
468 static void
469 parse_eth_dest(const char *optarg)
470 {
471 	uint16_t portid;
472 	char *port_end;
473 	uint8_t c, *dest, peer_addr[6];
474 
475 	errno = 0;
476 	portid = strtoul(optarg, &port_end, 10);
477 	if (errno != 0 || port_end == optarg || *port_end++ != ',')
478 		rte_exit(EXIT_FAILURE,
479 		"Invalid eth-dest: %s\n", optarg);
480 	if (portid >= RTE_MAX_ETHPORTS)
481 		rte_exit(EXIT_FAILURE,
482 		"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
483 		portid, RTE_MAX_ETHPORTS);
484 
485 	if (cmdline_parse_etheraddr(NULL, port_end,
486 		&peer_addr, sizeof(peer_addr)) < 0)
487 		rte_exit(EXIT_FAILURE,
488 		"Invalid ethernet address: %s\n",
489 		port_end);
490 	dest = (uint8_t *)&dest_eth_addr[portid];
491 	for (c = 0; c < 6; c++)
492 		dest[c] = peer_addr[c];
493 	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
494 }
495 
496 static void
497 parse_mode(const char *optarg)
498 {
499 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
500 
501 	if (!strcmp(optarg, "poll"))
502 		evt_rsrc->enabled = false;
503 	else if (!strcmp(optarg, "eventdev"))
504 		evt_rsrc->enabled = true;
505 }
506 
507 static void
508 parse_eventq_sched(const char *optarg)
509 {
510 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
511 
512 	if (!strcmp(optarg, "ordered"))
513 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
514 	if (!strcmp(optarg, "atomic"))
515 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
516 	if (!strcmp(optarg, "parallel"))
517 		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
518 }
519 
520 static void
521 parse_event_eth_rx_queues(const char *eth_rx_queues)
522 {
523 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
524 	char *end = NULL;
525 	uint8_t num_eth_rx_queues;
526 
527 	/* parse decimal string */
528 	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
529 	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
530 		return;
531 
532 	if (num_eth_rx_queues == 0)
533 		return;
534 
535 	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
536 }
537 
538 static int
539 parse_lookup(const char *optarg)
540 {
541 	if (!strcmp(optarg, "em"))
542 		lookup_mode = L3FWD_LOOKUP_EM;
543 	else if (!strcmp(optarg, "lpm"))
544 		lookup_mode = L3FWD_LOOKUP_LPM;
545 	else if (!strcmp(optarg, "fib"))
546 		lookup_mode = L3FWD_LOOKUP_FIB;
547 	else {
548 		fprintf(stderr, "Invalid lookup option! Accepted options: em, lpm, fib\n");
549 		return -1;
550 	}
551 	return 0;
552 }
553 
554 #define MAX_JUMBO_PKT_LEN  9600
555 
556 static const char short_options[] =
557 	"p:"  /* portmask */
558 	"P"   /* promiscuous */
559 	"L"   /* legacy enable long prefix match */
560 	"E"   /* legacy enable exact match */
561 	;
562 
563 #define CMD_LINE_OPT_CONFIG "config"
564 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
565 #define CMD_LINE_OPT_NO_NUMA "no-numa"
566 #define CMD_LINE_OPT_IPV6 "ipv6"
567 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
568 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
569 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
570 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
571 #define CMD_LINE_OPT_MODE "mode"
572 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
573 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
574 #define CMD_LINE_OPT_LOOKUP "lookup"
575 enum {
576 	/* long options mapped to a short option */
577 
578 	/* first long-only option value must be >= 256, so that we won't
579 	 * conflict with short options */
580 	CMD_LINE_OPT_MIN_NUM = 256,
581 	CMD_LINE_OPT_CONFIG_NUM,
582 	CMD_LINE_OPT_ETH_DEST_NUM,
583 	CMD_LINE_OPT_NO_NUMA_NUM,
584 	CMD_LINE_OPT_IPV6_NUM,
585 	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
586 	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
587 	CMD_LINE_OPT_PARSE_PTYPE_NUM,
588 	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
589 	CMD_LINE_OPT_MODE_NUM,
590 	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
591 	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
592 	CMD_LINE_OPT_LOOKUP_NUM,
593 };
594 
595 static const struct option lgopts[] = {
596 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
597 	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
598 	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
599 	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
600 	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
601 	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
602 	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
603 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
604 	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
605 	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
606 	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
607 					CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
608 	{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
609 	{NULL, 0, 0, 0}
610 };
611 
612 /*
613  * This expression is used to calculate the number of mbufs needed
614  * depending on user input, taking into account memory for rx and
615  * tx hardware rings, cache per lcore and mtable per port per lcore.
616  * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
617  * value of 8192.
618  */
619 #define NB_MBUF(nports) RTE_MAX(	\
620 	(nports*nb_rx_queue*nb_rxd +		\
621 	nports*nb_lcores*MAX_PKT_BURST +	\
622 	nports*n_tx_queue*nb_txd +		\
623 	nb_lcores*MEMPOOL_CACHE_SIZE),		\
624 	(unsigned)8192)
625 
626 /* Parse the argument given in the command line of the application */
627 static int
628 parse_args(int argc, char **argv)
629 {
630 	int opt, ret;
631 	char **argvopt;
632 	int option_index;
633 	char *prgname = argv[0];
634 	uint8_t lcore_params = 0;
635 	uint8_t eventq_sched = 0;
636 	uint8_t eth_rx_q = 0;
637 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
638 
639 	argvopt = argv;
640 
641 	/* Parse command-line options. */
642 	while ((opt = getopt_long(argc, argvopt, short_options,
643 				lgopts, &option_index)) != EOF) {
644 
645 		switch (opt) {
646 		/* portmask */
647 		case 'p':
648 			enabled_port_mask = parse_portmask(optarg);
649 			if (enabled_port_mask == 0) {
650 				fprintf(stderr, "Invalid portmask\n");
651 				print_usage(prgname);
652 				return -1;
653 			}
654 			break;
655 
656 		case 'P':
657 			promiscuous_on = 1;
658 			break;
659 
660 		case 'E':
661 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
662 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
663 				return -1;
664 			}
665 			lookup_mode = L3FWD_LOOKUP_EM;
666 			break;
667 
668 		case 'L':
669 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
670 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
671 				return -1;
672 			}
673 			lookup_mode = L3FWD_LOOKUP_LPM;
674 			break;
675 
676 		/* long options */
677 		case CMD_LINE_OPT_CONFIG_NUM:
678 			ret = parse_config(optarg);
679 			if (ret) {
680 				fprintf(stderr, "Invalid config\n");
681 				print_usage(prgname);
682 				return -1;
683 			}
684 			lcore_params = 1;
685 			break;
686 
687 		case CMD_LINE_OPT_ETH_DEST_NUM:
688 			parse_eth_dest(optarg);
689 			break;
690 
691 		case CMD_LINE_OPT_NO_NUMA_NUM:
692 			numa_on = 0;
693 			break;
694 
695 		case CMD_LINE_OPT_IPV6_NUM:
696 			ipv6 = 1;
697 			break;
698 
699 		case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
700 			max_pkt_len = parse_max_pkt_len(optarg);
701 			break;
702 
703 		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
704 			ret = parse_hash_entry_number(optarg);
705 			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
706 				hash_entry_number = ret;
707 			} else {
708 				fprintf(stderr, "invalid hash entry number\n");
709 				print_usage(prgname);
710 				return -1;
711 			}
712 			break;
713 
714 		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
715 			printf("soft parse-ptype is enabled\n");
716 			parse_ptype = 1;
717 			break;
718 
719 		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
720 			printf("per port buffer pool is enabled\n");
721 			per_port_pool = 1;
722 			break;
723 
724 		case CMD_LINE_OPT_MODE_NUM:
725 			parse_mode(optarg);
726 			break;
727 
728 		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
729 			parse_eventq_sched(optarg);
730 			eventq_sched = 1;
731 			break;
732 
733 		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
734 			parse_event_eth_rx_queues(optarg);
735 			eth_rx_q = 1;
736 			break;
737 
738 		case CMD_LINE_OPT_LOOKUP_NUM:
739 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
740 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
741 				return -1;
742 			}
743 			ret = parse_lookup(optarg);
744 			/*
745 			 * If parse_lookup was passed an invalid lookup type
746 			 * then return -1. Error log included within
747 			 * parse_lookup for simplicity.
748 			 */
749 			if (ret)
750 				return -1;
751 			break;
752 
753 		default:
754 			print_usage(prgname);
755 			return -1;
756 		}
757 	}
758 
759 	if (evt_rsrc->enabled && lcore_params) {
760 		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
761 		return -1;
762 	}
763 
764 	if (!evt_rsrc->enabled && eth_rx_q) {
765 		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
766 		return -1;
767 	}
768 
769 	if (!evt_rsrc->enabled && eventq_sched) {
770 		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
771 		return -1;
772 	}
773 
774 	/*
775 	 * If nothing is selected, pick longest-prefix match
776 	 * as the default.
777 	 */
778 	if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
779 		fprintf(stderr, "Neither LPM, EM, nor FIB selected, defaulting to LPM\n");
780 		lookup_mode = L3FWD_LOOKUP_LPM;
781 	}
782 
783 	/*
784 	 * The ipv6 and hash flags are valid only for
785 	 * exact match; reset them to their defaults for
786 	 * longest-prefix match.
787 	 */
788 	if (lookup_mode == L3FWD_LOOKUP_LPM) {
789 		ipv6 = 0;
790 		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
791 	}
792 
793 	if (optind >= 0)
794 		argv[optind-1] = prgname;
795 
796 	ret = optind-1;
797 	optind = 1; /* reset getopt lib */
798 	return ret;
799 }
800 
801 static void
802 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
803 {
804 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
805 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
806 	printf("%s%s", name, buf);
807 }
808 
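/*
 * Create the per-socket mbuf pool(s) for the given port, run the lookup
 * table setup once per socket and point each enabled lcore at the lookup
 * structures of its socket.
 */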
809 int
810 init_mem(uint16_t portid, unsigned int nb_mbuf)
811 {
812 	struct lcore_conf *qconf;
813 	int socketid;
814 	unsigned lcore_id;
815 	char s[64];
816 
817 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
818 		if (rte_lcore_is_enabled(lcore_id) == 0)
819 			continue;
820 
821 		if (numa_on)
822 			socketid = rte_lcore_to_socket_id(lcore_id);
823 		else
824 			socketid = 0;
825 
826 		if (socketid >= NB_SOCKETS) {
827 			rte_exit(EXIT_FAILURE,
828 				"Socket %d of lcore %u is out of range %d\n",
829 				socketid, lcore_id, NB_SOCKETS);
830 		}
831 
832 		if (pktmbuf_pool[portid][socketid] == NULL) {
833 			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
834 				 portid, socketid);
835 			pktmbuf_pool[portid][socketid] =
836 				rte_pktmbuf_pool_create(s, nb_mbuf,
837 					MEMPOOL_CACHE_SIZE, 0,
838 					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
839 			if (pktmbuf_pool[portid][socketid] == NULL)
840 				rte_exit(EXIT_FAILURE,
841 					"Cannot init mbuf pool on socket %d\n",
842 					socketid);
843 			else
844 				printf("Allocated mbuf pool on socket %d\n",
845 					socketid);
846 
847 			/* Set up LPM, EM (i.e. hash) or FIB, but only once per
848 			 * available socket.
849 			 */
850 			if (!lkp_per_socket[socketid]) {
851 				l3fwd_lkp.setup(socketid);
852 				lkp_per_socket[socketid] = 1;
853 			}
854 		}
855 		qconf = &lcore_conf[lcore_id];
856 		qconf->ipv4_lookup_struct =
857 			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
858 		qconf->ipv6_lookup_struct =
859 			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
860 	}
861 	return 0;
862 }
863 
864 /* Check the link status of all ports for up to 9s and print the final status. */
865 static void
866 check_all_ports_link_status(uint32_t port_mask)
867 {
868 #define CHECK_INTERVAL 100 /* 100ms */
869 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
870 	uint16_t portid;
871 	uint8_t count, all_ports_up, print_flag = 0;
872 	struct rte_eth_link link;
873 	int ret;
874 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
875 
876 	printf("\nChecking link status");
877 	fflush(stdout);
878 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
879 		if (force_quit)
880 			return;
881 		all_ports_up = 1;
882 		RTE_ETH_FOREACH_DEV(portid) {
883 			if (force_quit)
884 				return;
885 			if ((port_mask & (1 << portid)) == 0)
886 				continue;
887 			memset(&link, 0, sizeof(link));
888 			ret = rte_eth_link_get_nowait(portid, &link);
889 			if (ret < 0) {
890 				all_ports_up = 0;
891 				if (print_flag == 1)
892 					printf("Port %u link get failed: %s\n",
893 						portid, rte_strerror(-ret));
894 				continue;
895 			}
896 			/* print link status if flag set */
897 			if (print_flag == 1) {
898 				rte_eth_link_to_str(link_status_text,
899 					sizeof(link_status_text), &link);
900 				printf("Port %d %s\n", portid,
901 				       link_status_text);
902 				continue;
903 			}
904 			/* clear all_ports_up flag if any link down */
905 			if (link.link_status == ETH_LINK_DOWN) {
906 				all_ports_up = 0;
907 				break;
908 			}
909 		}
910 		/* after finally printing all link status, get out */
911 		if (print_flag == 1)
912 			break;
913 
914 		if (all_ports_up == 0) {
915 			printf(".");
916 			fflush(stdout);
917 			rte_delay_ms(CHECK_INTERVAL);
918 		}
919 
920 		/* set the print_flag if all ports up or timeout */
921 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
922 			print_flag = 1;
923 			printf("done\n");
924 		}
925 	}
926 }
927 
928 static void
929 signal_handler(int signum)
930 {
931 	if (signum == SIGINT || signum == SIGTERM) {
932 		printf("\n\nSignal %d received, preparing to exit...\n",
933 				signum);
934 		force_quit = true;
935 	}
936 }
937 
938 static int
939 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
940 {
941 	if (parse_ptype) {
942 		printf("Port %d: parsing packet type in software\n", portid);
943 		if (rte_eth_add_rx_callback(portid, queueid,
944 					    l3fwd_lkp.cb_parse_ptype,
945 					    NULL))
946 			return 1;
947 
948 		printf("Failed to add rx callback: port=%d\n", portid);
949 		return 0;
950 	}
951 
952 	if (l3fwd_lkp.check_ptype(portid))
953 		return 1;
954 
955 	printf("port %d cannot parse packet type, please add --%s\n",
956 	       portid, CMD_LINE_OPT_PARSE_PTYPE);
957 	return 0;
958 }
959 
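/* Derive the L2 overhead from the device's reported max Rx packet length and
 * max MTU, falling back to Ethernet header + CRC.
 */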
960 static uint32_t
961 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
962 {
963 	uint32_t overhead_len;
964 
965 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
966 		overhead_len = max_rx_pktlen - max_mtu;
967 	else
968 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
969 
970 	return overhead_len;
971 }
972 
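/*
 * Translate the user supplied --max-pkt-len into a port MTU, accounting for
 * L2 overhead, and enable multi-segment Tx when the resulting MTU exceeds the
 * standard Ethernet MTU.
 */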
973 static int
974 config_port_max_pkt_len(struct rte_eth_conf *conf,
975 		struct rte_eth_dev_info *dev_info)
976 {
977 	uint32_t overhead_len;
978 
979 	if (max_pkt_len == 0)
980 		return 0;
981 
982 	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
983 		return -1;
984 
985 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
986 			dev_info->max_mtu);
987 	conf->rxmode.mtu = max_pkt_len - overhead_len;
988 
989 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
990 		conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
991 
992 	return 0;
993 }
994 
995 static void
996 l3fwd_poll_resource_setup(void)
997 {
998 	uint8_t nb_rx_queue, queue, socketid;
999 	struct rte_eth_dev_info dev_info;
1000 	uint32_t n_tx_queue, nb_lcores;
1001 	struct rte_eth_txconf *txconf;
1002 	struct lcore_conf *qconf;
1003 	uint16_t queueid, portid;
1004 	unsigned int nb_ports;
1005 	unsigned int lcore_id;
1006 	int ret;
1007 
1008 	if (check_lcore_params() < 0)
1009 		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1010 
1011 	ret = init_lcore_rx_queues();
1012 	if (ret < 0)
1013 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1014 
1015 	nb_ports = rte_eth_dev_count_avail();
1016 
1017 	if (check_port_config() < 0)
1018 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1019 
1020 	nb_lcores = rte_lcore_count();
1021 
1022 	/* initialize all ports */
1023 	RTE_ETH_FOREACH_DEV(portid) {
1024 		struct rte_eth_conf local_port_conf = port_conf;
1025 
1026 		/* skip ports that are not enabled */
1027 		if ((enabled_port_mask & (1 << portid)) == 0) {
1028 			printf("\nSkipping disabled port %d\n", portid);
1029 			continue;
1030 		}
1031 
1032 		/* init port */
1033 		printf("Initializing port %d ... ", portid);
1034 		fflush(stdout);
1035 
1036 		nb_rx_queue = get_port_n_rx_queues(portid);
1037 		n_tx_queue = nb_lcores;
1038 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1039 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1040 		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1041 			nb_rx_queue, (unsigned)n_tx_queue );
1042 
1043 		ret = rte_eth_dev_info_get(portid, &dev_info);
1044 		if (ret != 0)
1045 			rte_exit(EXIT_FAILURE,
1046 				"Error during getting device (port %u) info: %s\n",
1047 				portid, strerror(-ret));
1048 
1049 		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
1050 		if (ret != 0)
1051 			rte_exit(EXIT_FAILURE,
1052 				"Invalid max packet length: %u (port %u)\n",
1053 				max_pkt_len, portid);
1054 
1055 		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1056 			local_port_conf.txmode.offloads |=
1057 				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1058 
1059 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1060 			dev_info.flow_type_rss_offloads;
1061 
1062 		if (dev_info.max_rx_queues == 1)
1063 			local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1064 
1065 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1066 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
1067 			printf("Port %u modified RSS hash function based on hardware support,"
1068 				" requested:%#"PRIx64" configured:%#"PRIx64"\n",
1069 				portid,
1070 				port_conf.rx_adv_conf.rss_conf.rss_hf,
1071 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1072 		}
1073 
1074 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
1075 					(uint16_t)n_tx_queue, &local_port_conf);
1076 		if (ret < 0)
1077 			rte_exit(EXIT_FAILURE,
1078 				"Cannot configure device: err=%d, port=%d\n",
1079 				ret, portid);
1080 
1081 		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1082 						       &nb_txd);
1083 		if (ret < 0)
1084 			rte_exit(EXIT_FAILURE,
1085 				 "Cannot adjust number of descriptors: err=%d, "
1086 				 "port=%d\n", ret, portid);
1087 
1088 		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1089 		if (ret < 0)
1090 			rte_exit(EXIT_FAILURE,
1091 				 "Cannot get MAC address: err=%d, port=%d\n",
1092 				 ret, portid);
1093 
1094 		print_ethaddr(" Address:", &ports_eth_addr[portid]);
1095 		printf(", ");
1096 		print_ethaddr("Destination:",
1097 			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
1098 		printf(", ");
1099 
1100 		/*
1101 		 * prepare src MACs for each port.
1102 		 */
1103 		rte_ether_addr_copy(&ports_eth_addr[portid],
1104 			(struct rte_ether_addr *)(val_eth + portid) + 1);
1105 
1106 		/* init memory */
1107 		if (!per_port_pool) {
1108 			/* portid = 0; this does *not* refer to the first port,
1109 			 * rather, it means that the portid is ignored.
1110 			 */
1111 			ret = init_mem(0, NB_MBUF(nb_ports));
1112 		} else {
1113 			ret = init_mem(portid, NB_MBUF(1));
1114 		}
1115 		if (ret < 0)
1116 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
1117 
1118 		/* init one TX queue per couple (lcore,port) */
1119 		queueid = 0;
1120 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1121 			if (rte_lcore_is_enabled(lcore_id) == 0)
1122 				continue;
1123 
1124 			if (numa_on)
1125 				socketid =
1126 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1127 			else
1128 				socketid = 0;
1129 
1130 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1131 			fflush(stdout);
1132 
1133 			txconf = &dev_info.default_txconf;
1134 			txconf->offloads = local_port_conf.txmode.offloads;
1135 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1136 						     socketid, txconf);
1137 			if (ret < 0)
1138 				rte_exit(EXIT_FAILURE,
1139 					"rte_eth_tx_queue_setup: err=%d, "
1140 					"port=%d\n", ret, portid);
1141 
1142 			qconf = &lcore_conf[lcore_id];
1143 			qconf->tx_queue_id[portid] = queueid;
1144 			queueid++;
1145 
1146 			qconf->tx_port_id[qconf->n_tx_port] = portid;
1147 			qconf->n_tx_port++;
1148 		}
1149 		printf("\n");
1150 	}
1151 
1152 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1153 		if (rte_lcore_is_enabled(lcore_id) == 0)
1154 			continue;
1155 		qconf = &lcore_conf[lcore_id];
1156 		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
1157 		fflush(stdout);
1158 		/* init RX queues */
1159 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1160 			struct rte_eth_rxconf rxq_conf;
1161 
1162 			portid = qconf->rx_queue_list[queue].port_id;
1163 			queueid = qconf->rx_queue_list[queue].queue_id;
1164 
1165 			if (numa_on)
1166 				socketid =
1167 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1168 			else
1169 				socketid = 0;
1170 
1171 			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1172 			fflush(stdout);
1173 
1174 			ret = rte_eth_dev_info_get(portid, &dev_info);
1175 			if (ret != 0)
1176 				rte_exit(EXIT_FAILURE,
1177 					"Error during getting device (port %u) info: %s\n",
1178 					portid, strerror(-ret));
1179 
1180 			rxq_conf = dev_info.default_rxconf;
1181 			rxq_conf.offloads = port_conf.rxmode.offloads;
1182 			if (!per_port_pool)
1183 				ret = rte_eth_rx_queue_setup(portid, queueid,
1184 						nb_rxd, socketid,
1185 						&rxq_conf,
1186 						pktmbuf_pool[0][socketid]);
1187 			else
1188 				ret = rte_eth_rx_queue_setup(portid, queueid,
1189 						nb_rxd, socketid,
1190 						&rxq_conf,
1191 						pktmbuf_pool[portid][socketid]);
1192 			if (ret < 0)
1193 				rte_exit(EXIT_FAILURE,
1194 				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
1195 				ret, portid);
1196 		}
1197 	}
1198 }
1199 
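/* Map the given service to the service lcore running the fewest services and
 * start that lcore.
 */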
1200 static inline int
1201 l3fwd_service_enable(uint32_t service_id)
1202 {
1203 	uint8_t min_service_count = UINT8_MAX;
1204 	uint32_t slcore_array[RTE_MAX_LCORE];
1205 	unsigned int slcore = 0;
1206 	uint8_t service_count;
1207 	int32_t slcore_count;
1208 
1209 	if (!rte_service_lcore_count())
1210 		return -ENOENT;
1211 
1212 	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1213 	if (slcore_count < 0)
1214 		return -ENOENT;
1215 	/* Get the core which has the least number of services running. */
1216 	while (slcore_count--) {
1217 		/* Reset default mapping */
1218 		if (rte_service_map_lcore_set(service_id,
1219 				slcore_array[slcore_count], 0) != 0)
1220 			return -ENOENT;
1221 		service_count = rte_service_lcore_count_services(
1222 				slcore_array[slcore_count]);
1223 		if (service_count < min_service_count) {
1224 			slcore = slcore_array[slcore_count];
1225 			min_service_count = service_count;
1226 		}
1227 	}
1228 	if (rte_service_map_lcore_set(service_id, slcore, 1))
1229 		return -ENOENT;
1230 	rte_service_lcore_start(slcore);
1231 
1232 	return 0;
1233 }
1234 
1235 static void
1236 l3fwd_event_service_setup(void)
1237 {
1238 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1239 	struct rte_event_dev_info evdev_info;
1240 	uint32_t service_id, caps;
1241 	int ret, i;
1242 
1243 	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1244 	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1245 		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1246 				&service_id);
1247 		if (ret != -ESRCH && ret != 0)
1248 			rte_exit(EXIT_FAILURE,
1249 				 "Error in starting eventdev service\n");
1250 		l3fwd_service_enable(service_id);
1251 	}
1252 
1253 	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1254 		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1255 				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1256 		if (ret < 0)
1257 			rte_exit(EXIT_FAILURE,
1258 				 "Failed to get Rx adapter[%d] caps\n",
1259 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1260 		ret = rte_event_eth_rx_adapter_service_id_get(
1261 				evt_rsrc->event_d_id,
1262 				&service_id);
1263 		if (ret != -ESRCH && ret != 0)
1264 			rte_exit(EXIT_FAILURE,
1265 				 "Error in starting Rx adapter[%d] service\n",
1266 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1267 		l3fwd_service_enable(service_id);
1268 	}
1269 
1270 	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1271 		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1272 				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1273 		if (ret < 0)
1274 			rte_exit(EXIT_FAILURE,
1275 				 "Failed to get Tx adapter[%d] caps\n",
1276 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1277 		ret = rte_event_eth_tx_adapter_service_id_get(
1278 				evt_rsrc->event_d_id,
1279 				&service_id);
1280 		if (ret != -ESRCH && ret != 0)
1281 			rte_exit(EXIT_FAILURE,
1282 				 "Error in starting Tx adapter[%d] service\n",
1283 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1284 		l3fwd_service_enable(service_id);
1285 	}
1286 }
1287 
1288 int
1289 main(int argc, char **argv)
1290 {
1291 	struct l3fwd_event_resources *evt_rsrc;
1292 	struct lcore_conf *qconf;
1293 	uint16_t queueid, portid;
1294 	unsigned int lcore_id;
1295 	uint8_t queue;
1296 	int i, ret;
1297 
1298 	/* init EAL */
1299 	ret = rte_eal_init(argc, argv);
1300 	if (ret < 0)
1301 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1302 	argc -= ret;
1303 	argv += ret;
1304 
1305 	force_quit = false;
1306 	signal(SIGINT, signal_handler);
1307 	signal(SIGTERM, signal_handler);
1308 
1309 	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1310 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1311 		dest_eth_addr[portid] =
1312 			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1313 		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1314 	}
1315 
1316 	evt_rsrc = l3fwd_get_eventdev_rsrc();
1317 	/* parse application arguments (after the EAL ones) */
1318 	ret = parse_args(argc, argv);
1319 	if (ret < 0)
1320 		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1321 
1322 	/* Setup function pointers for lookup method. */
1323 	setup_l3fwd_lookup_tables();
1324 
1325 	evt_rsrc->per_port_pool = per_port_pool;
1326 	evt_rsrc->pkt_pool = pktmbuf_pool;
1327 	evt_rsrc->port_mask = enabled_port_mask;
1328 	/* Configure eventdev parameters if user has requested */
1329 	if (evt_rsrc->enabled) {
1330 		l3fwd_event_resource_setup(&port_conf);
1331 		if (lookup_mode == L3FWD_LOOKUP_EM)
1332 			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1333 		else if (lookup_mode == L3FWD_LOOKUP_FIB)
1334 			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1335 		else
1336 			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1337 		l3fwd_event_service_setup();
1338 	} else
1339 		l3fwd_poll_resource_setup();
1340 
1341 	/* start ports */
1342 	RTE_ETH_FOREACH_DEV(portid) {
1343 		if ((enabled_port_mask & (1 << portid)) == 0) {
1344 			continue;
1345 		}
1346 		/* Start device */
1347 		ret = rte_eth_dev_start(portid);
1348 		if (ret < 0)
1349 			rte_exit(EXIT_FAILURE,
1350 				"rte_eth_dev_start: err=%d, port=%d\n",
1351 				ret, portid);
1352 
1353 		/*
1354 		 * If enabled, put device in promiscuous mode.
1355 		 * This allows IO forwarding mode to forward packets
1356 		 * to itself through 2 cross-connected ports of the
1357 		 * target machine.
1358 		 */
1359 		if (promiscuous_on) {
1360 			ret = rte_eth_promiscuous_enable(portid);
1361 			if (ret != 0)
1362 				rte_exit(EXIT_FAILURE,
1363 					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
1364 					rte_strerror(-ret), portid);
1365 		}
1366 	}
1367 
1368 	printf("\n");
1369 
1370 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1371 		if (rte_lcore_is_enabled(lcore_id) == 0)
1372 			continue;
1373 		qconf = &lcore_conf[lcore_id];
1374 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1375 			portid = qconf->rx_queue_list[queue].port_id;
1376 			queueid = qconf->rx_queue_list[queue].queue_id;
1377 			if (prepare_ptype_parser(portid, queueid) == 0)
1378 				rte_exit(EXIT_FAILURE, "ptype check failed\n");
1379 		}
1380 	}
1381 
1382 	check_all_ports_link_status(enabled_port_mask);
1383 
1384 	ret = 0;
1385 	/* launch per-lcore init on every lcore */
1386 	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1387 	if (evt_rsrc->enabled) {
1388 		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1389 			rte_event_eth_rx_adapter_stop(
1390 					evt_rsrc->rx_adptr.rx_adptr[i]);
1391 		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1392 			rte_event_eth_tx_adapter_stop(
1393 					evt_rsrc->tx_adptr.tx_adptr[i]);
1394 
1395 		RTE_ETH_FOREACH_DEV(portid) {
1396 			if ((enabled_port_mask & (1 << portid)) == 0)
1397 				continue;
1398 			ret = rte_eth_dev_stop(portid);
1399 			if (ret != 0)
1400 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1401 				       ret, portid);
1402 		}
1403 
1404 		rte_eal_mp_wait_lcore();
1405 		RTE_ETH_FOREACH_DEV(portid) {
1406 			if ((enabled_port_mask & (1 << portid)) == 0)
1407 				continue;
1408 			rte_eth_dev_close(portid);
1409 		}
1410 
1411 		rte_event_dev_stop(evt_rsrc->event_d_id);
1412 		rte_event_dev_close(evt_rsrc->event_d_id);
1413 
1414 	} else {
1415 		rte_eal_mp_wait_lcore();
1416 
1417 		RTE_ETH_FOREACH_DEV(portid) {
1418 			if ((enabled_port_mask & (1 << portid)) == 0)
1419 				continue;
1420 			printf("Closing port %d...", portid);
1421 			ret = rte_eth_dev_stop(portid);
1422 			if (ret != 0)
1423 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1424 				       ret, portid);
1425 			rte_eth_dev_close(portid);
1426 			printf(" Done\n");
1427 		}
1428 	}
1429 
1430 	/* clean up the EAL */
1431 	rte_eal_cleanup();
1432 
1433 	printf("Bye...\n");
1434 
1435 	return ret;
1436 }
1437