xref: /dpdk/examples/l3fwd/main.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2021 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17 
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_ip.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
43 
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
46 
47 #include "l3fwd.h"
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
50 
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
53 
54 #define MAX_LCORE_PARAMS 1024
55 
56 uint16_t nb_rxd = RX_DESC_DEFAULT;
57 uint16_t nb_txd = TX_DESC_DEFAULT;
58 
59 /* Ports are not set in promiscuous mode by default. */
60 static int promiscuous_on;
61 
62 /* Select Longest Prefix Match, Exact Match, Forwarding Information Base or Access Control List lookup. */
63 enum L3FWD_LOOKUP_MODE {
64 	L3FWD_LOOKUP_DEFAULT,
65 	L3FWD_LOOKUP_LPM,
66 	L3FWD_LOOKUP_EM,
67 	L3FWD_LOOKUP_FIB,
68 	L3FWD_LOOKUP_ACL
69 };
70 static enum L3FWD_LOOKUP_MODE lookup_mode;
71 
72 /* Global variables. */
73 static int numa_on = 1; /**< NUMA is enabled by default. */
74 static int parse_ptype; /**< Parse packet type using Rx callback;
75 			 *   disabled by default. */
76 static int disable_rss; /**< Disable RSS mode. */
77 static int relax_rx_offload; /**< Relax Rx offload mode; disabled by default. */
78 static int per_port_pool; /**< Use separate buffer pools per port;
79 			  *   disabled by default. */
80 
81 volatile bool force_quit;
82 
83 /* ethernet addresses of ports */
84 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
85 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
86 
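/*
 * Per-port Ethernet header template: the destination MAC is stored in the
 * first six bytes and the port's own (source) MAC in the following six,
 * so the forwarding paths can write both addresses of an outgoing packet
 * at once.
 */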
87 xmm_t val_eth[RTE_MAX_ETHPORTS];
88 
89 /* mask of enabled ports */
90 uint32_t enabled_port_mask;
91 
92 /* Used only in exact match mode. */
93 int ipv6; /**< ipv6 is false by default. */
94 
95 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
96 
97 struct parm_cfg parm_config;
98 
99 struct __rte_cache_aligned lcore_params {
100 	uint16_t port_id;
101 	uint16_t queue_id;
102 	uint32_t lcore_id;
103 };
104 
105 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
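/*
 * Default (port, queue, lcore) mapping, used when no --config option is
 * given: queues 0-2 of ports 0 and 1 plus queue 0 of port 2 are polled by
 * lcore 2, and queues 0-1 of port 3 by lcore 3.
 */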
106 static struct lcore_params lcore_params_array_default[] = {
107 	{0, 0, 2},
108 	{0, 1, 2},
109 	{0, 2, 2},
110 	{1, 0, 2},
111 	{1, 1, 2},
112 	{1, 2, 2},
113 	{2, 0, 2},
114 	{3, 0, 3},
115 	{3, 1, 3},
116 };
117 
118 static struct lcore_params * lcore_params = lcore_params_array_default;
119 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
120 				sizeof(lcore_params_array_default[0]);
121 
122 static struct rte_eth_conf port_conf = {
123 	.rxmode = {
124 		.mq_mode = RTE_ETH_MQ_RX_RSS,
125 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
126 	},
127 	.rx_adv_conf = {
128 		.rss_conf = {
129 			.rss_key = NULL,
130 			.rss_hf = RTE_ETH_RSS_IP,
131 		},
132 	},
133 	.txmode = {
134 		.mq_mode = RTE_ETH_MQ_TX_NONE,
135 	},
136 };
137 
138 uint32_t max_pkt_len;
139 
140 #ifdef RTE_LIB_EVENTDEV
141 static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
142 #endif
143 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
144 static uint8_t lkp_per_socket[NB_SOCKETS];
145 
146 struct l3fwd_lkp_mode {
147 	void  (*read_config_files)(void);
148 	void  (*setup)(int);
149 	int   (*check_ptype)(int);
150 	rte_rx_callback_fn cb_parse_ptype;
151 	int   (*main_loop)(void *);
152 	void* (*get_ipv4_lookup_struct)(int);
153 	void* (*get_ipv6_lookup_struct)(int);
154 	void  (*free_routes)(void);
155 };
156 
157 static struct l3fwd_lkp_mode l3fwd_lkp;
158 
159 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
160 	.read_config_files		= read_config_files_em,
161 	.setup                  = setup_hash,
162 	.check_ptype		= em_check_ptype,
163 	.cb_parse_ptype		= em_cb_parse_ptype,
164 	.main_loop              = em_main_loop,
165 	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
166 	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
167 	.free_routes			= em_free_routes,
168 };
169 
170 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
171 	.read_config_files		= read_config_files_lpm,
172 	.setup                  = setup_lpm,
173 	.check_ptype		= lpm_check_ptype,
174 	.cb_parse_ptype		= lpm_cb_parse_ptype,
175 	.main_loop              = lpm_main_loop,
176 	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
177 	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
178 	.free_routes			= lpm_free_routes,
179 };
180 
181 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
182 	.read_config_files		= read_config_files_lpm,
183 	.setup                  = setup_fib,
184 	.check_ptype            = lpm_check_ptype,
185 	.cb_parse_ptype         = lpm_cb_parse_ptype,
186 	.main_loop              = fib_main_loop,
187 	.get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
188 	.get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
189 	.free_routes			= lpm_free_routes,
190 };
191 
192 static struct l3fwd_lkp_mode l3fwd_acl_lkp = {
193 	.read_config_files		= read_config_files_acl,
194 	.setup                  = setup_acl,
195 	.check_ptype            = em_check_ptype,
196 	.cb_parse_ptype         = em_cb_parse_ptype,
197 	.main_loop              = acl_main_loop,
198 	.get_ipv4_lookup_struct = acl_get_ipv4_l3fwd_lookup_struct,
199 	.get_ipv6_lookup_struct = acl_get_ipv6_l3fwd_lookup_struct,
200 	.free_routes			= acl_free_routes,
201 };
202 
203 /*
204  * 198.18.0.0/16 is set aside for RFC 2544 benchmarking (RFC 5735).
205  * 198.18.{0-15}.0/24 = Port {0-15}
206  */
207 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
208 	{RTE_IPV4(198, 18, 0, 0), 24, 0},
209 	{RTE_IPV4(198, 18, 1, 0), 24, 1},
210 	{RTE_IPV4(198, 18, 2, 0), 24, 2},
211 	{RTE_IPV4(198, 18, 3, 0), 24, 3},
212 	{RTE_IPV4(198, 18, 4, 0), 24, 4},
213 	{RTE_IPV4(198, 18, 5, 0), 24, 5},
214 	{RTE_IPV4(198, 18, 6, 0), 24, 6},
215 	{RTE_IPV4(198, 18, 7, 0), 24, 7},
216 	{RTE_IPV4(198, 18, 8, 0), 24, 8},
217 	{RTE_IPV4(198, 18, 9, 0), 24, 9},
218 	{RTE_IPV4(198, 18, 10, 0), 24, 10},
219 	{RTE_IPV4(198, 18, 11, 0), 24, 11},
220 	{RTE_IPV4(198, 18, 12, 0), 24, 12},
221 	{RTE_IPV4(198, 18, 13, 0), 24, 13},
222 	{RTE_IPV4(198, 18, 14, 0), 24, 14},
223 	{RTE_IPV4(198, 18, 15, 0), 24, 15},
224 };
225 
226 /*
227  * 2001:200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180).
228  * 2001:200:0:{0-f}::/64 = Port {0-15}
229  */
230 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
231 	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
232 	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
233 	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
234 	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
235 	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
236 	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
237 	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
238 	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
239 	{{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
240 	{{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
241 	{{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
242 	{{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
243 	{{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
244 	{{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
245 	{{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
246 	{{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
247 };
248 
249 /*
250  * APIs called during initialization to set up the ACL/EM/LPM rules.
251  */
252 void
253 l3fwd_set_rule_ipv4_name(const char *optarg)
254 {
255 	parm_config.rule_ipv4_name = optarg;
256 }
257 
258 void
259 l3fwd_set_rule_ipv6_name(const char *optarg)
260 {
261 	parm_config.rule_ipv6_name = optarg;
262 }
263 
264 void
265 l3fwd_set_alg(const char *optarg)
266 {
267 	parm_config.alg = parse_acl_alg(optarg);
268 }
269 
270 /*
271  * Set up the lookup method used for forwarding.
272  * Exact match, longest prefix match, forwarding information base and
273  * access control list lookups are supported.
274  */
275 static void
276 setup_l3fwd_lookup_tables(void)
277 {
278 	/* Setup HASH lookup functions. */
279 	if (lookup_mode == L3FWD_LOOKUP_EM)
280 		l3fwd_lkp = l3fwd_em_lkp;
281 	/* Setup FIB lookup functions. */
282 	else if (lookup_mode == L3FWD_LOOKUP_FIB)
283 		l3fwd_lkp = l3fwd_fib_lkp;
284 	/* Setup ACL lookup functions. */
285 	else if (lookup_mode == L3FWD_LOOKUP_ACL)
286 		l3fwd_lkp = l3fwd_acl_lkp;
287 	/* Setup LPM lookup functions. */
288 	else
289 		l3fwd_lkp = l3fwd_lpm_lkp;
290 }
291 
292 static int
293 check_lcore_params(void)
294 {
295 	uint16_t queue, i;
296 	uint32_t lcore;
297 	int socketid;
298 
299 	for (i = 0; i < nb_lcore_params; ++i) {
300 		queue = lcore_params[i].queue_id;
301 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
302 			printf("invalid queue number: %" PRIu16 "\n", queue);
303 			return -1;
304 		}
305 		lcore = lcore_params[i].lcore_id;
306 		if (!rte_lcore_is_enabled(lcore)) {
307 			printf("error: lcore %u is not enabled in lcore mask\n", lcore);
308 			return -1;
309 		}
310 		socketid = rte_lcore_to_socket_id(lcore);
311 		if (socketid != 0 && numa_on == 0) {
312 			printf("warning: lcore %u is on socket %d with numa off\n",
313 				lcore, socketid);
314 		}
315 	}
316 	return 0;
317 }
318 
319 static int
320 check_port_config(void)
321 {
322 	uint16_t portid;
323 	uint16_t i;
324 
325 	for (i = 0; i < nb_lcore_params; ++i) {
326 		portid = lcore_params[i].port_id;
327 		if ((enabled_port_mask & (1 << portid)) == 0) {
328 			printf("port %u is not enabled in port mask\n", portid);
329 			return -1;
330 		}
331 		if (!rte_eth_dev_is_valid_port(portid)) {
332 			printf("port %u is not present on the board\n", portid);
333 			return -1;
334 		}
335 	}
336 	return 0;
337 }
338 
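/*
 * Return the number of Rx queues configured for the given port in
 * lcore_params. The queue ids listed for a port must be contiguous and
 * start at 0, otherwise the application exits.
 */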
339 static uint16_t
340 get_port_n_rx_queues(const uint16_t port)
341 {
342 	int queue = -1;
343 	uint16_t i;
344 
345 	for (i = 0; i < nb_lcore_params; ++i) {
346 		if (lcore_params[i].port_id == port) {
347 			if (lcore_params[i].queue_id == queue+1)
348 				queue = lcore_params[i].queue_id;
349 			else
350 				rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
351 						" in sequence and must start with 0\n",
352 						lcore_params[i].port_id);
353 		}
354 	}
355 	return (uint16_t)(++queue);
356 }
357 
358 static int
359 init_lcore_rx_queues(void)
360 {
361 	uint16_t i, nb_rx_queue;
362 	uint32_t lcore;
363 
364 	for (i = 0; i < nb_lcore_params; ++i) {
365 		lcore = lcore_params[i].lcore_id;
366 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
367 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
368 			printf("error: too many queues (%u) for lcore: %u\n",
369 				(unsigned int)nb_rx_queue + 1, lcore);
370 			return -1;
371 		} else {
372 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
373 				lcore_params[i].port_id;
374 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
375 				lcore_params[i].queue_id;
376 			lcore_conf[lcore].n_rx_queue++;
377 		}
378 	}
379 	return 0;
380 }
381 
382 /* display usage */
383 static void
384 print_usage(const char *prgname)
385 {
386 	char alg[PATH_MAX];
387 
388 	usage_acl_alg(alg, sizeof(alg));
389 	fprintf(stderr, "%s [EAL options] --"
390 		" -p PORTMASK"
391 		"  --rule_ipv4=FILE"
392 		"  --rule_ipv6=FILE"
393 		" [-P]"
394 		" [--lookup]"
395 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
396 		" [--rx-queue-size NPKTS]"
397 		" [--tx-queue-size NPKTS]"
398 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
399 		" [--max-pkt-len PKTLEN]"
400 		" [--no-numa]"
401 		" [--ipv6]"
402 		" [--parse-ptype]"
403 		" [--per-port-pool]"
404 		" [--mode]"
405 #ifdef RTE_LIB_EVENTDEV
406 		" [--eventq-sched]"
407 		" [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
408 #endif
409 		" [-E]"
410 		" [-L]\n\n"
411 
412 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
413 		"  -P : Enable promiscuous mode\n"
414 		"  --lookup: Select the lookup method\n"
415 		"            Default: lpm\n"
416 		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base),\n"
417 		"                      acl (Access Control List)\n"
418 		"  --config (port,queue,lcore): Rx queue configuration\n"
419 		"  --rx-queue-size NPKTS: Rx queue size in decimal\n"
420 		"            Default: %d\n"
421 		"  --tx-queue-size NPKTS: Tx queue size in decimal\n"
422 		"            Default: %d\n"
423 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
424 		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
425 		"  --no-numa: Disable numa awareness\n"
426 		"  --ipv6: Set if running ipv6 packets (used only in exact match mode)\n"
427 		"  --parse-ptype: Set to use software to analyze packet type\n"
428 		"  --per-port-pool: Use separate buffer pool per port\n"
429 		"  --mode: Packet transfer mode for I/O, poll or eventdev\n"
430 		"          Default mode = poll\n"
431 #ifdef RTE_LIB_EVENTDEV
432 		"  --eventq-sched: Event queue synchronization method\n"
433 		"                  ordered, atomic or parallel.\n"
434 		"                  Default: atomic\n"
435 		"                  Valid only if --mode=eventdev\n"
436 		"  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
437 		"                    Default: 1\n"
438 		"                    Valid only if --mode=eventdev\n"
439 		"  --event-vector:  Enable event vectorization.\n"
440 		"  --event-vector-size: Max vector size if event vectorization is enabled.\n"
441 		"  --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
442 #endif
443 		"  -E : Enable exact match (legacy flag, please use --lookup=em instead)\n"
444 		"  -L : Enable longest prefix match (legacy flag, please use --lookup=lpm instead)\n"
445 		"  --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n"
446 		"                    Each rule occupies one line.\n"
447 		"                    Two kinds of rules are supported:\n"
448 		"                    an ACL entry, on a line starting with character '%c',\n"
449 		"                    and a route entry, on a line starting with character '%c'.\n"
450 		"  --rule_ipv6=FILE: Specify the ipv6 rules entries file.\n"
451 		"  --alg: ACL classify method to use, one of: %s.\n\n",
452 		prgname, RX_DESC_DEFAULT, TX_DESC_DEFAULT,
453 		ACL_LEAD_CHAR, ROUTE_LEAD_CHAR, alg);
454 }
455 
456 static int
457 parse_max_pkt_len(const char *pktlen)
458 {
459 	char *end = NULL;
460 	unsigned long len;
461 
462 	/* parse decimal string */
463 	len = strtoul(pktlen, &end, 10);
464 	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
465 		return -1;
466 
467 	if (len == 0)
468 		return -1;
469 
470 	return len;
471 }
472 
473 static int
474 parse_portmask(const char *portmask)
475 {
476 	char *end = NULL;
477 	unsigned long pm;
478 
479 	/* parse hexadecimal string */
480 	pm = strtoul(portmask, &end, 16);
481 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
482 		return 0;
483 
484 	return pm;
485 }
486 
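/*
 * Parse the --config option, a list of (port,queue,lcore) tuples, e.g.
 * --config="(0,0,1),(0,1,2),(1,0,1)" polls port 0/queue 0 on lcore 1,
 * port 0/queue 1 on lcore 2 and port 1/queue 0 on lcore 1. Each parsed
 * tuple is appended to lcore_params_array.
 */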
487 static int
488 parse_config(const char *q_arg)
489 {
490 	char s[256];
491 	const char *p, *p0 = q_arg;
492 	char *end;
493 	enum fieldnames {
494 		FLD_PORT = 0,
495 		FLD_QUEUE,
496 		FLD_LCORE,
497 		_NUM_FLD
498 	};
499 	unsigned long int_fld[_NUM_FLD];
500 	char *str_fld[_NUM_FLD];
501 	int i;
502 	unsigned size;
503 	uint16_t max_fld[_NUM_FLD] = {
504 		RTE_MAX_ETHPORTS,
505 		RTE_MAX_QUEUES_PER_PORT,
506 		RTE_MAX_LCORE
507 	};
508 
509 	nb_lcore_params = 0;
510 
511 	while ((p = strchr(p0,'(')) != NULL) {
512 		++p;
513 		if((p0 = strchr(p,')')) == NULL)
514 			return -1;
515 
516 		size = p0 - p;
517 		if(size >= sizeof(s))
518 			return -1;
519 
520 		snprintf(s, sizeof(s), "%.*s", size, p);
521 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
522 			return -1;
523 		for (i = 0; i < _NUM_FLD; i++){
524 			errno = 0;
525 			int_fld[i] = strtoul(str_fld[i], &end, 0);
526 			if (errno != 0 || end == str_fld[i] || int_fld[i] >= max_fld[i])
527 				return -1;
528 		}
529 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
530 			printf("exceeded max number of lcore params: %hu\n",
531 				nb_lcore_params);
532 			return -1;
533 		}
534 		lcore_params_array[nb_lcore_params].port_id =
535 			(uint16_t)int_fld[FLD_PORT];
536 		lcore_params_array[nb_lcore_params].queue_id =
537 			(uint16_t)int_fld[FLD_QUEUE];
538 		lcore_params_array[nb_lcore_params].lcore_id =
539 			(uint32_t)int_fld[FLD_LCORE];
540 		++nb_lcore_params;
541 	}
542 	lcore_params = lcore_params_array;
543 	return 0;
544 }
545 
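/*
 * Parse the --eth-dest option, e.g. --eth-dest=0,00:11:22:33:44:55 sets
 * the destination MAC written into packets forwarded out of port 0.
 */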
546 static void
547 parse_eth_dest(const char *optarg)
548 {
549 	uint16_t portid;
550 	char *port_end;
551 	uint8_t c, *dest, peer_addr[6];
552 
553 	errno = 0;
554 	portid = strtoul(optarg, &port_end, 10);
555 	if (errno != 0 || port_end == optarg || *port_end++ != ',')
556 		rte_exit(EXIT_FAILURE,
557 		"Invalid eth-dest: %s", optarg);
558 	if (portid >= RTE_MAX_ETHPORTS)
559 		rte_exit(EXIT_FAILURE,
560 		"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
561 		portid, RTE_MAX_ETHPORTS);
562 
563 	if (cmdline_parse_etheraddr(NULL, port_end,
564 		&peer_addr, sizeof(peer_addr)) < 0)
565 		rte_exit(EXIT_FAILURE,
566 		"Invalid ethernet address: %s\n",
567 		port_end);
568 	dest = (uint8_t *)&dest_eth_addr[portid];
569 	for (c = 0; c < 6; c++)
570 		dest[c] = peer_addr[c];
571 	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
572 }
573 
574 static void
575 parse_mode(const char *optarg __rte_unused)
576 {
577 #ifdef RTE_LIB_EVENTDEV
578 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
579 
580 	if (!strcmp(optarg, "poll"))
581 		evt_rsrc->enabled = false;
582 	else if (!strcmp(optarg, "eventdev"))
583 		evt_rsrc->enabled = true;
584 #endif
585 }
586 
587 static void
588 parse_queue_size(const char *queue_size_arg, uint16_t *queue_size, int rx)
589 {
590 	char *end = NULL;
591 	unsigned long value;
592 
593 	/* parse decimal string */
594 	value = strtoul(queue_size_arg, &end, 10);
595 	if ((queue_size_arg[0] == '\0') || (end == NULL) ||
596 		(*end != '\0') || (value == 0)) {
597 		if (rx == 1)
598 			rte_exit(EXIT_FAILURE, "Invalid rx-queue-size\n");
599 		else
600 			rte_exit(EXIT_FAILURE, "Invalid tx-queue-size\n");
601 
602 		return;
603 	}
604 
605 	if (value > UINT16_MAX) {
606 		if (rx == 1)
607 			rte_exit(EXIT_FAILURE, "rx-queue-size %lu > %d\n",
608 				value, UINT16_MAX);
609 		else
610 			rte_exit(EXIT_FAILURE, "tx-queue-size %lu > %d\n",
611 				value, UINT16_MAX);
612 
613 		return;
614 	}
615 
616 	*queue_size = value;
617 }
618 
619 #ifdef RTE_LIB_EVENTDEV
620 static void
621 parse_eventq_sched(const char *optarg)
622 {
623 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
624 
625 	if (!strcmp(optarg, "ordered"))
626 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
627 	if (!strcmp(optarg, "atomic"))
628 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
629 	if (!strcmp(optarg, "parallel"))
630 		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
631 }
632 
633 static void
634 parse_event_eth_rx_queues(const char *eth_rx_queues)
635 {
636 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
637 	char *end = NULL;
638 	uint16_t num_eth_rx_queues;
639 
640 	/* parse decimal string */
641 	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
642 	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
643 		return;
644 
645 	if (num_eth_rx_queues == 0)
646 		return;
647 
648 	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
649 }
650 #endif
651 
652 static int
653 parse_lookup(const char *optarg)
654 {
655 	if (!strcmp(optarg, "em"))
656 		lookup_mode = L3FWD_LOOKUP_EM;
657 	else if (!strcmp(optarg, "lpm"))
658 		lookup_mode = L3FWD_LOOKUP_LPM;
659 	else if (!strcmp(optarg, "fib"))
660 		lookup_mode = L3FWD_LOOKUP_FIB;
661 	else if (!strcmp(optarg, "acl"))
662 		lookup_mode = L3FWD_LOOKUP_ACL;
663 	else {
664 		fprintf(stderr, "Invalid lookup option! Accepted options: acl, em, lpm, fib\n");
665 		return -1;
666 	}
667 	return 0;
668 }
669 
670 #define MAX_JUMBO_PKT_LEN  9600
671 
672 static const char short_options[] =
673 	"p:"  /* portmask */
674 	"P"   /* promiscuous */
675 	"L"   /* legacy enable long prefix match */
676 	"E"   /* legacy enable exact match */
677 	;
678 
679 #define CMD_LINE_OPT_CONFIG "config"
680 #define CMD_LINE_OPT_RX_QUEUE_SIZE "rx-queue-size"
681 #define CMD_LINE_OPT_TX_QUEUE_SIZE "tx-queue-size"
682 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
683 #define CMD_LINE_OPT_NO_NUMA "no-numa"
684 #define CMD_LINE_OPT_IPV6 "ipv6"
685 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
686 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
687 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
688 #define CMD_LINE_OPT_DISABLE_RSS "disable-rss"
689 #define CMD_LINE_OPT_RELAX_RX_OFFLOAD "relax-rx-offload"
690 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
691 #define CMD_LINE_OPT_MODE "mode"
692 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
693 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
694 #define CMD_LINE_OPT_LOOKUP "lookup"
695 #define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
696 #define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
697 #define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
698 #define CMD_LINE_OPT_RULE_IPV4 "rule_ipv4"
699 #define CMD_LINE_OPT_RULE_IPV6 "rule_ipv6"
700 #define CMD_LINE_OPT_ALG "alg"
701 
702 enum {
703 	/* long options mapped to a short option */
704 
705 	/* first long only option value must be >= 256, so that we won't
706 	 * conflict with short options */
707 	CMD_LINE_OPT_MIN_NUM = 256,
708 	CMD_LINE_OPT_CONFIG_NUM,
709 	CMD_LINE_OPT_RX_QUEUE_SIZE_NUM,
710 	CMD_LINE_OPT_TX_QUEUE_SIZE_NUM,
711 	CMD_LINE_OPT_ETH_DEST_NUM,
712 	CMD_LINE_OPT_NO_NUMA_NUM,
713 	CMD_LINE_OPT_IPV6_NUM,
714 	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
715 	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
716 	CMD_LINE_OPT_PARSE_PTYPE_NUM,
717 	CMD_LINE_OPT_DISABLE_RSS_NUM,
718 	CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM,
719 	CMD_LINE_OPT_RULE_IPV4_NUM,
720 	CMD_LINE_OPT_RULE_IPV6_NUM,
721 	CMD_LINE_OPT_ALG_NUM,
722 	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
723 	CMD_LINE_OPT_MODE_NUM,
724 	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
725 	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
726 	CMD_LINE_OPT_LOOKUP_NUM,
727 	CMD_LINE_OPT_ENABLE_VECTOR_NUM,
728 	CMD_LINE_OPT_VECTOR_SIZE_NUM,
729 	CMD_LINE_OPT_VECTOR_TMO_NS_NUM
730 };
731 
732 static const struct option lgopts[] = {
733 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
734 	{CMD_LINE_OPT_RX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_RX_QUEUE_SIZE_NUM},
735 	{CMD_LINE_OPT_TX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_TX_QUEUE_SIZE_NUM},
736 	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
737 	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
738 	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
739 	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
740 	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
741 	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
742 	{CMD_LINE_OPT_RELAX_RX_OFFLOAD, 0, 0, CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM},
743 	{CMD_LINE_OPT_DISABLE_RSS, 0, 0, CMD_LINE_OPT_DISABLE_RSS_NUM},
744 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
745 	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
746 	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
747 	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
748 					CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
749 	{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
750 	{CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
751 	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
752 	{CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
753 	{CMD_LINE_OPT_RULE_IPV4,   1, 0, CMD_LINE_OPT_RULE_IPV4_NUM},
754 	{CMD_LINE_OPT_RULE_IPV6,   1, 0, CMD_LINE_OPT_RULE_IPV6_NUM},
755 	{CMD_LINE_OPT_ALG,   1, 0, CMD_LINE_OPT_ALG_NUM},
756 	{NULL, 0, 0, 0}
757 };
758 
759 /*
760  * This expression is used to calculate the number of mbufs needed
761  * depending on user input, taking into account memory for Rx and
762  * Tx hardware rings, cache per lcore and mtable per port per lcore.
763  * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
764  * value of 8192.
765  */
766 #define NB_MBUF(nports) RTE_MAX(	\
767 	(nports*nb_rx_queue*nb_rxd +		\
768 	nports*nb_lcores*MAX_PKT_BURST +	\
769 	nports*n_tx_queue*nb_txd +		\
770 	nb_lcores*MEMPOOL_CACHE_SIZE),		\
771 	(unsigned)8192)
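/*
 * Worked example (illustrative only; assumes MAX_PKT_BURST = 32 and
 * MEMPOOL_CACHE_SIZE = 256 as defined in l3fwd.h): with 2 ports, 4 Rx and
 * 4 Tx queues per port, 4 lcores and 1024 descriptors per ring, the sum is
 * 2*4*1024 + 2*4*32 + 2*4*1024 + 4*256 = 17664 mbufs, so the 8192 floor
 * does not apply; with a single port, queue and lcore the floor keeps the
 * pool at 8192 mbufs.
 */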
772 
773 /* Parse the arguments given on the command line of the application. */
774 static int
775 parse_args(int argc, char **argv)
776 {
777 	int opt, ret;
778 	char **argvopt;
779 	int option_index;
780 	char *prgname = argv[0];
781 	uint8_t lcore_params = 0;
782 #ifdef RTE_LIB_EVENTDEV
783 	uint8_t eventq_sched = 0;
784 	uint8_t eth_rx_q = 0;
785 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
786 #endif
787 
788 	argvopt = argv;
789 
790 	/* Parse the application options. */
791 	while ((opt = getopt_long(argc, argvopt, short_options,
792 				lgopts, &option_index)) != EOF) {
793 
794 		switch (opt) {
795 		/* portmask */
796 		case 'p':
797 			enabled_port_mask = parse_portmask(optarg);
798 			if (enabled_port_mask == 0) {
799 				fprintf(stderr, "Invalid portmask\n");
800 				print_usage(prgname);
801 				return -1;
802 			}
803 			break;
804 
805 		case 'P':
806 			promiscuous_on = 1;
807 			break;
808 
809 		case 'E':
810 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
811 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
812 				return -1;
813 			}
814 			lookup_mode = L3FWD_LOOKUP_EM;
815 			break;
816 
817 		case 'L':
818 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
819 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
820 				return -1;
821 			}
822 			lookup_mode = L3FWD_LOOKUP_LPM;
823 			break;
824 
825 		/* long options */
826 		case CMD_LINE_OPT_CONFIG_NUM:
827 			ret = parse_config(optarg);
828 			if (ret) {
829 				fprintf(stderr, "Invalid config\n");
830 				print_usage(prgname);
831 				return -1;
832 			}
833 			lcore_params = 1;
834 			break;
835 
836 		case CMD_LINE_OPT_RX_QUEUE_SIZE_NUM:
837 			parse_queue_size(optarg, &nb_rxd, 1);
838 			break;
839 
840 		case CMD_LINE_OPT_TX_QUEUE_SIZE_NUM:
841 			parse_queue_size(optarg, &nb_txd, 0);
842 			break;
843 
844 		case CMD_LINE_OPT_ETH_DEST_NUM:
845 			parse_eth_dest(optarg);
846 			break;
847 
848 		case CMD_LINE_OPT_NO_NUMA_NUM:
849 			numa_on = 0;
850 			break;
851 
852 		case CMD_LINE_OPT_IPV6_NUM:
853 			ipv6 = 1;
854 			break;
855 
856 		case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
857 			max_pkt_len = parse_max_pkt_len(optarg);
858 			break;
859 
860 		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
861 			fprintf(stderr, "Hash entry number will be ignored\n");
862 			break;
863 
864 		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
865 			printf("soft parse-ptype is enabled\n");
866 			parse_ptype = 1;
867 			break;
868 
869 		case CMD_LINE_OPT_RELAX_RX_OFFLOAD_NUM:
870 			printf("Rx offload is relaxed\n");
871 			relax_rx_offload = 1;
872 			break;
873 
874 		case CMD_LINE_OPT_DISABLE_RSS_NUM:
875 			printf("RSS is disabled\n");
876 			disable_rss = 1;
877 			break;
878 
879 		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
880 			printf("per port buffer pool is enabled\n");
881 			per_port_pool = 1;
882 			break;
883 
884 		case CMD_LINE_OPT_MODE_NUM:
885 			parse_mode(optarg);
886 			break;
887 
888 #ifdef RTE_LIB_EVENTDEV
889 		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
890 			parse_eventq_sched(optarg);
891 			eventq_sched = 1;
892 			break;
893 
894 		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
895 			parse_event_eth_rx_queues(optarg);
896 			eth_rx_q = 1;
897 			break;
898 
899 		case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
900 			printf("event vectorization is enabled\n");
901 			evt_rsrc->vector_enabled = 1;
902 			break;
903 
904 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
905 			evt_rsrc->vector_size = strtol(optarg, NULL, 10);
906 			break;
907 
908 		case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
909 			evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
910 			break;
911 #endif
912 
913 		case CMD_LINE_OPT_LOOKUP_NUM:
914 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
915 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
916 				return -1;
917 			}
918 			ret = parse_lookup(optarg);
919 			/*
920 			 * If parse_lookup was passed an invalid lookup type
921 			 * then return -1. Error log included within
922 			 * parse_lookup for simplicity.
923 			 */
924 			if (ret)
925 				return -1;
926 			break;
927 
928 		case CMD_LINE_OPT_RULE_IPV4_NUM:
929 			l3fwd_set_rule_ipv4_name(optarg);
930 			break;
931 		case CMD_LINE_OPT_RULE_IPV6_NUM:
932 			l3fwd_set_rule_ipv6_name(optarg);
933 			break;
934 		case CMD_LINE_OPT_ALG_NUM:
935 			l3fwd_set_alg(optarg);
936 			break;
937 		default:
938 			print_usage(prgname);
939 			return -1;
940 		}
941 	}
942 
943 	RTE_SET_USED(lcore_params); /* needed if no eventdev block */
944 #ifdef RTE_LIB_EVENTDEV
945 	if (evt_rsrc->enabled && lcore_params) {
946 		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
947 		return -1;
948 	}
949 
950 	if (!evt_rsrc->enabled && eth_rx_q) {
951 		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
952 		return -1;
953 	}
954 
955 	if (!evt_rsrc->enabled && eventq_sched) {
956 		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
957 		return -1;
958 	}
959 
960 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
961 		evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
962 		fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
963 			evt_rsrc->vector_size);
964 	}
965 
966 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
967 		evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
968 		fprintf(stderr,
969 			"vector timeout set to default (%" PRIu64 " ns)\n",
970 			evt_rsrc->vector_tmo_ns);
971 	}
972 #endif
973 
974 	/*
975 	 * If nothing was selected, pick longest-prefix match
976 	 * as the default lookup method.
977 	 */
978 	if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
979 		fprintf(stderr, "No lookup method (ACL, LPM, EM or FIB) selected, defaulting to LPM\n");
980 		lookup_mode = L3FWD_LOOKUP_LPM;
981 	}
982 
983 	/* For ACL, update port config rss hash filter */
984 	if (lookup_mode == L3FWD_LOOKUP_ACL) {
985 		port_conf.rx_adv_conf.rss_conf.rss_hf |=
986 				RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP;
987 	}
988 
989 	if (optind >= 0)
990 		argv[optind-1] = prgname;
991 
992 	ret = optind-1;
993 	optind = 1; /* reset getopt lib */
994 	return ret;
995 }
996 
997 static void
998 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
999 {
1000 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1001 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1002 	printf("%s%s", name, buf);
1003 }
1004 
1005 int
1006 init_mem(uint16_t portid, unsigned int nb_mbuf)
1007 {
1008 #ifdef RTE_LIB_EVENTDEV
1009 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1010 #endif
1011 	struct lcore_conf *qconf;
1012 	int socketid;
1013 	unsigned lcore_id;
1014 	char s[64];
1015 
1016 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1017 		if (rte_lcore_is_enabled(lcore_id) == 0)
1018 			continue;
1019 
1020 		if (numa_on)
1021 			socketid = rte_lcore_to_socket_id(lcore_id);
1022 		else
1023 			socketid = 0;
1024 
1025 		if (socketid >= NB_SOCKETS) {
1026 			rte_exit(EXIT_FAILURE,
1027 				"Socket %d of lcore %u is out of range %d\n",
1028 				socketid, lcore_id, NB_SOCKETS);
1029 		}
1030 
1031 		if (pktmbuf_pool[portid][socketid] == NULL) {
1032 			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
1033 				 portid, socketid);
1034 			pktmbuf_pool[portid][socketid] =
1035 				rte_pktmbuf_pool_create(s, nb_mbuf,
1036 					MEMPOOL_CACHE_SIZE, 0,
1037 					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
1038 			if (pktmbuf_pool[portid][socketid] == NULL)
1039 				rte_exit(EXIT_FAILURE,
1040 					"Cannot init mbuf pool on socket %d\n",
1041 					socketid);
1042 			else
1043 				printf("Allocated mbuf pool on socket %d\n",
1044 					socketid);
1045 
1046 			/* Setup ACL, LPM, EM (i.e. hash) or FIB, but only once
1047 			 * per available socket.
1048 			 */
1049 			if (!lkp_per_socket[socketid]) {
1050 				l3fwd_lkp.setup(socketid);
1051 				lkp_per_socket[socketid] = 1;
1052 			}
1053 		}
1054 
1055 #ifdef RTE_LIB_EVENTDEV
1056 		if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
1057 			unsigned int nb_vec;
1058 
1059 			nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
1060 				 evt_rsrc->vector_size;
1061 			nb_vec = RTE_MAX(512U, nb_vec);
1062 			nb_vec += rte_lcore_count() * 32;
1063 			snprintf(s, sizeof(s), "vector_pool_%d", portid);
1064 			vector_pool[portid] = rte_event_vector_pool_create(
1065 				s, nb_vec, 32, evt_rsrc->vector_size, socketid);
1066 			if (vector_pool[portid] == NULL)
1067 				rte_exit(EXIT_FAILURE,
1068 					 "Failed to create vector pool for port %d\n",
1069 					 portid);
1070 			else
1071 				printf("Allocated vector pool for port %d\n",
1072 				       portid);
1073 		}
1074 #endif
1075 
1076 		qconf = &lcore_conf[lcore_id];
1077 		qconf->ipv4_lookup_struct =
1078 			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
1079 		qconf->ipv6_lookup_struct =
1080 			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
1081 	}
1082 	return 0;
1083 }
1084 
1085 /* Check the link status of all ports for up to 9 seconds, then print the final status. */
1086 static void
1087 check_all_ports_link_status(uint32_t port_mask)
1088 {
1089 #define CHECK_INTERVAL 100 /* 100ms */
1090 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1091 	uint16_t portid;
1092 	uint8_t count, all_ports_up, print_flag = 0;
1093 	struct rte_eth_link link;
1094 	int ret;
1095 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1096 
1097 	printf("\nChecking link status");
1098 	fflush(stdout);
1099 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1100 		if (force_quit)
1101 			return;
1102 		all_ports_up = 1;
1103 		RTE_ETH_FOREACH_DEV(portid) {
1104 			if (force_quit)
1105 				return;
1106 			if ((port_mask & (1 << portid)) == 0)
1107 				continue;
1108 			memset(&link, 0, sizeof(link));
1109 			ret = rte_eth_link_get_nowait(portid, &link);
1110 			if (ret < 0) {
1111 				all_ports_up = 0;
1112 				if (print_flag == 1)
1113 					printf("Port %u link get failed: %s\n",
1114 						portid, rte_strerror(-ret));
1115 				continue;
1116 			}
1117 			/* print link status if flag set */
1118 			if (print_flag == 1) {
1119 				rte_eth_link_to_str(link_status_text,
1120 					sizeof(link_status_text), &link);
1121 				printf("Port %d %s\n", portid,
1122 				       link_status_text);
1123 				continue;
1124 			}
1125 			/* clear all_ports_up flag if any link down */
1126 			if (link.link_status == RTE_ETH_LINK_DOWN) {
1127 				all_ports_up = 0;
1128 				break;
1129 			}
1130 		}
1131 		/* after finally printing all link status, get out */
1132 		if (print_flag == 1)
1133 			break;
1134 
1135 		if (all_ports_up == 0) {
1136 			printf(".");
1137 			fflush(stdout);
1138 			rte_delay_ms(CHECK_INTERVAL);
1139 		}
1140 
1141 		/* set the print_flag if all ports up or timeout */
1142 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1143 			print_flag = 1;
1144 			printf("done\n");
1145 		}
1146 	}
1147 }
1148 
1149 static void
1150 signal_handler(int signum)
1151 {
1152 	if (signum == SIGINT || signum == SIGTERM) {
1153 		printf("\n\nSignal %d received, preparing to exit...\n",
1154 				signum);
1155 		force_quit = true;
1156 	}
1157 }
1158 
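/*
 * Ensure packet type information will be available for the given Rx queue:
 * install the software ptype parsing callback when --parse-ptype was given,
 * otherwise verify that the PMD itself reports the required packet types.
 * Returns 1 on success, 0 on failure.
 */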
1159 static int
1160 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
1161 {
1162 	if (parse_ptype) {
1163 		printf("Port %d: packet type will be parsed in software\n", portid);
1164 		if (rte_eth_add_rx_callback(portid, queueid,
1165 					    l3fwd_lkp.cb_parse_ptype,
1166 					    NULL))
1167 			return 1;
1168 
1169 		printf("Failed to add rx callback: port=%d\n", portid);
1170 		return 0;
1171 	}
1172 
1173 	if (l3fwd_lkp.check_ptype(portid))
1174 		return 1;
1175 
1176 	printf("port %d cannot parse packet type, please add --%s\n",
1177 	       portid, CMD_LINE_OPT_PARSE_PTYPE);
1178 	return 0;
1179 }
1180 
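/*
 * Derive the per-device L2 overhead (header, CRC and any device specific
 * extras) from the gap between the maximum Rx packet length and the maximum
 * MTU, falling back to a standard Ethernet header plus CRC when the device
 * does not report a usable maximum MTU.
 */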
1181 static uint32_t
1182 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1183 {
1184 	uint32_t overhead_len;
1185 
1186 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1187 		overhead_len = max_rx_pktlen - max_mtu;
1188 	else
1189 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1190 
1191 	return overhead_len;
1192 }
1193 
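/*
 * Apply the --max-pkt-len option: validate the requested length, convert it
 * to an MTU by subtracting the device's L2 overhead, and enable the
 * multi-segment Tx offload when the resulting MTU exceeds a standard
 * Ethernet MTU.
 */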
1194 int
1195 config_port_max_pkt_len(struct rte_eth_conf *conf,
1196 		struct rte_eth_dev_info *dev_info)
1197 {
1198 	uint32_t overhead_len;
1199 
1200 	if (max_pkt_len == 0)
1201 		return 0;
1202 
1203 	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
1204 		return -1;
1205 
1206 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1207 			dev_info->max_mtu);
1208 	conf->rxmode.mtu = max_pkt_len - overhead_len;
1209 
1210 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
1211 		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1212 
1213 	return 0;
1214 }
1215 
1216 static void
1217 l3fwd_poll_resource_setup(void)
1218 {
1219 	uint8_t socketid;
1220 	uint16_t nb_rx_queue, queue;
1221 	struct rte_eth_dev_info dev_info;
1222 	uint32_t n_tx_queue, nb_lcores;
1223 	struct rte_eth_txconf *txconf;
1224 	struct lcore_conf *qconf;
1225 	uint16_t queueid, portid;
1226 	unsigned int nb_ports;
1227 	unsigned int lcore_id;
1228 	int ret;
1229 
1230 	if (check_lcore_params() < 0)
1231 		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1232 
1233 	ret = init_lcore_rx_queues();
1234 	if (ret < 0)
1235 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1236 
1237 	nb_ports = rte_eth_dev_count_avail();
1238 
1239 	if (check_port_config() < 0)
1240 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1241 
1242 	nb_lcores = rte_lcore_count();
1243 
1244 	/* initialize all ports */
1245 	RTE_ETH_FOREACH_DEV(portid) {
1246 		struct rte_eth_conf local_port_conf = port_conf;
1247 
1248 		/* skip ports that are not enabled */
1249 		if ((enabled_port_mask & (1 << portid)) == 0) {
1250 			printf("\nSkipping disabled port %d\n", portid);
1251 			continue;
1252 		}
1253 
1254 		/* init port */
1255 		printf("Initializing port %d ... ", portid );
1256 		fflush(stdout);
1257 
1258 		nb_rx_queue = get_port_n_rx_queues(portid);
1259 		n_tx_queue = nb_lcores;
1260 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1261 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1262 		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1263 			nb_rx_queue, (unsigned)n_tx_queue );
1264 
1265 		ret = rte_eth_dev_info_get(portid, &dev_info);
1266 		if (ret != 0)
1267 			rte_exit(EXIT_FAILURE,
1268 				"Error during getting device (port %u) info: %s\n",
1269 				portid, strerror(-ret));
1270 
1271 		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
1272 		if (ret != 0)
1273 			rte_exit(EXIT_FAILURE,
1274 				"Invalid max packet length: %u (port %u)\n",
1275 				max_pkt_len, portid);
1276 
1277 		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
1278 			local_port_conf.txmode.offloads |=
1279 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1280 
1281 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1282 			dev_info.flow_type_rss_offloads;
1283 
1284 		if (disable_rss == 1 || dev_info.max_rx_queues == 1)
1285 			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
1286 
1287 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1288 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
1289 			printf("Port %u modified RSS hash function based on hardware support,"
1290 				" requested:%#"PRIx64" configured:%#"PRIx64"\n",
1291 				portid,
1292 				port_conf.rx_adv_conf.rss_conf.rss_hf,
1293 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1294 		}
1295 
1296 		/* Relax Rx offload requirement */
1297 		if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1298 			local_port_conf.rxmode.offloads) {
1299 			printf("Port %u: requested Rx offloads 0x%"PRIx64
1300 				" do not match the Rx offload capabilities 0x%"PRIx64"\n",
1301 				portid, local_port_conf.rxmode.offloads,
1302 				dev_info.rx_offload_capa);
1303 			if (relax_rx_offload) {
1304 				local_port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
1305 				printf("Warning: modified Rx offload to 0x%"PRIx64
1306 						" based on device capability\n",
1307 						local_port_conf.rxmode.offloads);
1308 			}
1309 		}
1310 
1311 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
1312 					(uint16_t)n_tx_queue, &local_port_conf);
1313 		if (ret < 0)
1314 			rte_exit(EXIT_FAILURE,
1315 				"Cannot configure device: err=%d, port=%d\n",
1316 				ret, portid);
1317 
1318 		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1319 						       &nb_txd);
1320 		if (ret < 0)
1321 			rte_exit(EXIT_FAILURE,
1322 				 "Cannot adjust number of descriptors: err=%d, "
1323 				 "port=%d\n", ret, portid);
1324 
1325 		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1326 		if (ret < 0)
1327 			rte_exit(EXIT_FAILURE,
1328 				 "Cannot get MAC address: err=%d, port=%d\n",
1329 				 ret, portid);
1330 
1331 		print_ethaddr(" Address:", &ports_eth_addr[portid]);
1332 		printf(", ");
1333 		print_ethaddr("Destination:",
1334 			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
1335 		printf(", ");
1336 
1337 		/*
1338 		 * prepare src MACs for each port.
1339 		 */
1340 		rte_ether_addr_copy(&ports_eth_addr[portid],
1341 			(struct rte_ether_addr *)(val_eth + portid) + 1);
1342 
1343 		/* init memory */
1344 		if (!per_port_pool) {
1345 			/* portid = 0; this is *not* signifying the first port,
1346 			 * rather, it signifies that portid is ignored.
1347 			 */
1348 			ret = init_mem(0, NB_MBUF(nb_ports));
1349 		} else {
1350 			ret = init_mem(portid, NB_MBUF(1));
1351 		}
1352 		if (ret < 0)
1353 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
1354 
1355 		/* init one TX queue per couple (lcore,port) */
1356 		queueid = 0;
1357 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1358 			if (rte_lcore_is_enabled(lcore_id) == 0)
1359 				continue;
1360 
1361 			if (numa_on)
1362 				socketid =
1363 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1364 			else
1365 				socketid = 0;
1366 
1367 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1368 			fflush(stdout);
1369 
1370 			txconf = &dev_info.default_txconf;
1371 			txconf->offloads = local_port_conf.txmode.offloads;
1372 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1373 						     socketid, txconf);
1374 			if (ret < 0)
1375 				rte_exit(EXIT_FAILURE,
1376 					"rte_eth_tx_queue_setup: err=%d, "
1377 					"port=%d\n", ret, portid);
1378 
1379 			qconf = &lcore_conf[lcore_id];
1380 			qconf->tx_queue_id[portid] = queueid;
1381 			queueid++;
1382 
1383 			qconf->tx_port_id[qconf->n_tx_port] = portid;
1384 			qconf->n_tx_port++;
1385 		}
1386 		printf("\n");
1387 	}
1388 
1389 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1390 		if (rte_lcore_is_enabled(lcore_id) == 0)
1391 			continue;
1392 		qconf = &lcore_conf[lcore_id];
1393 		printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1394 		fflush(stdout);
1395 		/* init RX queues */
1396 		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1397 			struct rte_eth_conf local_conf;
1398 			struct rte_eth_rxconf rxq_conf;
1399 
1400 			portid = qconf->rx_queue_list[queue].port_id;
1401 			queueid = qconf->rx_queue_list[queue].queue_id;
1402 
1403 			if (numa_on)
1404 				socketid =
1405 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1406 			else
1407 				socketid = 0;
1408 
1409 			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1410 			fflush(stdout);
1411 
1412 			ret = rte_eth_dev_info_get(portid, &dev_info);
1413 			if (ret != 0)
1414 				rte_exit(EXIT_FAILURE,
1415 					"Error during getting device (port %u) info: %s\n",
1416 					portid, strerror(-ret));
1417 
1418 			ret = rte_eth_dev_conf_get(portid, &local_conf);
1419 			if (ret != 0)
1420 				rte_exit(EXIT_FAILURE,
1421 					"Error during getting device (port %u) configuration: %s\n",
1422 					portid, strerror(-ret));
1423 
1424 			rxq_conf = dev_info.default_rxconf;
1425 			rxq_conf.offloads = local_conf.rxmode.offloads;
1426 			if (!per_port_pool)
1427 				ret = rte_eth_rx_queue_setup(portid, queueid,
1428 						nb_rxd, socketid,
1429 						&rxq_conf,
1430 						pktmbuf_pool[0][socketid]);
1431 			else
1432 				ret = rte_eth_rx_queue_setup(portid, queueid,
1433 						nb_rxd, socketid,
1434 						&rxq_conf,
1435 						pktmbuf_pool[portid][socketid]);
1436 			if (ret < 0)
1437 				rte_exit(EXIT_FAILURE,
1438 				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
1439 				ret, portid);
1440 		}
1441 	}
1442 }
1443 
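/*
 * Map the given service id to the service lcore that currently runs the
 * fewest services (clearing any default mappings first) and start that
 * lcore. Returns -ENOENT when no service lcore is available.
 */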
1444 static inline int
1445 l3fwd_service_enable(uint32_t service_id)
1446 {
1447 	uint8_t min_service_count = UINT8_MAX;
1448 	uint32_t slcore_array[RTE_MAX_LCORE];
1449 	unsigned int slcore = 0;
1450 	uint8_t service_count;
1451 	int32_t slcore_count;
1452 
1453 	if (!rte_service_lcore_count())
1454 		return -ENOENT;
1455 
1456 	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1457 	if (slcore_count < 0)
1458 		return -ENOENT;
1459 	/* Get the core which has the least number of services running. */
1460 	while (slcore_count--) {
1461 		/* Reset default mapping */
1462 		if (rte_service_map_lcore_set(service_id,
1463 				slcore_array[slcore_count], 0) != 0)
1464 			return -ENOENT;
1465 		service_count = rte_service_lcore_count_services(
1466 				slcore_array[slcore_count]);
1467 		if (service_count < min_service_count) {
1468 			slcore = slcore_array[slcore_count];
1469 			min_service_count = service_count;
1470 		}
1471 	}
1472 	if (rte_service_map_lcore_set(service_id, slcore, 1))
1473 		return -ENOENT;
1474 	rte_service_lcore_start(slcore);
1475 
1476 	return 0;
1477 }
1478 
1479 #ifdef RTE_LIB_EVENTDEV
1480 static void
1481 l3fwd_event_service_setup(void)
1482 {
1483 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1484 	struct rte_event_dev_info evdev_info;
1485 	uint32_t service_id, caps;
1486 	int ret, i;
1487 
1488 	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1489 	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1490 		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1491 				&service_id);
1492 		if (ret != -ESRCH && ret != 0)
1493 			rte_exit(EXIT_FAILURE,
1494 				 "Error in starting eventdev service\n");
1495 		l3fwd_service_enable(service_id);
1496 	}
1497 
1498 	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1499 		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1500 				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1501 		if (ret < 0)
1502 			rte_exit(EXIT_FAILURE,
1503 				 "Failed to get Rx adapter[%d] caps\n",
1504 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1505 		ret = rte_event_eth_rx_adapter_service_id_get(
1506 				evt_rsrc->event_d_id,
1507 				&service_id);
1508 		if (ret != -ESRCH && ret != 0)
1509 			rte_exit(EXIT_FAILURE,
1510 				 "Error in starting Rx adapter[%d] service\n",
1511 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1512 		l3fwd_service_enable(service_id);
1513 	}
1514 
1515 	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1516 		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1517 				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1518 		if (ret < 0)
1519 			rte_exit(EXIT_FAILURE,
1520 				 "Failed to get Tx adapter[%d] caps\n",
1521 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1522 		ret = rte_event_eth_tx_adapter_service_id_get(
1523 				evt_rsrc->event_d_id,
1524 				&service_id);
1525 		if (ret != -ESRCH && ret != 0)
1526 			rte_exit(EXIT_FAILURE,
1527 				 "Error in starting Tx adapter[%d] service\n",
1528 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1529 		l3fwd_service_enable(service_id);
1530 	}
1531 }
1532 #endif
1533 
1534 int
1535 main(int argc, char **argv)
1536 {
1537 #ifdef RTE_LIB_EVENTDEV
1538 	struct l3fwd_event_resources *evt_rsrc;
1539 	int i;
1540 #endif
1541 	struct lcore_conf *qconf;
1542 	uint16_t queueid, portid;
1543 	unsigned int lcore_id;
1544 	uint16_t queue;
1545 	int ret;
1546 
1547 	/* init EAL */
1548 	ret = rte_eal_init(argc, argv);
1549 	if (ret < 0)
1550 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1551 	argc -= ret;
1552 	argv += ret;
1553 
1554 	force_quit = false;
1555 	signal(SIGINT, signal_handler);
1556 	signal(SIGTERM, signal_handler);
1557 
1558 	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1559 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1560 		dest_eth_addr[portid] =
1561 			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1562 		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1563 	}
1564 
1565 #ifdef RTE_LIB_EVENTDEV
1566 	evt_rsrc = l3fwd_get_eventdev_rsrc();
1567 #endif
1568 	/* parse application arguments (after the EAL ones) */
1569 	ret = parse_args(argc, argv);
1570 	if (ret < 0)
1571 		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1572 
1573 	/* Setup function pointers for lookup method. */
1574 	setup_l3fwd_lookup_tables();
1575 
1576 	/* Add the config file rules */
1577 	l3fwd_lkp.read_config_files();
1578 
1579 #ifdef RTE_LIB_EVENTDEV
1580 	evt_rsrc->per_port_pool = per_port_pool;
1581 	evt_rsrc->pkt_pool = pktmbuf_pool;
1582 	evt_rsrc->vec_pool = vector_pool;
1583 	evt_rsrc->port_mask = enabled_port_mask;
1584 	/* Configure eventdev parameters if user has requested */
1585 	if (evt_rsrc->enabled) {
1586 		l3fwd_event_resource_setup(&port_conf);
1587 		if (lookup_mode == L3FWD_LOOKUP_EM)
1588 			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1589 		else if (lookup_mode == L3FWD_LOOKUP_FIB)
1590 			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1591 		else
1592 			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1593 	} else
1594 #endif
1595 		l3fwd_poll_resource_setup();
1596 
1597 	/* start ports */
1598 	RTE_ETH_FOREACH_DEV(portid) {
1599 		if ((enabled_port_mask & (1 << portid)) == 0) {
1600 			continue;
1601 		}
1602 		/* Start device */
1603 		ret = rte_eth_dev_start(portid);
1604 		if (ret < 0)
1605 			rte_exit(EXIT_FAILURE,
1606 				"rte_eth_dev_start: err=%d, port=%d\n",
1607 				ret, portid);
1608 
1609 		/*
1610 		 * If enabled, put the device in promiscuous mode.
1611 		 * This allows the application to forward packets
1612 		 * to itself through two cross-connected ports of the
1613 		 * target machine.
1614 		 */
1615 		if (promiscuous_on) {
1616 			ret = rte_eth_promiscuous_enable(portid);
1617 			if (ret != 0)
1618 				rte_exit(EXIT_FAILURE,
1619 					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
1620 					rte_strerror(-ret), portid);
1621 		}
1622 	}
1623 
1624 #ifdef RTE_LIB_EVENTDEV
1625 	if (evt_rsrc->enabled)
1626 		l3fwd_event_service_setup();
1627 #endif
1628 
1629 	printf("\n");
1630 
1631 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1632 		if (rte_lcore_is_enabled(lcore_id) == 0)
1633 			continue;
1634 		qconf = &lcore_conf[lcore_id];
1635 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1636 			portid = qconf->rx_queue_list[queue].port_id;
1637 			queueid = qconf->rx_queue_list[queue].queue_id;
1638 			if (prepare_ptype_parser(portid, queueid) == 0)
1639 				rte_exit(EXIT_FAILURE, "ptype check fails\n");
1640 		}
1641 	}
1642 
1643 	check_all_ports_link_status(enabled_port_mask);
1644 
1645 	ret = 0;
1646 	/* launch per-lcore init on every lcore */
1647 	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1648 
1649 #ifdef RTE_LIB_EVENTDEV
1650 	if (evt_rsrc->enabled) {
1651 		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1652 			rte_event_eth_rx_adapter_stop(
1653 					evt_rsrc->rx_adptr.rx_adptr[i]);
1654 		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1655 			rte_event_eth_tx_adapter_stop(
1656 					evt_rsrc->tx_adptr.tx_adptr[i]);
1657 
1658 		RTE_ETH_FOREACH_DEV(portid) {
1659 			if ((enabled_port_mask & (1 << portid)) == 0)
1660 				continue;
1661 			ret = rte_eth_dev_stop(portid);
1662 			if (ret != 0)
1663 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1664 				       ret, portid);
1665 		}
1666 
1667 		rte_eal_mp_wait_lcore();
1668 		RTE_ETH_FOREACH_DEV(portid) {
1669 			if ((enabled_port_mask & (1 << portid)) == 0)
1670 				continue;
1671 			rte_eth_dev_close(portid);
1672 		}
1673 
1674 		rte_event_dev_stop(evt_rsrc->event_d_id);
1675 		rte_event_dev_close(evt_rsrc->event_d_id);
1676 
1677 	} else
1678 #endif
1679 	{
1680 		rte_eal_mp_wait_lcore();
1681 
1682 		RTE_ETH_FOREACH_DEV(portid) {
1683 			if ((enabled_port_mask & (1 << portid)) == 0)
1684 				continue;
1685 			printf("Closing port %d...", portid);
1686 			ret = rte_eth_dev_stop(portid);
1687 			if (ret != 0)
1688 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1689 				       ret, portid);
1690 			rte_eth_dev_close(portid);
1691 			printf(" Done\n");
1692 		}
1693 	}
1694 
1695 	/* clean up config file routes */
1696 	l3fwd_lkp.free_routes();
1697 
1698 	/* clean up the EAL */
1699 	rte_eal_cleanup();
1700 
1701 	printf("Bye...\n");
1702 
1703 	return ret;
1704 }
1705