xref: /dpdk/examples/l3fwd/main.c (revision 02d36ef6a9528e0f4a3403956e66bcea5fadbf8c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2021 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17 
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_ip.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
43 
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
46 
47 #include "l3fwd.h"
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
50 
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
53 
54 #define MAX_LCORE_PARAMS 1024
55 
56 uint16_t nb_rxd = RX_DESC_DEFAULT;
57 uint16_t nb_txd = TX_DESC_DEFAULT;
58 
59 /* Ports set in promiscuous mode off by default. */
60 static int promiscuous_on;
61 
62 /* Select Longest-Prefix, Exact match, Forwarding Information Base or Access Control. */
63 enum L3FWD_LOOKUP_MODE {
64 	L3FWD_LOOKUP_DEFAULT,
65 	L3FWD_LOOKUP_LPM,
66 	L3FWD_LOOKUP_EM,
67 	L3FWD_LOOKUP_FIB,
68 	L3FWD_LOOKUP_ACL
69 };
70 static enum L3FWD_LOOKUP_MODE lookup_mode;
71 
72 /* Global variables. */
73 static int numa_on = 1; /**< NUMA is enabled by default. */
74 static int parse_ptype; /**< Parse packet type using Rx callback;
75 			  *  disabled by default. */
76 static int per_port_pool; /**< Use separate buffer pools per port;
77 			    *  disabled by default. */
78 
79 volatile bool force_quit;
80 
81 /* ethernet addresses of ports */
82 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
83 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
84 
85 xmm_t val_eth[RTE_MAX_ETHPORTS];
86 
87 /* mask of enabled ports */
88 uint32_t enabled_port_mask;
89 
90 /* Used only in exact match mode. */
91 int ipv6; /**< ipv6 is false by default. */
92 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
93 
94 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
95 
96 struct parm_cfg parm_config;
97 
98 struct lcore_params {
99 	uint16_t port_id;
100 	uint8_t queue_id;
101 	uint8_t lcore_id;
102 } __rte_cache_aligned;
103 
104 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
105 static struct lcore_params lcore_params_array_default[] = {
106 	{0, 0, 2},
107 	{0, 1, 2},
108 	{0, 2, 2},
109 	{1, 0, 2},
110 	{1, 1, 2},
111 	{1, 2, 2},
112 	{2, 0, 2},
113 	{3, 0, 3},
114 	{3, 1, 3},
115 };
116 
117 static struct lcore_params * lcore_params = lcore_params_array_default;
118 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
119 				sizeof(lcore_params_array_default[0]);
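/*
 * Each entry above is a (port_id, queue_id, lcore_id) triple: for example,
 * {0, 1, 2} assigns Rx queue 1 of port 0 to lcore 2. With these defaults,
 * lcore 2 polls queues 0-2 of ports 0 and 1 plus queue 0 of port 2, while
 * lcore 3 polls queues 0-1 of port 3. The --config option replaces this
 * table via parse_config().
 */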
120 
121 static struct rte_eth_conf port_conf = {
122 	.rxmode = {
123 		.mq_mode = RTE_ETH_MQ_RX_RSS,
124 		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
125 	},
126 	.rx_adv_conf = {
127 		.rss_conf = {
128 			.rss_key = NULL,
129 			.rss_hf = RTE_ETH_RSS_IP,
130 		},
131 	},
132 	.txmode = {
133 		.mq_mode = RTE_ETH_MQ_TX_NONE,
134 	},
135 };
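/*
 * Default port configuration: RSS spreads packets across Rx queues based on
 * the IP header, Rx checksum offload is requested, and no special Tx
 * multi-queue mode is used. The requested RSS hash types are later masked
 * against the device capabilities, and ACL mode additionally requests
 * TCP/UDP/SCTP hashing (see parse_args()).
 */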
136 
137 uint32_t max_pkt_len;
138 
139 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
140 static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
141 static uint8_t lkp_per_socket[NB_SOCKETS];
142 
143 struct l3fwd_lkp_mode {
144 	void  (*read_config_files)(void);
145 	void  (*setup)(int);
146 	int   (*check_ptype)(int);
147 	rte_rx_callback_fn cb_parse_ptype;
148 	int   (*main_loop)(void *);
149 	void* (*get_ipv4_lookup_struct)(int);
150 	void* (*get_ipv6_lookup_struct)(int);
151 	void  (*free_routes)(void);
152 };
153 
154 static struct l3fwd_lkp_mode l3fwd_lkp;
155 
156 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
157 	.read_config_files		= read_config_files_em,
158 	.setup                  = setup_hash,
159 	.check_ptype		= em_check_ptype,
160 	.cb_parse_ptype		= em_cb_parse_ptype,
161 	.main_loop              = em_main_loop,
162 	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
163 	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
164 	.free_routes			= em_free_routes,
165 };
166 
167 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
168 	.read_config_files		= read_config_files_lpm,
169 	.setup                  = setup_lpm,
170 	.check_ptype		= lpm_check_ptype,
171 	.cb_parse_ptype		= lpm_cb_parse_ptype,
172 	.main_loop              = lpm_main_loop,
173 	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
174 	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
175 	.free_routes			= lpm_free_routes,
176 };
177 
178 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
179 	.read_config_files		= read_config_files_lpm,
180 	.setup                  = setup_fib,
181 	.check_ptype            = lpm_check_ptype,
182 	.cb_parse_ptype         = lpm_cb_parse_ptype,
183 	.main_loop              = fib_main_loop,
184 	.get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
185 	.get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
186 	.free_routes			= lpm_free_routes,
187 };
188 
189 static struct l3fwd_lkp_mode l3fwd_acl_lkp = {
190 	.read_config_files		= read_config_files_acl,
191 	.setup                  = setup_acl,
192 	.check_ptype            = em_check_ptype,
193 	.cb_parse_ptype         = em_cb_parse_ptype,
194 	.main_loop              = acl_main_loop,
195 	.get_ipv4_lookup_struct = acl_get_ipv4_l3fwd_lookup_struct,
196 	.get_ipv6_lookup_struct = acl_get_ipv6_l3fwd_lookup_struct,
197 	.free_routes			= acl_free_routes,
198 };
199 
200 /*
201  * 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
202  * 198.18.{0-15}.0/24 = Port {0-15}
203  */
204 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
205 	{RTE_IPV4(198, 18, 0, 0), 24, 0},
206 	{RTE_IPV4(198, 18, 1, 0), 24, 1},
207 	{RTE_IPV4(198, 18, 2, 0), 24, 2},
208 	{RTE_IPV4(198, 18, 3, 0), 24, 3},
209 	{RTE_IPV4(198, 18, 4, 0), 24, 4},
210 	{RTE_IPV4(198, 18, 5, 0), 24, 5},
211 	{RTE_IPV4(198, 18, 6, 0), 24, 6},
212 	{RTE_IPV4(198, 18, 7, 0), 24, 7},
213 	{RTE_IPV4(198, 18, 8, 0), 24, 8},
214 	{RTE_IPV4(198, 18, 9, 0), 24, 9},
215 	{RTE_IPV4(198, 18, 10, 0), 24, 10},
216 	{RTE_IPV4(198, 18, 11, 0), 24, 11},
217 	{RTE_IPV4(198, 18, 12, 0), 24, 12},
218 	{RTE_IPV4(198, 18, 13, 0), 24, 13},
219 	{RTE_IPV4(198, 18, 14, 0), 24, 14},
220 	{RTE_IPV4(198, 18, 15, 0), 24, 15},
221 };
222 
223 /*
224  * 2001:200::/48 is the IANA reserved range for IPv6 benchmarking (RFC5180).
225  * 2001:200:0:{0-f}::/64 = Port {0-15}
226  */
227 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
228 	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
229 	{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
230 	{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
231 	{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
232 	{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
233 	{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
234 	{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
235 	{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
236 	{{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
237 	{{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
238 	{{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
239 	{{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
240 	{{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
241 	{{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
242 	{{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
243 	{{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
244 };
245 
246 /*
247  * APIs called during initialization to set up ACL/EM/LPM rules.
248  */
249 void
250 l3fwd_set_rule_ipv4_name(const char *optarg)
251 {
252 	parm_config.rule_ipv4_name = optarg;
253 }
254 
255 void
256 l3fwd_set_rule_ipv6_name(const char *optarg)
257 {
258 	parm_config.rule_ipv6_name = optarg;
259 }
260 
261 void
262 l3fwd_set_alg(const char *optarg)
263 {
264 	parm_config.alg = parse_acl_alg(optarg);
265 }
266 
267 /*
268  * Setup lookup methods for forwarding.
269  * Currently exact-match, longest-prefix-match, forwarding information
270  * base and access control lists are the supported ones.
271  */
272 static void
273 setup_l3fwd_lookup_tables(void)
274 {
275 	/* Setup HASH lookup functions. */
276 	if (lookup_mode == L3FWD_LOOKUP_EM)
277 		l3fwd_lkp = l3fwd_em_lkp;
278 	/* Setup FIB lookup functions. */
279 	else if (lookup_mode == L3FWD_LOOKUP_FIB)
280 		l3fwd_lkp = l3fwd_fib_lkp;
281 	/* Setup ACL lookup functions. */
282 	else if (lookup_mode == L3FWD_LOOKUP_ACL)
283 		l3fwd_lkp = l3fwd_acl_lkp;
284 	/* Setup LPM lookup functions. */
285 	else
286 		l3fwd_lkp = l3fwd_lpm_lkp;
287 }
288 
289 static int
290 check_lcore_params(void)
291 {
292 	uint8_t queue, lcore;
293 	uint16_t i;
294 	int socketid;
295 
296 	for (i = 0; i < nb_lcore_params; ++i) {
297 		queue = lcore_params[i].queue_id;
298 		if (queue >= MAX_RX_QUEUE_PER_PORT) {
299 			printf("invalid queue number: %hhu\n", queue);
300 			return -1;
301 		}
302 		lcore = lcore_params[i].lcore_id;
303 		if (!rte_lcore_is_enabled(lcore)) {
304 			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
305 			return -1;
306 		}
307 		socketid = rte_lcore_to_socket_id(lcore);
308 		if ((socketid != 0) && (numa_on == 0)) {
309 			printf("warning: lcore %hhu is on socket %d with numa off\n",
310 				lcore, socketid);
311 		}
312 	}
313 	return 0;
314 }
315 
316 static int
317 check_port_config(void)
318 {
319 	uint16_t portid;
320 	uint16_t i;
321 
322 	for (i = 0; i < nb_lcore_params; ++i) {
323 		portid = lcore_params[i].port_id;
324 		if ((enabled_port_mask & (1 << portid)) == 0) {
325 			printf("port %u is not enabled in port mask\n", portid);
326 			return -1;
327 		}
328 		if (!rte_eth_dev_is_valid_port(portid)) {
329 			printf("port %u is not present on the board\n", portid);
330 			return -1;
331 		}
332 	}
333 	return 0;
334 }
335 
336 static uint8_t
337 get_port_n_rx_queues(const uint16_t port)
338 {
339 	int queue = -1;
340 	uint16_t i;
341 
342 	for (i = 0; i < nb_lcore_params; ++i) {
343 		if (lcore_params[i].port_id == port) {
344 			if (lcore_params[i].queue_id == queue+1)
345 				queue = lcore_params[i].queue_id;
346 			else
347 				rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
348 						" in sequence and must start with 0\n",
349 						lcore_params[i].port_id);
350 		}
351 	}
352 	return (uint8_t)(++queue);
353 }
354 
355 static int
356 init_lcore_rx_queues(void)
357 {
358 	uint16_t i, nb_rx_queue;
359 	uint8_t lcore;
360 
361 	for (i = 0; i < nb_lcore_params; ++i) {
362 		lcore = lcore_params[i].lcore_id;
363 		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
364 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
365 			printf("error: too many queues (%u) for lcore: %u\n",
366 				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
367 			return -1;
368 		} else {
369 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
370 				lcore_params[i].port_id;
371 			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
372 				lcore_params[i].queue_id;
373 			lcore_conf[lcore].n_rx_queue++;
374 		}
375 	}
376 	return 0;
377 }
378 
379 /* display usage */
380 static void
381 print_usage(const char *prgname)
382 {
383 	char alg[PATH_MAX];
384 
385 	usage_acl_alg(alg, sizeof(alg));
386 	fprintf(stderr, "%s [EAL options] --"
387 		" -p PORTMASK"
388 		"  --rule_ipv4=FILE"
389 		"  --rule_ipv6=FILE"
390 		" [-P]"
391 		" [--lookup]"
392 		" --config (port,queue,lcore)[,(port,queue,lcore)]"
393 		" [--rx-queue-size NPKTS]"
394 		" [--tx-queue-size NPKTS]"
395 		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
396 		" [--max-pkt-len PKTLEN]"
397 		" [--no-numa]"
398 		" [--hash-entry-num]"
399 		" [--ipv6]"
400 		" [--parse-ptype]"
401 		" [--per-port-pool]"
402 		" [--mode]"
403 		" [--eventq-sched]"
404 		" [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
405 		" [-E]"
406 		" [-L]\n\n"
407 
408 		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
409 		"  -P : Enable promiscuous mode\n"
410 		"  --lookup: Select the lookup method\n"
411 		"            Default: lpm\n"
412 		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base),\n"
413 		"                      acl (Access Control List)\n"
414 		"  --config (port,queue,lcore): Rx queue configuration\n"
415 		"  --rx-queue-size NPKTS: Rx queue size in decimal\n"
416 		"            Default: %d\n"
417 		"  --tx-queue-size NPKTS: Tx queue size in decimal\n"
418 		"            Default: %d\n"
419 		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
420 		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
421 		"  --no-numa: Disable numa awareness\n"
422 		"  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
423 		"  --ipv6: Set if running ipv6 packets\n"
424 		"  --parse-ptype: Set to use software to analyze packet type\n"
425 		"  --per-port-pool: Use separate buffer pool per port\n"
426 		"  --mode: Packet transfer mode for I/O, poll or eventdev\n"
427 		"          Default mode = poll\n"
428 		"  --eventq-sched: Event queue synchronization method\n"
429 		"                  ordered, atomic or parallel.\n"
430 		"                  Default: atomic\n"
431 		"                  Valid only if --mode=eventdev\n"
432 		"  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
433 		"                    Default: 1\n"
434 		"                    Valid only if --mode=eventdev\n"
435 		"  --event-vector:  Enable event vectorization.\n"
436 		"  --event-vector-size: Max vector size if event vectorization is enabled.\n"
437 		"  --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
438 		"  -E : Enable exact match (legacy flag, please use --lookup=em instead)\n"
439 		"  -L : Enable longest prefix match (legacy flag, please use --lookup=lpm instead)\n"
440 		"  --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n"
441 		"                    Each rule occupies one line.\n"
442 		"                    Two kinds of rules are supported.\n"
443 		"                    An ACL entry is a line that leads with character '%c',\n"
444 		"                    a route entry is a line that leads with character '%c'.\n"
445 		"  --rule_ipv6=FILE: Specify the ipv6 rules entries file.\n"
446 		"  --alg: ACL classify method to use, one of: %s.\n\n",
447 		prgname, RX_DESC_DEFAULT, TX_DESC_DEFAULT,
448 		ACL_LEAD_CHAR, ROUTE_LEAD_CHAR, alg);
449 }
450 
451 static int
452 parse_max_pkt_len(const char *pktlen)
453 {
454 	char *end = NULL;
455 	unsigned long len;
456 
457 	/* parse decimal string */
458 	len = strtoul(pktlen, &end, 10);
459 	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
460 		return -1;
461 
462 	if (len == 0)
463 		return -1;
464 
465 	return len;
466 }
467 
468 static int
469 parse_portmask(const char *portmask)
470 {
471 	char *end = NULL;
472 	unsigned long pm;
473 
474 	/* parse hexadecimal string */
475 	pm = strtoul(portmask, &end, 16);
476 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
477 		return 0;
478 
479 	return pm;
480 }
481 
482 static int
483 parse_hash_entry_number(const char *hash_entry_num)
484 {
485 	char *end = NULL;
486 	unsigned long hash_en;
487 	/* parse hexadecimal string */
488 	hash_en = strtoul(hash_entry_num, &end, 16);
489 	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
490 		return -1;
491 
492 	if (hash_en == 0)
493 		return -1;
494 
495 	return hash_en;
496 }
497 
498 static int
499 parse_config(const char *q_arg)
500 {
501 	char s[256];
502 	const char *p, *p0 = q_arg;
503 	char *end;
504 	enum fieldnames {
505 		FLD_PORT = 0,
506 		FLD_QUEUE,
507 		FLD_LCORE,
508 		_NUM_FLD
509 	};
510 	unsigned long int_fld[_NUM_FLD];
511 	char *str_fld[_NUM_FLD];
512 	int i;
513 	unsigned size;
514 
515 	nb_lcore_params = 0;
516 
517 	while ((p = strchr(p0, '(')) != NULL) {
518 		++p;
519 		if ((p0 = strchr(p, ')')) == NULL)
520 			return -1;
521 
522 		size = p0 - p;
523 		if (size >= sizeof(s))
524 			return -1;
525 
526 		snprintf(s, sizeof(s), "%.*s", size, p);
527 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
528 			return -1;
529 		for (i = 0; i < _NUM_FLD; i++) {
530 			errno = 0;
531 			int_fld[i] = strtoul(str_fld[i], &end, 0);
532 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
533 				return -1;
534 		}
535 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
536 			printf("exceeded max number of lcore params: %hu\n",
537 				nb_lcore_params);
538 			return -1;
539 		}
540 		lcore_params_array[nb_lcore_params].port_id =
541 			(uint8_t)int_fld[FLD_PORT];
542 		lcore_params_array[nb_lcore_params].queue_id =
543 			(uint8_t)int_fld[FLD_QUEUE];
544 		lcore_params_array[nb_lcore_params].lcore_id =
545 			(uint8_t)int_fld[FLD_LCORE];
546 		++nb_lcore_params;
547 	}
548 	lcore_params = lcore_params_array;
549 	return 0;
550 }
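/*
 * Example: --config="(0,0,1),(0,1,2),(1,0,1)" maps port 0/queue 0 to lcore 1,
 * port 0/queue 1 to lcore 2 and port 1/queue 0 to lcore 1. Each field is
 * parsed with strtoul() base 0, so hexadecimal values such as 0x1 are also
 * accepted, and every field must fit in the range 0-255.
 */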
551 
552 static void
553 parse_eth_dest(const char *optarg)
554 {
555 	uint16_t portid;
556 	char *port_end;
557 	uint8_t c, *dest, peer_addr[6];
558 
559 	errno = 0;
560 	portid = strtoul(optarg, &port_end, 10);
561 	if (errno != 0 || port_end == optarg || *port_end++ != ',')
562 		rte_exit(EXIT_FAILURE,
563 		"Invalid eth-dest: %s", optarg);
564 	if (portid >= RTE_MAX_ETHPORTS)
565 		rte_exit(EXIT_FAILURE,
566 		"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
567 		portid, RTE_MAX_ETHPORTS);
568 
569 	if (cmdline_parse_etheraddr(NULL, port_end,
570 		&peer_addr, sizeof(peer_addr)) < 0)
571 		rte_exit(EXIT_FAILURE,
572 		"Invalid ethernet address: %s\n",
573 		port_end);
574 	dest = (uint8_t *)&dest_eth_addr[portid];
575 	for (c = 0; c < 6; c++)
576 		dest[c] = peer_addr[c];
577 	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
578 }
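/*
 * Layout of val_eth[portid]: bytes 0-5 hold the destination MAC taken from
 * dest_eth_addr[], and bytes 6-11 are filled later with the port's own MAC
 * (see l3fwd_poll_resource_setup()), so the forwarding paths can rewrite
 * both Ethernet addresses of a packet at once.
 */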
579 
580 static void
581 parse_mode(const char *optarg)
582 {
583 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
584 
585 	if (!strcmp(optarg, "poll"))
586 		evt_rsrc->enabled = false;
587 	else if (!strcmp(optarg, "eventdev"))
588 		evt_rsrc->enabled = true;
589 }
590 
591 static void
592 parse_queue_size(const char *queue_size_arg, uint16_t *queue_size, int rx)
593 {
594 	char *end = NULL;
595 	unsigned long value;
596 
597 	/* parse decimal string */
598 	value = strtoul(queue_size_arg, &end, 10);
599 	if ((queue_size_arg[0] == '\0') || (end == NULL) ||
600 		(*end != '\0') || (value == 0)) {
601 		if (rx == 1)
602 			rte_exit(EXIT_FAILURE, "Invalid rx-queue-size\n");
603 		else
604 			rte_exit(EXIT_FAILURE, "Invalid tx-queue-size\n");
605 
606 		return;
607 	}
608 
609 	if (value > UINT16_MAX) {
610 		if (rx == 1)
611 			rte_exit(EXIT_FAILURE, "rx-queue-size %lu > %d\n",
612 				value, UINT16_MAX);
613 		else
614 			rte_exit(EXIT_FAILURE, "tx-queue-size %lu > %d\n",
615 				value, UINT16_MAX);
616 
617 		return;
618 	}
619 
620 	*queue_size = value;
621 }
622 
623 static void
624 parse_eventq_sched(const char *optarg)
625 {
626 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
627 
628 	if (!strcmp(optarg, "ordered"))
629 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
630 	if (!strcmp(optarg, "atomic"))
631 		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
632 	if (!strcmp(optarg, "parallel"))
633 		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
634 }
635 
636 static void
637 parse_event_eth_rx_queues(const char *eth_rx_queues)
638 {
639 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
640 	char *end = NULL;
641 	uint8_t num_eth_rx_queues;
642 
643 	/* parse decimal string */
644 	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
645 	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
646 		return;
647 
648 	if (num_eth_rx_queues == 0)
649 		return;
650 
651 	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
652 }
653 
654 static int
655 parse_lookup(const char *optarg)
656 {
657 	if (!strcmp(optarg, "em"))
658 		lookup_mode = L3FWD_LOOKUP_EM;
659 	else if (!strcmp(optarg, "lpm"))
660 		lookup_mode = L3FWD_LOOKUP_LPM;
661 	else if (!strcmp(optarg, "fib"))
662 		lookup_mode = L3FWD_LOOKUP_FIB;
663 	else if (!strcmp(optarg, "acl"))
664 		lookup_mode = L3FWD_LOOKUP_ACL;
665 	else {
666 		fprintf(stderr, "Invalid lookup option! Accepted options: acl, em, lpm, fib\n");
667 		return -1;
668 	}
669 	return 0;
670 }
671 
672 #define MAX_JUMBO_PKT_LEN  9600
673 
674 static const char short_options[] =
675 	"p:"  /* portmask */
676 	"P"   /* promiscuous */
677 	"L"   /* legacy enable long prefix match */
678 	"E"   /* legacy enable exact match */
679 	;
680 
681 #define CMD_LINE_OPT_CONFIG "config"
682 #define CMD_LINE_OPT_RX_QUEUE_SIZE "rx-queue-size"
683 #define CMD_LINE_OPT_TX_QUEUE_SIZE "tx-queue-size"
684 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
685 #define CMD_LINE_OPT_NO_NUMA "no-numa"
686 #define CMD_LINE_OPT_IPV6 "ipv6"
687 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
688 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
689 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
690 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
691 #define CMD_LINE_OPT_MODE "mode"
692 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
693 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
694 #define CMD_LINE_OPT_LOOKUP "lookup"
695 #define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
696 #define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
697 #define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
698 #define CMD_LINE_OPT_RULE_IPV4 "rule_ipv4"
699 #define CMD_LINE_OPT_RULE_IPV6 "rule_ipv6"
700 #define CMD_LINE_OPT_ALG "alg"
701 
702 enum {
703 	/* long options mapped to a short option */
704 
705 	/* first long only option value must be >= 256, so that we won't
706 	 * conflict with short options */
707 	CMD_LINE_OPT_MIN_NUM = 256,
708 	CMD_LINE_OPT_CONFIG_NUM,
709 	CMD_LINE_OPT_RX_QUEUE_SIZE_NUM,
710 	CMD_LINE_OPT_TX_QUEUE_SIZE_NUM,
711 	CMD_LINE_OPT_ETH_DEST_NUM,
712 	CMD_LINE_OPT_NO_NUMA_NUM,
713 	CMD_LINE_OPT_IPV6_NUM,
714 	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
715 	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
716 	CMD_LINE_OPT_PARSE_PTYPE_NUM,
717 	CMD_LINE_OPT_RULE_IPV4_NUM,
718 	CMD_LINE_OPT_RULE_IPV6_NUM,
719 	CMD_LINE_OPT_ALG_NUM,
720 	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
721 	CMD_LINE_OPT_MODE_NUM,
722 	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
723 	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
724 	CMD_LINE_OPT_LOOKUP_NUM,
725 	CMD_LINE_OPT_ENABLE_VECTOR_NUM,
726 	CMD_LINE_OPT_VECTOR_SIZE_NUM,
727 	CMD_LINE_OPT_VECTOR_TMO_NS_NUM
728 };
729 
730 static const struct option lgopts[] = {
731 	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
732 	{CMD_LINE_OPT_RX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_RX_QUEUE_SIZE_NUM},
733 	{CMD_LINE_OPT_TX_QUEUE_SIZE, 1, 0, CMD_LINE_OPT_TX_QUEUE_SIZE_NUM},
734 	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
735 	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
736 	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
737 	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
738 	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
739 	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
740 	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
741 	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
742 	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
743 	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
744 					CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
745 	{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
746 	{CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
747 	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
748 	{CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
749 	{CMD_LINE_OPT_RULE_IPV4,   1, 0, CMD_LINE_OPT_RULE_IPV4_NUM},
750 	{CMD_LINE_OPT_RULE_IPV6,   1, 0, CMD_LINE_OPT_RULE_IPV6_NUM},
751 	{CMD_LINE_OPT_ALG,   1, 0, CMD_LINE_OPT_ALG_NUM},
752 	{NULL, 0, 0, 0}
753 };
754 
755 /*
756  * This expression is used to calculate the number of mbufs needed
757  * depending on user input, taking into account memory for the Rx and
758  * Tx hardware rings, the per-lcore mempool cache and the per-port,
759  * per-lcore Tx burst buffers. RTE_MAX is used to ensure that NB_MBUF
760  * never goes below a minimum value of 8192.
761  */
762 #define NB_MBUF(nports) RTE_MAX(	\
763 	(nports*nb_rx_queue*nb_rxd +		\
764 	nports*nb_lcores*MAX_PKT_BURST +	\
765 	nports*n_tx_queue*nb_txd +		\
766 	nb_lcores*MEMPOOL_CACHE_SIZE),		\
767 	(unsigned)8192)
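/*
 * Rough illustration, assuming the defaults MAX_PKT_BURST = 32 and
 * MEMPOOL_CACHE_SIZE = 256 from l3fwd.h: with 2 ports, 3 Rx/Tx queues per
 * port, 1024 descriptors per queue and 3 lcores this gives
 * 2*3*1024 + 2*3*32 + 2*3*1024 + 3*256 = 13248 mbufs, which is above the
 * 8192 floor and therefore used as-is.
 */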
768 
769 /* Parse the argument given in the command line of the application */
770 static int
771 parse_args(int argc, char **argv)
772 {
773 	int opt, ret;
774 	char **argvopt;
775 	int option_index;
776 	char *prgname = argv[0];
777 	uint8_t lcore_params_given = 0;
778 	uint8_t eventq_sched = 0;
779 	uint8_t eth_rx_q = 0;
780 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
781 
782 	argvopt = argv;
783 
784 	/* Parse the command-line options. */
785 	while ((opt = getopt_long(argc, argvopt, short_options,
786 				lgopts, &option_index)) != EOF) {
787 
788 		switch (opt) {
789 		/* portmask */
790 		case 'p':
791 			enabled_port_mask = parse_portmask(optarg);
792 			if (enabled_port_mask == 0) {
793 				fprintf(stderr, "Invalid portmask\n");
794 				print_usage(prgname);
795 				return -1;
796 			}
797 			break;
798 
799 		case 'P':
800 			promiscuous_on = 1;
801 			break;
802 
803 		case 'E':
804 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
805 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
806 				return -1;
807 			}
808 			lookup_mode = L3FWD_LOOKUP_EM;
809 			break;
810 
811 		case 'L':
812 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
813 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
814 				return -1;
815 			}
816 			lookup_mode = L3FWD_LOOKUP_LPM;
817 			break;
818 
819 		/* long options */
820 		case CMD_LINE_OPT_CONFIG_NUM:
821 			ret = parse_config(optarg);
822 			if (ret) {
823 				fprintf(stderr, "Invalid config\n");
824 				print_usage(prgname);
825 				return -1;
826 			}
827 			lcore_params_given = 1;
828 			break;
829 
830 		case CMD_LINE_OPT_RX_QUEUE_SIZE_NUM:
831 			parse_queue_size(optarg, &nb_rxd, 1);
832 			break;
833 
834 		case CMD_LINE_OPT_TX_QUEUE_SIZE_NUM:
835 			parse_queue_size(optarg, &nb_txd, 0);
836 			break;
837 
838 		case CMD_LINE_OPT_ETH_DEST_NUM:
839 			parse_eth_dest(optarg);
840 			break;
841 
842 		case CMD_LINE_OPT_NO_NUMA_NUM:
843 			numa_on = 0;
844 			break;
845 
846 		case CMD_LINE_OPT_IPV6_NUM:
847 			ipv6 = 1;
848 			break;
849 
850 		case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
851 			max_pkt_len = parse_max_pkt_len(optarg);
852 			break;
853 
854 		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
855 			ret = parse_hash_entry_number(optarg);
856 			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
857 				hash_entry_number = ret;
858 			} else {
859 				fprintf(stderr, "invalid hash entry number\n");
860 				print_usage(prgname);
861 				return -1;
862 			}
863 			break;
864 
865 		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
866 			printf("soft parse-ptype is enabled\n");
867 			parse_ptype = 1;
868 			break;
869 
870 		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
871 			printf("per port buffer pool is enabled\n");
872 			per_port_pool = 1;
873 			break;
874 
875 		case CMD_LINE_OPT_MODE_NUM:
876 			parse_mode(optarg);
877 			break;
878 
879 		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
880 			parse_eventq_sched(optarg);
881 			eventq_sched = 1;
882 			break;
883 
884 		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
885 			parse_event_eth_rx_queues(optarg);
886 			eth_rx_q = 1;
887 			break;
888 
889 		case CMD_LINE_OPT_LOOKUP_NUM:
890 			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
891 				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
892 				return -1;
893 			}
894 			ret = parse_lookup(optarg);
895 			/*
896 			 * If parse_lookup was passed an invalid lookup type
897 			 * then return -1. Error log included within
898 			 * parse_lookup for simplicity.
899 			 */
900 			if (ret)
901 				return -1;
902 			break;
903 
904 		case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
905 			printf("event vectorization is enabled\n");
906 			evt_rsrc->vector_enabled = 1;
907 			break;
908 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
909 			evt_rsrc->vector_size = strtol(optarg, NULL, 10);
910 			break;
911 		case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
912 			evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
913 			break;
914 		case CMD_LINE_OPT_RULE_IPV4_NUM:
915 			l3fwd_set_rule_ipv4_name(optarg);
916 			break;
917 		case CMD_LINE_OPT_RULE_IPV6_NUM:
918 			l3fwd_set_rule_ipv6_name(optarg);
919 			break;
920 		case CMD_LINE_OPT_ALG_NUM:
921 			l3fwd_set_alg(optarg);
922 			break;
923 		default:
924 			print_usage(prgname);
925 			return -1;
926 		}
927 	}
928 
929 	if (evt_rsrc->enabled && lcore_params_given) {
930 		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
931 		return -1;
932 	}
933 
934 	if (!evt_rsrc->enabled && eth_rx_q) {
935 		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
936 		return -1;
937 	}
938 
939 	if (!evt_rsrc->enabled && eventq_sched) {
940 		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
941 		return -1;
942 	}
943 
944 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
945 		evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
946 		fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
947 			evt_rsrc->vector_size);
948 	}
949 
950 	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
951 		evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
952 		fprintf(stderr,
953 			"vector timeout set to default (%" PRIu64 " ns)\n",
954 			evt_rsrc->vector_tmo_ns);
955 	}
956 
957 	/*
958 	 * If no lookup mode is selected, pick longest-prefix match
959 	 * as the default.
960 	 */
961 	if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
962 		fprintf(stderr, "None of ACL, LPM, EM or FIB selected, defaulting to LPM\n");
963 		lookup_mode = L3FWD_LOOKUP_LPM;
964 	}
965 
966 	/*
967 	 * ipv6 and hash flags are valid only for
968 	 * exact match, reset them to default for
969 	 * longest-prefix match.
970 	 */
971 	if (lookup_mode == L3FWD_LOOKUP_LPM) {
972 		ipv6 = 0;
973 		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
974 	}
975 
976 	/* For ACL, update port config rss hash filter */
977 	if (lookup_mode == L3FWD_LOOKUP_ACL) {
978 		port_conf.rx_adv_conf.rss_conf.rss_hf |=
979 				RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP;
980 	}
981 
982 	if (optind >= 0)
983 		argv[optind-1] = prgname;
984 
985 	ret = optind-1;
986 	optind = 1; /* reset getopt lib */
987 	return ret;
988 }
989 
990 static void
991 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
992 {
993 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
994 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
995 	printf("%s%s", name, buf);
996 }
997 
998 int
999 init_mem(uint16_t portid, unsigned int nb_mbuf)
1000 {
1001 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1002 	struct lcore_conf *qconf;
1003 	int socketid;
1004 	unsigned lcore_id;
1005 	char s[64];
1006 
1007 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1008 		if (rte_lcore_is_enabled(lcore_id) == 0)
1009 			continue;
1010 
1011 		if (numa_on)
1012 			socketid = rte_lcore_to_socket_id(lcore_id);
1013 		else
1014 			socketid = 0;
1015 
1016 		if (socketid >= NB_SOCKETS) {
1017 			rte_exit(EXIT_FAILURE,
1018 				"Socket %d of lcore %u is out of range %d\n",
1019 				socketid, lcore_id, NB_SOCKETS);
1020 		}
1021 
1022 		if (pktmbuf_pool[portid][socketid] == NULL) {
1023 			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
1024 				 portid, socketid);
1025 			pktmbuf_pool[portid][socketid] =
1026 				rte_pktmbuf_pool_create(s, nb_mbuf,
1027 					MEMPOOL_CACHE_SIZE, 0,
1028 					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
1029 			if (pktmbuf_pool[portid][socketid] == NULL)
1030 				rte_exit(EXIT_FAILURE,
1031 					"Cannot init mbuf pool on socket %d\n",
1032 					socketid);
1033 			else
1034 				printf("Allocated mbuf pool on socket %d\n",
1035 					socketid);
1036 
1037 			/* Setup ACL, LPM, EM (i.e. hash) or FIB, but only once
1038 			 * per available socket.
1039 			 */
1040 			if (!lkp_per_socket[socketid]) {
1041 				l3fwd_lkp.setup(socketid);
1042 				lkp_per_socket[socketid] = 1;
1043 			}
1044 		}
1045 
1046 		if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
1047 			unsigned int nb_vec;
1048 
1049 			nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
1050 				 evt_rsrc->vector_size;
1051 			nb_vec = RTE_MAX(512U, nb_vec);
1052 			nb_vec += rte_lcore_count() * 32;
1053 			snprintf(s, sizeof(s), "vector_pool_%d", portid);
1054 			vector_pool[portid] = rte_event_vector_pool_create(
1055 				s, nb_vec, 32, evt_rsrc->vector_size, socketid);
1056 			if (vector_pool[portid] == NULL)
1057 				rte_exit(EXIT_FAILURE,
1058 					 "Failed to create vector pool for port %d\n",
1059 					 portid);
1060 			else
1061 				printf("Allocated vector pool for port %d\n",
1062 				       portid);
1063 		}
1064 
1065 		qconf = &lcore_conf[lcore_id];
1066 		qconf->ipv4_lookup_struct =
1067 			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
1068 		qconf->ipv6_lookup_struct =
1069 			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
1070 	}
1071 	return 0;
1072 }
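/*
 * Note on pool sharing: without --per-port-pool this function is always
 * called with portid 0, so all ports share one "mbuf_pool_0:<socket>" pool
 * per socket; with --per-port-pool it is called per port and each port gets
 * its own per-socket pool. Lookup tables are still built only once per
 * socket (see lkp_per_socket[]).
 */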
1073 
1074 /* Check the link status of all ports for up to 9s, and print the final status */
1075 static void
1076 check_all_ports_link_status(uint32_t port_mask)
1077 {
1078 #define CHECK_INTERVAL 100 /* 100ms */
1079 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1080 	uint16_t portid;
1081 	uint8_t count, all_ports_up, print_flag = 0;
1082 	struct rte_eth_link link;
1083 	int ret;
1084 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1085 
1086 	printf("\nChecking link status");
1087 	fflush(stdout);
1088 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1089 		if (force_quit)
1090 			return;
1091 		all_ports_up = 1;
1092 		RTE_ETH_FOREACH_DEV(portid) {
1093 			if (force_quit)
1094 				return;
1095 			if ((port_mask & (1 << portid)) == 0)
1096 				continue;
1097 			memset(&link, 0, sizeof(link));
1098 			ret = rte_eth_link_get_nowait(portid, &link);
1099 			if (ret < 0) {
1100 				all_ports_up = 0;
1101 				if (print_flag == 1)
1102 					printf("Port %u link get failed: %s\n",
1103 						portid, rte_strerror(-ret));
1104 				continue;
1105 			}
1106 			/* print link status if flag set */
1107 			if (print_flag == 1) {
1108 				rte_eth_link_to_str(link_status_text,
1109 					sizeof(link_status_text), &link);
1110 				printf("Port %d %s\n", portid,
1111 				       link_status_text);
1112 				continue;
1113 			}
1114 			/* clear all_ports_up flag if any link down */
1115 			if (link.link_status == RTE_ETH_LINK_DOWN) {
1116 				all_ports_up = 0;
1117 				break;
1118 			}
1119 		}
1120 		/* after finally printing all link status, get out */
1121 		if (print_flag == 1)
1122 			break;
1123 
1124 		if (all_ports_up == 0) {
1125 			printf(".");
1126 			fflush(stdout);
1127 			rte_delay_ms(CHECK_INTERVAL);
1128 		}
1129 
1130 		/* set the print_flag if all ports up or timeout */
1131 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1132 			print_flag = 1;
1133 			printf("done\n");
1134 		}
1135 	}
1136 }
1137 
1138 static void
1139 signal_handler(int signum)
1140 {
1141 	if (signum == SIGINT || signum == SIGTERM) {
1142 		printf("\n\nSignal %d received, preparing to exit...\n",
1143 				signum);
1144 		force_quit = true;
1145 	}
1146 }
1147 
1148 static int
1149 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
1150 {
1151 	if (parse_ptype) {
1152 		printf("Port %d: parsing packet type in software\n", portid);
1153 		if (rte_eth_add_rx_callback(portid, queueid,
1154 					    l3fwd_lkp.cb_parse_ptype,
1155 					    NULL))
1156 			return 1;
1157 
1158 		printf("Failed to add rx callback: port=%d\n", portid);
1159 		return 0;
1160 	}
1161 
1162 	if (l3fwd_lkp.check_ptype(portid))
1163 		return 1;
1164 
1165 	printf("port %d cannot parse packet type, please add --%s\n",
1166 	       portid, CMD_LINE_OPT_PARSE_PTYPE);
1167 	return 0;
1168 }
1169 
1170 static uint32_t
1171 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1172 {
1173 	uint32_t overhead_len;
1174 
1175 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1176 		overhead_len = max_rx_pktlen - max_mtu;
1177 	else
1178 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1179 
1180 	return overhead_len;
1181 }
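/*
 * The Ethernet overhead is derived from the device limits when possible:
 * if the driver reports a maximum MTU, overhead = max_rx_pktlen - max_mtu;
 * otherwise a plain Ethernet header plus CRC (14 + 4 = 18 bytes) is assumed.
 */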
1182 
1183 int
1184 config_port_max_pkt_len(struct rte_eth_conf *conf,
1185 		struct rte_eth_dev_info *dev_info)
1186 {
1187 	uint32_t overhead_len;
1188 
1189 	if (max_pkt_len == 0)
1190 		return 0;
1191 
1192 	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
1193 		return -1;
1194 
1195 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1196 			dev_info->max_mtu);
1197 	conf->rxmode.mtu = max_pkt_len - overhead_len;
1198 
1199 	if (conf->rxmode.mtu > RTE_ETHER_MTU)
1200 		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1201 
1202 	return 0;
1203 }
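/*
 * Example: --max-pkt-len 9000 on a device with the default 18-byte overhead
 * results in an MTU of 8982; since that exceeds RTE_ETHER_MTU (1500),
 * multi-segment Tx is enabled so that jumbo frames can be transmitted.
 */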
1204 
1205 static void
1206 l3fwd_poll_resource_setup(void)
1207 {
1208 	uint8_t nb_rx_queue, queue, socketid;
1209 	struct rte_eth_dev_info dev_info;
1210 	uint32_t n_tx_queue, nb_lcores;
1211 	struct rte_eth_txconf *txconf;
1212 	struct lcore_conf *qconf;
1213 	uint16_t queueid, portid;
1214 	unsigned int nb_ports;
1215 	unsigned int lcore_id;
1216 	int ret;
1217 
1218 	if (check_lcore_params() < 0)
1219 		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1220 
1221 	ret = init_lcore_rx_queues();
1222 	if (ret < 0)
1223 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1224 
1225 	nb_ports = rte_eth_dev_count_avail();
1226 
1227 	if (check_port_config() < 0)
1228 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1229 
1230 	nb_lcores = rte_lcore_count();
1231 
1232 	/* initialize all ports */
1233 	RTE_ETH_FOREACH_DEV(portid) {
1234 		struct rte_eth_conf local_port_conf = port_conf;
1235 
1236 		/* skip ports that are not enabled */
1237 		if ((enabled_port_mask & (1 << portid)) == 0) {
1238 			printf("\nSkipping disabled port %d\n", portid);
1239 			continue;
1240 		}
1241 
1242 		/* init port */
1243 		printf("Initializing port %d ... ", portid );
1244 		fflush(stdout);
1245 
1246 		nb_rx_queue = get_port_n_rx_queues(portid);
1247 		n_tx_queue = nb_lcores;
1248 		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1249 			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1250 		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1251 			nb_rx_queue, (unsigned)n_tx_queue );
1252 
1253 		ret = rte_eth_dev_info_get(portid, &dev_info);
1254 		if (ret != 0)
1255 			rte_exit(EXIT_FAILURE,
1256 				"Error during getting device (port %u) info: %s\n",
1257 				portid, strerror(-ret));
1258 
1259 		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
1260 		if (ret != 0)
1261 			rte_exit(EXIT_FAILURE,
1262 				"Invalid max packet length: %u (port %u)\n",
1263 				max_pkt_len, portid);
1264 
1265 		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
1266 			local_port_conf.txmode.offloads |=
1267 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1268 
1269 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1270 			dev_info.flow_type_rss_offloads;
1271 
1272 		if (dev_info.max_rx_queues == 1)
1273 			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
1274 
1275 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1276 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
1277 			printf("Port %u modified RSS hash function based on hardware support, "
1278 				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
1279 				portid,
1280 				port_conf.rx_adv_conf.rss_conf.rss_hf,
1281 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1282 		}
1283 
1284 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
1285 					(uint16_t)n_tx_queue, &local_port_conf);
1286 		if (ret < 0)
1287 			rte_exit(EXIT_FAILURE,
1288 				"Cannot configure device: err=%d, port=%d\n",
1289 				ret, portid);
1290 
1291 		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1292 						       &nb_txd);
1293 		if (ret < 0)
1294 			rte_exit(EXIT_FAILURE,
1295 				 "Cannot adjust number of descriptors: err=%d, "
1296 				 "port=%d\n", ret, portid);
1297 
1298 		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1299 		if (ret < 0)
1300 			rte_exit(EXIT_FAILURE,
1301 				 "Cannot get MAC address: err=%d, port=%d\n",
1302 				 ret, portid);
1303 
1304 		print_ethaddr(" Address:", &ports_eth_addr[portid]);
1305 		printf(", ");
1306 		print_ethaddr("Destination:",
1307 			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
1308 		printf(", ");
1309 
1310 		/*
1311 		 * prepare src MACs for each port.
1312 		 */
1313 		rte_ether_addr_copy(&ports_eth_addr[portid],
1314 			(struct rte_ether_addr *)(val_eth + portid) + 1);
1315 
1316 		/* init memory */
1317 		if (!per_port_pool) {
1318 			/* portid = 0; this is *not* signifying the first port,
1319 			 * rather, it signifies that portid is ignored.
1320 			 */
1321 			ret = init_mem(0, NB_MBUF(nb_ports));
1322 		} else {
1323 			ret = init_mem(portid, NB_MBUF(1));
1324 		}
1325 		if (ret < 0)
1326 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
1327 
1328 		/* init one TX queue per couple (lcore,port) */
1329 		queueid = 0;
1330 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1331 			if (rte_lcore_is_enabled(lcore_id) == 0)
1332 				continue;
1333 
1334 			if (numa_on)
1335 				socketid =
1336 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1337 			else
1338 				socketid = 0;
1339 
1340 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1341 			fflush(stdout);
1342 
1343 			txconf = &dev_info.default_txconf;
1344 			txconf->offloads = local_port_conf.txmode.offloads;
1345 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1346 						     socketid, txconf);
1347 			if (ret < 0)
1348 				rte_exit(EXIT_FAILURE,
1349 					"rte_eth_tx_queue_setup: err=%d, "
1350 					"port=%d\n", ret, portid);
1351 
1352 			qconf = &lcore_conf[lcore_id];
1353 			qconf->tx_queue_id[portid] = queueid;
1354 			queueid++;
1355 
1356 			qconf->tx_port_id[qconf->n_tx_port] = portid;
1357 			qconf->n_tx_port++;
1358 		}
1359 		printf("\n");
1360 	}
1361 
1362 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1363 		if (rte_lcore_is_enabled(lcore_id) == 0)
1364 			continue;
1365 		qconf = &lcore_conf[lcore_id];
1366 		printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1367 		fflush(stdout);
1368 		/* init RX queues */
1369 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1370 			struct rte_eth_rxconf rxq_conf;
1371 
1372 			portid = qconf->rx_queue_list[queue].port_id;
1373 			queueid = qconf->rx_queue_list[queue].queue_id;
1374 
1375 			if (numa_on)
1376 				socketid =
1377 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
1378 			else
1379 				socketid = 0;
1380 
1381 			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1382 			fflush(stdout);
1383 
1384 			ret = rte_eth_dev_info_get(portid, &dev_info);
1385 			if (ret != 0)
1386 				rte_exit(EXIT_FAILURE,
1387 					"Error during getting device (port %u) info: %s\n",
1388 					portid, strerror(-ret));
1389 
1390 			rxq_conf = dev_info.default_rxconf;
1391 			rxq_conf.offloads = port_conf.rxmode.offloads;
1392 			if (!per_port_pool)
1393 				ret = rte_eth_rx_queue_setup(portid, queueid,
1394 						nb_rxd, socketid,
1395 						&rxq_conf,
1396 						pktmbuf_pool[0][socketid]);
1397 			else
1398 				ret = rte_eth_rx_queue_setup(portid, queueid,
1399 						nb_rxd, socketid,
1400 						&rxq_conf,
1401 						pktmbuf_pool[portid][socketid]);
1402 			if (ret < 0)
1403 				rte_exit(EXIT_FAILURE,
1404 				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
1405 				ret, portid);
1406 		}
1407 	}
1408 }
1409 
1410 static inline int
1411 l3fwd_service_enable(uint32_t service_id)
1412 {
1413 	uint8_t min_service_count = UINT8_MAX;
1414 	uint32_t slcore_array[RTE_MAX_LCORE];
1415 	unsigned int slcore = 0;
1416 	uint8_t service_count;
1417 	int32_t slcore_count;
1418 
1419 	if (!rte_service_lcore_count())
1420 		return -ENOENT;
1421 
1422 	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1423 	if (slcore_count < 0)
1424 		return -ENOENT;
1425 	/* Get the core which has the least number of services running. */
1426 	while (slcore_count--) {
1427 		/* Reset default mapping */
1428 		if (rte_service_map_lcore_set(service_id,
1429 				slcore_array[slcore_count], 0) != 0)
1430 			return -ENOENT;
1431 		service_count = rte_service_lcore_count_services(
1432 				slcore_array[slcore_count]);
1433 		if (service_count < min_service_count) {
1434 			slcore = slcore_array[slcore_count];
1435 			min_service_count = service_count;
1436 		}
1437 	}
1438 	if (rte_service_map_lcore_set(service_id, slcore, 1))
1439 		return -ENOENT;
1440 	rte_service_lcore_start(slcore);
1441 
1442 	return 0;
1443 }
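/*
 * l3fwd_service_enable() first clears any default mapping of the service on
 * every service lcore, then maps the service to the service lcore currently
 * running the fewest services and starts that lcore, spreading eventdev and
 * adapter services across the available service cores.
 */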
1444 
1445 static void
1446 l3fwd_event_service_setup(void)
1447 {
1448 	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1449 	struct rte_event_dev_info evdev_info;
1450 	uint32_t service_id, caps;
1451 	int ret, i;
1452 
1453 	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1454 	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1455 		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1456 				&service_id);
1457 		if (ret != -ESRCH && ret != 0)
1458 			rte_exit(EXIT_FAILURE,
1459 				 "Error in starting eventdev service\n");
1460 		l3fwd_service_enable(service_id);
1461 	}
1462 
1463 	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1464 		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1465 				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1466 		if (ret < 0)
1467 			rte_exit(EXIT_FAILURE,
1468 				 "Failed to get Rx adapter[%d] caps\n",
1469 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1470 		ret = rte_event_eth_rx_adapter_service_id_get(
1471 				evt_rsrc->event_d_id,
1472 				&service_id);
1473 		if (ret != -ESRCH && ret != 0)
1474 			rte_exit(EXIT_FAILURE,
1475 				 "Error in starting Rx adapter[%d] service\n",
1476 				 evt_rsrc->rx_adptr.rx_adptr[i]);
1477 		l3fwd_service_enable(service_id);
1478 	}
1479 
1480 	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1481 		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1482 				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1483 		if (ret < 0)
1484 			rte_exit(EXIT_FAILURE,
1485 				 "Failed to get Tx adapter[%d] caps\n",
1486 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1487 		ret = rte_event_eth_tx_adapter_service_id_get(
1488 				evt_rsrc->event_d_id,
1489 				&service_id);
1490 		if (ret != -ESRCH && ret != 0)
1491 			rte_exit(EXIT_FAILURE,
1492 				 "Error in starting Tx adapter[%d] service\n",
1493 				 evt_rsrc->tx_adptr.tx_adptr[i]);
1494 		l3fwd_service_enable(service_id);
1495 	}
1496 }
1497 
1498 int
1499 main(int argc, char **argv)
1500 {
1501 	struct l3fwd_event_resources *evt_rsrc;
1502 	struct lcore_conf *qconf;
1503 	uint16_t queueid, portid;
1504 	unsigned int lcore_id;
1505 	uint8_t queue;
1506 	int i, ret;
1507 
1508 	/* init EAL */
1509 	ret = rte_eal_init(argc, argv);
1510 	if (ret < 0)
1511 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1512 	argc -= ret;
1513 	argv += ret;
1514 
1515 	force_quit = false;
1516 	signal(SIGINT, signal_handler);
1517 	signal(SIGTERM, signal_handler);
1518 
1519 	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1520 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1521 		dest_eth_addr[portid] =
1522 			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1523 		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1524 	}
1525 
1526 	evt_rsrc = l3fwd_get_eventdev_rsrc();
1527 	/* parse application arguments (after the EAL ones) */
1528 	ret = parse_args(argc, argv);
1529 	if (ret < 0)
1530 		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1531 
1532 	/* Setup function pointers for lookup method. */
1533 	setup_l3fwd_lookup_tables();
1534 
1535 	/* Add the config file rules */
1536 	l3fwd_lkp.read_config_files();
1537 
1538 	evt_rsrc->per_port_pool = per_port_pool;
1539 	evt_rsrc->pkt_pool = pktmbuf_pool;
1540 	evt_rsrc->vec_pool = vector_pool;
1541 	evt_rsrc->port_mask = enabled_port_mask;
1542 	/* Configure eventdev parameters if user has requested */
1543 	if (evt_rsrc->enabled) {
1544 		l3fwd_event_resource_setup(&port_conf);
1545 		if (lookup_mode == L3FWD_LOOKUP_EM)
1546 			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1547 		else if (lookup_mode == L3FWD_LOOKUP_FIB)
1548 			l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1549 		else
1550 			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1551 		l3fwd_event_service_setup();
1552 	} else
1553 		l3fwd_poll_resource_setup();
1554 
1555 	/* start ports */
1556 	RTE_ETH_FOREACH_DEV(portid) {
1557 		if ((enabled_port_mask & (1 << portid)) == 0) {
1558 			continue;
1559 		}
1560 		/* Start device */
1561 		ret = rte_eth_dev_start(portid);
1562 		if (ret < 0)
1563 			rte_exit(EXIT_FAILURE,
1564 				"rte_eth_dev_start: err=%d, port=%d\n",
1565 				ret, portid);
1566 
1567 		/*
1568 		 * If enabled, put device in promiscuous mode.
1569 		 * This allows IO forwarding mode to forward packets
1570 		 * to itself through 2 cross-connected ports of the
1571 		 * target machine.
1572 		 */
1573 		if (promiscuous_on) {
1574 			ret = rte_eth_promiscuous_enable(portid);
1575 			if (ret != 0)
1576 				rte_exit(EXIT_FAILURE,
1577 					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
1578 					rte_strerror(-ret), portid);
1579 		}
1580 	}
1581 
1582 	printf("\n");
1583 
1584 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1585 		if (rte_lcore_is_enabled(lcore_id) == 0)
1586 			continue;
1587 		qconf = &lcore_conf[lcore_id];
1588 		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1589 			portid = qconf->rx_queue_list[queue].port_id;
1590 			queueid = qconf->rx_queue_list[queue].queue_id;
1591 			if (prepare_ptype_parser(portid, queueid) == 0)
1592 				rte_exit(EXIT_FAILURE, "ptype check fails\n");
1593 		}
1594 	}
1595 
1596 	check_all_ports_link_status(enabled_port_mask);
1597 
1598 	ret = 0;
1599 	/* launch per-lcore init on every lcore */
1600 	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1601 	if (evt_rsrc->enabled) {
1602 		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1603 			rte_event_eth_rx_adapter_stop(
1604 					evt_rsrc->rx_adptr.rx_adptr[i]);
1605 		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1606 			rte_event_eth_tx_adapter_stop(
1607 					evt_rsrc->tx_adptr.tx_adptr[i]);
1608 
1609 		RTE_ETH_FOREACH_DEV(portid) {
1610 			if ((enabled_port_mask & (1 << portid)) == 0)
1611 				continue;
1612 			ret = rte_eth_dev_stop(portid);
1613 			if (ret != 0)
1614 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1615 				       ret, portid);
1616 		}
1617 
1618 		rte_eal_mp_wait_lcore();
1619 		RTE_ETH_FOREACH_DEV(portid) {
1620 			if ((enabled_port_mask & (1 << portid)) == 0)
1621 				continue;
1622 			rte_eth_dev_close(portid);
1623 		}
1624 
1625 		rte_event_dev_stop(evt_rsrc->event_d_id);
1626 		rte_event_dev_close(evt_rsrc->event_d_id);
1627 
1628 	} else {
1629 		rte_eal_mp_wait_lcore();
1630 
1631 		RTE_ETH_FOREACH_DEV(portid) {
1632 			if ((enabled_port_mask & (1 << portid)) == 0)
1633 				continue;
1634 			printf("Closing port %d...", portid);
1635 			ret = rte_eth_dev_stop(portid);
1636 			if (ret != 0)
1637 				printf("rte_eth_dev_stop: err=%d, port=%u\n",
1638 				       ret, portid);
1639 			rte_eth_dev_close(portid);
1640 			printf(" Done\n");
1641 		}
1642 	}
1643 
1644 	/* clean up config file routes */
1645 	l3fwd_lkp.free_routes();
1646 
1647 	/* clean up the EAL */
1648 	rte_eal_cleanup();
1649 
1650 	printf("Bye...\n");
1651 
1652 	return ret;
1653 }
1654