/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#define RTE_GRAPH_MODEL_SELECT RTE_GRAPH_MODEL_RTC
#include <rte_graph_worker.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_lpm6.h>
#include <rte_mempool.h>
#include <rte_node_eth_api.h>
#include <rte_node_ip4_api.h>
#include <rte_node_ip6_api.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_vect.h>

#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>

/* Log type */
#define RTE_LOGTYPE_L3FWD_GRAPH RTE_LOGTYPE_USER1

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RX_DESC_DEFAULT 1024
#define TX_DESC_DEFAULT 1024

#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024

#define NB_SOCKETS 8

/* Graph module */
#define WORKER_MODEL_RTC "rtc"
#define WORKER_MODEL_MCORE_DISPATCH "dispatch"
/* Static global variables used within this file. */
static uint16_t nb_rxd = RX_DESC_DEFAULT;
static uint16_t nb_txd = TX_DESC_DEFAULT;
/* Promiscuous mode is off by default. */
static int promiscuous_on;

static int numa_on = 1;	  /**< NUMA is enabled by default. */
static int per_port_pool; /**< Use separate buffer pools per port;
			   *   disabled by default. */

static volatile bool force_quit;

/* Ethernet addresses of ports */
static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
xmm_t val_eth[RTE_MAX_ETHPORTS];

/* Mask of enabled ports */
static uint32_t enabled_port_mask;

/* Pcap trace */
static char pcap_filename[RTE_GRAPH_PCAP_FILE_SZ];
static uint64_t packet_to_capture;
static int pcap_trace_enable;

struct lcore_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	char node_name[RTE_NODE_NAMESIZE];
};

static uint8_t model_conf = RTE_GRAPH_MODEL_DEFAULT;

/* Lcore conf */
struct __rte_cache_aligned lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];

	struct rte_graph *graph;
	char name[RTE_GRAPH_NAMESIZE];
	rte_graph_t graph_id;
};

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];

struct __rte_cache_aligned lcore_params {
	uint16_t port_id;
	uint16_t queue_id;
	uint32_t lcore_id;
};

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2}, {0, 1, 2}, {0, 2, 2}, {1, 0, 2}, {1, 1, 2},
	{1, 2, 2}, {2, 0, 2}, {3, 0, 3}, {3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
	},
	.rx_adv_conf = {
		.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

static uint32_t max_pkt_len;

static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];

static struct rte_node_ethdev_config ethdev_conf[RTE_MAX_ETHPORTS];

struct ipv4_l3fwd_lpm_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

struct ipv6_l3fwd_lpm_route {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE];
	uint8_t depth;
	uint8_t if_out;
};

#define IPV4_L3FWD_LPM_NUM_ROUTES                                              \
	(sizeof(ipv4_l3fwd_lpm_route_array) /                                  \
	 sizeof(ipv4_l3fwd_lpm_route_array[0]))
/* 198.18.0.0/16 is set aside for RFC2544 benchmarking. */
static struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = {
	{RTE_IPV4(198, 18, 0, 0), 24, 0}, {RTE_IPV4(198, 18, 1, 0), 24, 1},
	{RTE_IPV4(198, 18, 2, 0), 24, 2}, {RTE_IPV4(198, 18, 3, 0), 24, 3},
	{RTE_IPV4(198, 18, 4, 0), 24, 4}, {RTE_IPV4(198, 18, 5, 0), 24, 5},
	{RTE_IPV4(198, 18, 6, 0), 24, 6}, {RTE_IPV4(198, 18, 7, 0), 24, 7},
};

#define IPV6_L3FWD_LPM_NUM_ROUTES                                              \
	(sizeof(ipv6_l3fwd_lpm_route_array) /                                  \
	 sizeof(ipv6_l3fwd_lpm_route_array[0]))
static struct ipv6_l3fwd_lpm_route ipv6_l3fwd_lpm_route_array[] = {
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00}, 48, 0},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01}, 48, 1},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x02}, 48, 2},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x03}, 48, 3},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x04}, 48, 4},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x05}, 48, 5},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x06}, 48, 6},
	{{0x20, 0x01, 0xdb, 0x08, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x07}, 48, 7},
};

static int
check_worker_model_params(void)
{
	if (model_conf == RTE_GRAPH_MODEL_MCORE_DISPATCH &&
	    nb_lcore_params > 1) {
		printf("Exceeded max number of lcore params for dispatch model: %hu\n",
		       nb_lcore_params);
		return -1;
	}

	return 0;
}

static int
check_lcore_params(void)
{
	uint16_t queue, i;
	int socketid;
	uint32_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("Invalid queue number: %" PRIu16 "\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("Error: lcore %u is not enabled in lcore mask\n",
			       lcore);
			return -1;
		}

		if (lcore == rte_get_main_lcore()) {
			printf("Error: lcore %u is main lcore\n", lcore);
			return -1;
		}
		socketid = rte_lcore_to_socket_id(lcore);
		if ((socketid != 0) && (numa_on == 0)) {
			printf("Warning: lcore %u is on socket %d with numa off\n",
			       lcore, socketid);
		}
	}

	return 0;
}

static int
check_port_config(void)
{
	uint16_t portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("Port %u is not present on the board\n", portid);
			return -1;
		}
	}

	return 0;
}

static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port) {
			if (lcore_params[i].queue_id == queue + 1)
				queue = lcore_params[i].queue_id;
			else
				rte_exit(EXIT_FAILURE,
					 "Queue ids of the port %d must be in sequence and must start with 0\n",
					 lcore_params[i].port_id);
		}
	}

	return (uint16_t)(++queue);
}

static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint32_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("Error: too many queues (%u) for lcore: %u\n",
			       (unsigned int)nb_rx_queue + 1,
			       lcore);
			return -1;
		}

		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}

	return 0;
}

/* Display usage */
static void
print_usage(const char *prgname)
{
	fprintf(stderr,
		"%s [EAL options] --"
		" -p PORTMASK"
		" [-P]"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
		" [--max-pkt-len PKTLEN]"
		" [--model NAME]"
		" [--no-numa]"
		" [--per-port-pool]"
		" [--pcap-enable]"
		" [--pcap-num-cap NUMPKT]"
		" [--pcap-file-name NAME]\n\n"

		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		"  -P : Enable promiscuous mode\n"
		"  --config (port,queue,lcore): Rx queue configuration\n"
		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for "
		"port X\n"
		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
		"  --model NAME: graph walking model, \"dispatch\" or \"rtc\" (default)\n"
		"  --no-numa: Disable numa awareness\n"
		"  --per-port-pool: Use separate buffer pool per port\n"
		"  --pcap-enable: Enables pcap capture\n"
		"  --pcap-num-cap NUMPKT: Number of packets to capture\n"
		"  --pcap-file-name NAME: Pcap file name\n\n",
		prgname);
}
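
/*
 * Illustrative invocation (the binary name, core list and port count
 * below are assumptions, not defaults of this example):
 *
 *   ./dpdk-l3fwd-graph -l 1-3 -n 4 -- -p 0x3 -P \
 *       --config="(0,0,2),(1,0,3)" --eth-dest=0,11:22:33:44:55:66
 *
 * This enables ports 0 and 1, polls port 0/queue 0 on lcore 2 and
 * port 1/queue 0 on lcore 3, and sets the destination MAC stamped on
 * packets transmitted out of port 0.
 */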

static uint64_t
parse_num_pkt_cap(const char *num_pkt_cap)
{
	uint64_t num_pkt;
	char *end = NULL;

	/* Parse decimal string */
	num_pkt = strtoull(num_pkt_cap, &end, 10);
	if ((num_pkt_cap[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	if (num_pkt == 0)
		return 0;

	return num_pkt;
}

static int
parse_max_pkt_len(const char *pktlen)
{
	unsigned long len;
	char *end = NULL;

	/* Parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}

static void
parse_worker_model(const char *model)
{
	if (strcmp(model, WORKER_MODEL_MCORE_DISPATCH) == 0)
		model_conf = RTE_GRAPH_MODEL_MCORE_DISPATCH;
	else if (strcmp(model, WORKER_MODEL_RTC) == 0)
		model_conf = RTE_GRAPH_MODEL_RTC;
	else
		rte_exit(EXIT_FAILURE, "Invalid worker model: %s\n", model);

#if defined(RTE_GRAPH_MODEL_SELECT)
	if (model_conf != RTE_GRAPH_MODEL_SELECT)
		printf("Warning: model mismatch, will use the RTE_GRAPH_MODEL_SELECT model\n");
	model_conf = RTE_GRAPH_MODEL_SELECT;
#endif
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* Parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

static int
parse_config(const char *q_arg)
{
	enum fieldnames { FLD_PORT = 0, FLD_QUEUE, FLD_LCORE, _NUM_FLD };
	unsigned long int_fld[_NUM_FLD];
	const char *p, *p0 = q_arg;
	char *str_fld[_NUM_FLD];
	uint32_t size;
	char s[256];
	char *end;
	int i;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		memcpy(s, p, size);
		s[size] = '\0';
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
		    _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i])
				return -1;
		}

		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("Exceeded max number of lcore params: %hu\n",
			       nb_lcore_params);
			return -1;
		}

		if (int_fld[FLD_PORT] >= RTE_MAX_ETHPORTS ||
		    int_fld[FLD_LCORE] >= RTE_MAX_LCORE) {
			printf("Invalid port/lcore id\n");
			return -1;
		}

		lcore_params_array[nb_lcore_params].port_id =
			(uint16_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint16_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint32_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;

	return 0;
}
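
/*
 * For reference: --config="(0,0,2),(0,1,2),(1,0,3)" fills
 * lcore_params_array with {0,0,2}, {0,1,2}, {1,0,3}, i.e. port 0
 * queues 0-1 are polled by lcore 2 and port 1 queue 0 by lcore 3.
 */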

static void
parse_eth_dest(const char *optarg)
{
	uint8_t c, *dest, peer_addr[6];
	unsigned long portid;
	char *port_end;

	errno = 0;
	portid = strtoul(optarg, &port_end, 10);
	if (errno != 0 || port_end == optarg || *port_end++ != ',')
		rte_exit(EXIT_FAILURE, "Invalid eth-dest: %s\n", optarg);
	/* Range-check before the value is used, so out-of-range input cannot wrap */
	if (portid >= RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE,
			 "eth-dest: port %lu >= RTE_MAX_ETHPORTS(%d)\n", portid,
			 RTE_MAX_ETHPORTS);

	if (cmdline_parse_etheraddr(NULL, port_end, &peer_addr,
				    sizeof(peer_addr)) < 0)
		rte_exit(EXIT_FAILURE, "Invalid ethernet address: %s\n",
			 port_end);
	dest = (uint8_t *)&dest_eth_addr[portid];
	for (c = 0; c < 6; c++)
		dest[c] = peer_addr[c];
	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
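
/*
 * Note on val_eth layout: bytes 0-5 of val_eth[portid] hold the
 * destination MAC (mirrored from dest_eth_addr[portid]) and the port's
 * own MAC is copied in at offset 6 during port init in main(), so the
 * first 12 bytes form a ready-made Ethernet dst+src pair for the
 * rewrite nodes.
 */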

#define MAX_JUMBO_PKT_LEN  9600
#define MEMPOOL_CACHE_SIZE 256

static const char short_options[] = "p:" /* portmask */
				    "P"	 /* promiscuous */
	;

#define CMD_LINE_OPT_CONFIG	   "config"
#define CMD_LINE_OPT_ETH_DEST	   "eth-dest"
#define CMD_LINE_OPT_NO_NUMA	   "no-numa"
#define CMD_LINE_OPT_MAX_PKT_LEN   "max-pkt-len"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_OPT_PCAP_ENABLE   "pcap-enable"
#define CMD_LINE_OPT_NUM_PKT_CAP   "pcap-num-cap"
#define CMD_LINE_OPT_PCAP_FILENAME "pcap-file-name"
#define CMD_LINE_OPT_WORKER_MODEL  "model"

enum {
	/* Long options mapped to a short option */

	/* First long only option value must be >= 256, so that we won't
	 * conflict with short options
	 */
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_ETH_DEST_NUM,
	CMD_LINE_OPT_NO_NUMA_NUM,
	CMD_LINE_OPT_MAX_PKT_LEN_NUM,
	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
	CMD_LINE_OPT_PARSE_PCAP_ENABLE,
	CMD_LINE_OPT_PARSE_NUM_PKT_CAP,
	CMD_LINE_OPT_PCAP_FILENAME_CAP,
	CMD_LINE_OPT_WORKER_MODEL_TYPE,
};

static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
	{CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
	{CMD_LINE_OPT_PCAP_ENABLE, 0, 0, CMD_LINE_OPT_PARSE_PCAP_ENABLE},
	{CMD_LINE_OPT_NUM_PKT_CAP, 1, 0, CMD_LINE_OPT_PARSE_NUM_PKT_CAP},
	{CMD_LINE_OPT_PCAP_FILENAME, 1, 0, CMD_LINE_OPT_PCAP_FILENAME_CAP},
	{CMD_LINE_OPT_WORKER_MODEL, 1, 0, CMD_LINE_OPT_WORKER_MODEL_TYPE},
	{NULL, 0, 0, 0},
};

/*
 * This expression is used to calculate the number of mbufs needed
 * depending on user input, taking into account memory for the rx and
 * tx hardware rings, the per-lcore mempool cache and the per-port
 * per-lcore graph burst buffers. RTE_MAX is used to ensure that
 * NB_MBUF never goes below a minimum value of 8192.
 */
#define NB_MBUF(nports)                                                        \
	RTE_MAX((nports * nb_rx_queue * nb_rxd +                               \
		 nports * nb_lcores * RTE_GRAPH_BURST_SIZE +                   \
		 nports * n_tx_queue * nb_txd +                                \
		 nb_lcores * MEMPOOL_CACHE_SIZE), 8192u)
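
/*
 * Worked example (illustrative numbers): with nports = 2, one rx queue
 * per port (nb_rx_queue = 1), nb_rxd = nb_txd = 1024, nb_lcores = 3 and
 * n_tx_queue = 3, this gives
 *   2*1*1024 + 2*3*256 + 2*3*1024 + 3*256 = 10496 mbufs
 * (assuming RTE_GRAPH_BURST_SIZE is at its usual default of 256),
 * which is above the 8192 floor.
 */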

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	char *prgname = argv[0];
	int option_index;
	char **argvopt;
	int opt, ret;

	argvopt = argv;

	/* Error or normal output strings. */
	while ((opt = getopt_long(argc, argvopt, short_options, lgopts,
				  &option_index)) != EOF) {

		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				fprintf(stderr, "Invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous_on = 1;
			break;

		/* Long options */
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
			if (ret) {
				fprintf(stderr, "Invalid config\n");
				print_usage(prgname);
				return -1;
			}
			break;

		case CMD_LINE_OPT_ETH_DEST_NUM:
			parse_eth_dest(optarg);
			break;

		case CMD_LINE_OPT_NO_NUMA_NUM:
			numa_on = 0;
			break;

		case CMD_LINE_OPT_MAX_PKT_LEN_NUM: {
			max_pkt_len = parse_max_pkt_len(optarg);
			break;
		}

		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
			printf("Per port buffer pool is enabled\n");
			per_port_pool = 1;
			break;

		case CMD_LINE_OPT_PARSE_PCAP_ENABLE:
			printf("Packet capture enabled\n");
			pcap_trace_enable = 1;
			break;

		case CMD_LINE_OPT_PARSE_NUM_PKT_CAP:
			packet_to_capture = parse_num_pkt_cap(optarg);
			printf("Number of packets to capture: %"PRIu64"\n",
			       packet_to_capture);
			break;

		case CMD_LINE_OPT_PCAP_FILENAME_CAP:
			rte_strlcpy(pcap_filename, optarg,
				    sizeof(pcap_filename));
			printf("Pcap file name: %s\n", pcap_filename);
			break;

		case CMD_LINE_OPT_WORKER_MODEL_TYPE:
			printf("Use new worker model: %s\n", optarg);
			parse_worker_model(optarg);
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;
	ret = optind - 1;
	optind = 1; /* Reset getopt lib */

	return ret;
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

static int
init_mem(uint16_t portid, uint32_t nb_mbuf)
{
	uint32_t lcore_id;
	int socketid;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				 "Socket %d of lcore %u is out of range %d\n",
				 socketid, lcore_id, NB_SOCKETS);
		}

		if (pktmbuf_pool[portid][socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d:%d", portid,
				 socketid);
			/* Create a pool with priv size of a cacheline */
			pktmbuf_pool[portid][socketid] =
				rte_pktmbuf_pool_create(
					s, nb_mbuf, MEMPOOL_CACHE_SIZE,
					RTE_CACHE_LINE_SIZE,
					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
			if (pktmbuf_pool[portid][socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					 "Cannot init mbuf pool on socket %d\n",
					 socketid);
			else
				printf("Allocated mbuf pool on socket %d\n",
				       socketid);
		}
	}

	return 0;
}

/* Check the link status of all ports in up to 9s, and print their final status */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	uint16_t portid;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		if (force_quit)
			return;
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid)
		{
			if (force_quit)
				return;
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* Print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
				       link_status_text);
				continue;
			}
			/* Clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* Once the final link status has been printed, stop polling */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* Set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("Done\n");
		}
	}
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
		       signum);
		force_quit = true;
	}
}

static void
print_stats(void)
{
	const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
	const char clr[] = {27, '[', '2', 'J', '\0'};
	struct rte_graph_cluster_stats_param s_param;
	struct rte_graph_cluster_stats *stats;
	const char *pattern = "worker_*";

	/* Prepare stats object */
	memset(&s_param, 0, sizeof(s_param));
	s_param.f = stdout;
	s_param.socket_id = SOCKET_ID_ANY;
	s_param.graph_patterns = &pattern;
	s_param.nb_graph_patterns = 1;

	stats = rte_graph_cluster_stats_create(&s_param);
	if (stats == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create stats object\n");

	while (!force_quit) {
		/* Clear screen and move to top left */
		printf("%s%s", clr, topLeft);
		rte_graph_cluster_stats_get(stats, 0);
		rte_delay_ms(1E3);
	}

	rte_graph_cluster_stats_destroy(stats);
}
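
/*
 * The "worker_*" pattern above matches the per-lcore graphs created in
 * graph_config_rtc() and graph_config_mcore_dispatch() ("worker_<lcore>")
 * and, since rte_graph_clone() builds the clone's name from the parent
 * graph's name, the cloned dispatch-model graphs as well.
 */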

/* Main processing loop. 8< */
static int
graph_main_loop(void *conf)
{
	struct lcore_conf *qconf;
	struct rte_graph *graph;
	uint32_t lcore_id;

	RTE_SET_USED(conf);

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	graph = qconf->graph;

	if (!graph) {
		RTE_LOG(INFO, L3FWD_GRAPH, "Lcore %u has nothing to do\n",
			lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD_GRAPH,
		"Entering main loop on lcore %u, graph %s(%p)\n", lcore_id,
		qconf->name, graph);

	while (likely(!force_quit))
		rte_graph_walk(graph);

	return 0;
}
/* >8 End of main processing loop. */
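
/*
 * Roughly speaking, in the run-to-completion (RTC) model each
 * rte_graph_walk() call polls the graph's source nodes (the
 * ethdev_rx-* nodes added per rx queue) and carries every received
 * burst through the lookup, rewrite and ethdev_tx nodes on the same
 * lcore before returning. In the dispatch model the walk instead
 * processes only the nodes affinitized to this lcore and hands bursts
 * destined for other nodes over to their owning lcores.
 */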

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

static int
config_port_max_pkt_len(struct rte_eth_conf *conf,
		struct rte_eth_dev_info *dev_info)
{
	uint32_t overhead_len;

	if (max_pkt_len == 0)
		return 0;

	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
		return -1;

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	conf->rxmode.mtu = max_pkt_len - overhead_len;

	if (conf->rxmode.mtu > RTE_ETHER_MTU)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	return 0;
}
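
/*
 * Example: with --max-pkt-len 9000 on a device that reports no usable
 * MTU-derived overhead, overhead_len falls back to
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, so
 * rxmode.mtu becomes 9000 - 18 = 8982. Since that exceeds
 * RTE_ETHER_MTU (1500), multi-segment tx is also enabled.
 */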

static void
graph_config_mcore_dispatch(struct rte_graph_param graph_conf)
{
	uint16_t nb_patterns = graph_conf.nb_node_patterns;
	int worker_count = rte_lcore_count() - 1;
	int main_lcore_id = rte_get_main_lcore();
	rte_graph_t main_graph_id = 0;
	struct rte_node *node_tmp;
	struct lcore_conf *qconf;
	struct rte_graph *graph;
	rte_graph_t graph_id;
	rte_graph_off_t off;
	int n_rx_node = 0;
	int worker_lcore;
	rte_node_t count;
	int i, j;
	int ret;

	for (j = 0; j < nb_lcore_params; j++) {
		qconf = &lcore_conf[lcore_params[j].lcore_id];
		/* Add rx node patterns of all lcores */
		for (i = 0; i < qconf->n_rx_queue; i++) {
			char *node_name = qconf->rx_queue_list[i].node_name;
			unsigned int lcore_id = lcore_params[j].lcore_id;

			graph_conf.node_patterns[nb_patterns + n_rx_node] = node_name;
			n_rx_node++;
			ret = rte_graph_model_mcore_dispatch_node_lcore_affinity_set(node_name,
										     lcore_id);
			if (ret == 0)
				printf("Set node %s affinity to lcore %u\n", node_name,
				       lcore_params[j].lcore_id);
		}
	}

	graph_conf.nb_node_patterns = nb_patterns + n_rx_node;
	graph_conf.socket_id = rte_lcore_to_socket_id(main_lcore_id);

	qconf = &lcore_conf[main_lcore_id];
	snprintf(qconf->name, sizeof(qconf->name), "worker_%u",
		 main_lcore_id);

	/* create main graph */
	main_graph_id = rte_graph_create(qconf->name, &graph_conf);
	if (main_graph_id == RTE_GRAPH_ID_INVALID)
		rte_exit(EXIT_FAILURE,
			 "rte_graph_create(): main_graph_id invalid for lcore %u\n",
			 main_lcore_id);

	/* set the graph model for the main graph */
	rte_graph_worker_model_set(RTE_GRAPH_MODEL_MCORE_DISPATCH);
	qconf->graph_id = main_graph_id;
	qconf->graph = rte_graph_lookup(qconf->name);
	if (!qconf->graph)
		rte_exit(EXIT_FAILURE,
			 "rte_graph_lookup(): graph %s not found\n",
			 qconf->name);

	graph = qconf->graph;
	worker_lcore = lcore_params[nb_lcore_params - 1].lcore_id;
	rte_graph_foreach_node(count, off, graph, node_tmp) {
		/* The node lcore affinity must be set before cloning the graph for each lcore */
		if (node_tmp->dispatch.lcore_id == RTE_MAX_LCORE) {
			worker_lcore = rte_get_next_lcore(worker_lcore, true, 1);
			ret = rte_graph_model_mcore_dispatch_node_lcore_affinity_set(node_tmp->name,
										     worker_lcore);
			if (ret == 0)
				printf("Set node %s affinity to lcore %u\n",
				       node_tmp->name, worker_lcore);
		}
	}

	worker_lcore = main_lcore_id;
	for (i = 0; i < worker_count; i++) {
		worker_lcore = rte_get_next_lcore(worker_lcore, true, 1);

		qconf = &lcore_conf[worker_lcore];
		snprintf(qconf->name, sizeof(qconf->name), "cloned-%u", worker_lcore);
		graph_id = rte_graph_clone(main_graph_id, qconf->name, &graph_conf);
		ret = rte_graph_model_mcore_dispatch_core_bind(graph_id, worker_lcore);
		if (ret == 0)
			printf("bind graph %d to lcore %u\n", graph_id, worker_lcore);

		/* full cloned graph name */
		snprintf(qconf->name, sizeof(qconf->name), "%s",
			 rte_graph_id_to_name(graph_id));
		qconf->graph_id = graph_id;
		qconf->graph = rte_graph_lookup(qconf->name);
		if (!qconf->graph)
			rte_exit(EXIT_FAILURE,
				 "Failed to lookup graph %s\n",
				 qconf->name);
	}
}
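
/*
 * Note: rte_graph_clone() composes the new graph's name from the parent
 * name and the given suffix, which is why qconf->name is refreshed from
 * rte_graph_id_to_name() above rather than keeping the "cloned-%u" stub.
 */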

static void
graph_config_rtc(struct rte_graph_param graph_conf)
{
	uint16_t nb_patterns = graph_conf.nb_node_patterns;
	struct lcore_conf *qconf;
	rte_graph_t graph_id;
	uint32_t lcore_id;
	rte_edge_t i;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		qconf = &lcore_conf[lcore_id];
		/* Skip graph creation if no source exists */
		if (!qconf->n_rx_queue)
			continue;
		/* Add rx node patterns of this lcore */
		for (i = 0; i < qconf->n_rx_queue; i++) {
			graph_conf.node_patterns[nb_patterns + i] =
				qconf->rx_queue_list[i].node_name;
		}
		graph_conf.nb_node_patterns = nb_patterns + i;
		graph_conf.socket_id = rte_lcore_to_socket_id(lcore_id);
		snprintf(qconf->name, sizeof(qconf->name), "worker_%u",
			 lcore_id);
		graph_id = rte_graph_create(qconf->name, &graph_conf);
		if (graph_id == RTE_GRAPH_ID_INVALID)
			rte_exit(EXIT_FAILURE,
				 "rte_graph_create(): graph_id invalid for lcore %u\n",
				 lcore_id);
		qconf->graph_id = graph_id;
		qconf->graph = rte_graph_lookup(qconf->name);
		if (!qconf->graph)
			rte_exit(EXIT_FAILURE,
				 "rte_graph_lookup(): graph %s not found\n",
				 qconf->name);
	}
}

int
main(int argc, char **argv)
{
	/* Rewrite data of src and dst ether addr */
	uint8_t rewrite_data[2 * sizeof(struct rte_ether_addr)];
	/* Graph initialization. 8< */
	static const char * const default_patterns[] = {
		"ip4*",
		"ethdev_tx-*",
		"pkt_drop",
	};
	uint8_t socketid;
	uint16_t nb_rx_queue, queue;
	struct rte_graph_param graph_conf;
	struct rte_eth_dev_info dev_info;
	uint32_t nb_ports, nb_conf = 0;
	uint32_t n_tx_queue, nb_lcores;
	struct rte_eth_txconf *txconf;
	uint16_t queueid, portid, i;
	const char **node_patterns;
	struct lcore_conf *qconf;
	uint16_t nb_graphs = 0;
	uint16_t nb_patterns;
	uint8_t rewrite_len;
	uint32_t lcore_id;
	int ret;

	/* Init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* Pre-init dst MACs for all ports to 02:00:00:00:00:xx */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		dest_eth_addr[portid] =
			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
	}

	/* Parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD_GRAPH parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params() failed\n");

	if (check_worker_model_params() < 0)
		rte_exit(EXIT_FAILURE, "check_worker_model_params() failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues() failed\n");

	if (check_port_config() < 0)
		rte_exit(EXIT_FAILURE, "check_port_config() failed\n");

	nb_ports = rte_eth_dev_count_avail();
	nb_lcores = rte_lcore_count();

	/* Initialize all ports. 8< */
	RTE_ETH_FOREACH_DEV(portid)
	{
		struct rte_eth_conf local_port_conf = port_conf;

		/* Skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* Init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
		       nb_rx_queue, n_tx_queue);

		rte_eth_dev_info_get(portid, &dev_info);

		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Invalid max packet length: %u (port %u)\n",
				max_pkt_len, portid);

		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
		    port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on "
			       "hardware support, "
			       "requested:%#" PRIx64 " configured:%#" PRIx64
			       "\n",
			       portid, port_conf.rx_adv_conf.rss_conf.rss_hf,
			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					    n_tx_queue, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot configure device: err=%d, port=%d\n",
				 ret, portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, "
				 "port=%d\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");
		print_ethaddr(
			"Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
		printf(", ");

		/*
		 * prepare src MACs for each port.
		 */
		rte_ether_addr_copy(
			&ports_eth_addr[portid],
			(struct rte_ether_addr *)(val_eth + portid) + 1);

		/* Init memory */
		if (!per_port_pool) {
			/* portid = 0; this is *not* signifying the first port,
			 * rather, it signifies that portid is ignored.
			 */
			ret = init_mem(0, NB_MBUF(nb_ports));
		} else {
			ret = init_mem(portid, NB_MBUF(1));
		}
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem() failed\n");

		/* Init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			qconf = &lcore_conf[lcore_id];

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(
					lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					 "rte_eth_tx_queue_setup: err=%d, "
					 "port=%d\n",
					 ret, portid);
			queueid++;
		}

		/* Setup ethdev node config */
		ethdev_conf[nb_conf].port_id = portid;
		ethdev_conf[nb_conf].num_rx_queues = nb_rx_queue;
		ethdev_conf[nb_conf].num_tx_queues = n_tx_queue;
		if (!per_port_pool)
			ethdev_conf[nb_conf].mp = pktmbuf_pool[0];
		else
			ethdev_conf[nb_conf].mp = pktmbuf_pool[portid];
		ethdev_conf[nb_conf].mp_count = NB_SOCKETS;

		nb_conf++;
		printf("\n");
	}

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);
		/* Init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;

			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(
					lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = port_conf.rxmode.offloads;
			if (!per_port_pool)
				ret = rte_eth_rx_queue_setup(
					portid, queueid, nb_rxd, socketid,
					&rxq_conf, pktmbuf_pool[0][socketid]);
			else
				ret = rte_eth_rx_queue_setup(
					portid, queueid, nb_rxd, socketid,
					&rxq_conf,
					pktmbuf_pool[portid][socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					 "rte_eth_rx_queue_setup: err=%d, "
					 "port=%d\n",
					 ret, portid);

			/* Add this queue node to its graph */
			snprintf(qconf->rx_queue_list[queue].node_name,
				 RTE_NODE_NAMESIZE, "ethdev_rx-%u-%u", portid,
				 queueid);
		}

		/* Alloc a graph to this lcore only if source exists */
		if (qconf->n_rx_queue)
			nb_graphs++;
	}

	printf("\n");

	/* Ethdev node config, skip rx queue mapping */
	ret = rte_node_eth_config(ethdev_conf, nb_conf, nb_graphs);
	/* >8 End of graph creation. */
	if (ret)
		rte_exit(EXIT_FAILURE, "rte_node_eth_config: err=%d\n", ret);

	/* Start ports */
	RTE_ETH_FOREACH_DEV(portid)
	{
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_start: err=%d, port=%d\n", ret,
				 portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	printf("\n");

	check_all_ports_link_status(enabled_port_mask);

	/* Graph Initialization */
	nb_patterns = RTE_DIM(default_patterns);
	node_patterns = malloc((MAX_RX_QUEUE_PER_LCORE + nb_patterns) *
			       sizeof(*node_patterns));
	if (!node_patterns)
		return -ENOMEM;
	memcpy(node_patterns, default_patterns,
	       nb_patterns * sizeof(*node_patterns));

	memset(&graph_conf, 0, sizeof(graph_conf));
	graph_conf.node_patterns = node_patterns;
	graph_conf.nb_node_patterns = nb_patterns;

	/* Pcap config */
	graph_conf.pcap_enable = pcap_trace_enable;
	graph_conf.num_pkt_to_capture = packet_to_capture;
	graph_conf.pcap_filename = pcap_filename;

	if (model_conf == RTE_GRAPH_MODEL_MCORE_DISPATCH)
		graph_config_mcore_dispatch(graph_conf);
	else
		graph_config_rtc(graph_conf);

	rte_graph_worker_model_set(model_conf);
	/* >8 End of graph initialization. */

	memset(&rewrite_data, 0, sizeof(rewrite_data));
	rewrite_len = sizeof(rewrite_data);

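	/*
	 * rewrite_data is the 12-byte Ethernet dst+src pair taken from
	 * val_eth[dst_port]; the ip4/ip6 rewrite nodes write it over the
	 * Ethernet header of every packet forwarded to that next hop.
	 */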
	/* Add routes and rewrite data to graph infra. 8< */
	for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
		char route_str[INET6_ADDRSTRLEN * 4];
		char abuf[INET6_ADDRSTRLEN];
		struct in_addr in;
		uint32_t dst_port;

		/* Skip unused ports */
		if ((1 << ipv4_l3fwd_lpm_route_array[i].if_out &
		     enabled_port_mask) == 0)
			continue;

		dst_port = ipv4_l3fwd_lpm_route_array[i].if_out;

		in.s_addr = htonl(ipv4_l3fwd_lpm_route_array[i].ip);
		snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
			 inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
			 ipv4_l3fwd_lpm_route_array[i].depth,
			 ipv4_l3fwd_lpm_route_array[i].if_out);

		/* Use route index 'i' as next hop id */
		ret = rte_node_ip4_route_add(
			ipv4_l3fwd_lpm_route_array[i].ip,
			ipv4_l3fwd_lpm_route_array[i].depth, i,
			RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);

		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to add ip4 route %s to graph\n",
				 route_str);

		memcpy(rewrite_data, val_eth + dst_port, rewrite_len);

		/* Add next hop rewrite data for id 'i' */
		ret = rte_node_ip4_rewrite_add(i, rewrite_data,
					       rewrite_len, dst_port);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to add next hop %u for "
				 "route %s\n", i, route_str);

		RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
			route_str, i);
	}

	for (i = 0; i < IPV6_L3FWD_LPM_NUM_ROUTES; i++) {
		char route_str[INET6_ADDRSTRLEN * 4];
		char abuf[INET6_ADDRSTRLEN];
		struct in6_addr in6;
		uint32_t dst_port;

		/* Skip unused ports */
		if ((1 << ipv6_l3fwd_lpm_route_array[i].if_out &
		     enabled_port_mask) == 0)
			continue;

		dst_port = ipv6_l3fwd_lpm_route_array[i].if_out;

		memcpy(in6.s6_addr, ipv6_l3fwd_lpm_route_array[i].ip, RTE_LPM6_IPV6_ADDR_SIZE);
		snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
			 inet_ntop(AF_INET6, &in6, abuf, sizeof(abuf)),
			 ipv6_l3fwd_lpm_route_array[i].depth,
			 ipv6_l3fwd_lpm_route_array[i].if_out);

		/* Use route index 'i' as next hop id */
		ret = rte_node_ip6_route_add(ipv6_l3fwd_lpm_route_array[i].ip,
			ipv6_l3fwd_lpm_route_array[i].depth, i,
			RTE_NODE_IP6_LOOKUP_NEXT_REWRITE);

		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to add ip6 route %s to graph\n",
				 route_str);

		memcpy(rewrite_data, val_eth + dst_port, rewrite_len);

		/* Add next hop rewrite data for id 'i' */
		ret = rte_node_ip6_rewrite_add(i, rewrite_data,
					       rewrite_len, dst_port);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Unable to add next hop %u for "
				 "route %s\n", i, route_str);

		RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
			route_str, i);
	}
	/* >8 End of adding routes and rewrite data to graph infra. */

	/* Launch per-lcore init on every worker lcore */
	rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MAIN);

	/* Accumulate and print stats on main until exit */
	if (rte_graph_has_stats_feature())
		print_stats();

	/* Wait for worker cores to exit */
	ret = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		ret = rte_eal_wait_lcore(lcore_id);
		/* Destroy graph */
		if (ret < 0 || rte_graph_destroy(
			rte_graph_from_name(lcore_conf[lcore_id].name))) {
			ret = -1;
			break;
		}
	}
	free(node_patterns);

	/* Stop ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("Closing port %d...", portid);
		ret = rte_eth_dev_stop(portid);
		if (ret != 0)
			printf("Failed to stop port %u: %s\n",
			       portid, rte_strerror(-ret));
		rte_eth_dev_close(portid);
		printf(" Done\n");
	}

	/* clean up the EAL */
	rte_eal_cleanup();
	printf("Bye...\n");

	return ret;
}