xref: /dpdk/app/test-pmd/testpmd.c (revision bb44fb6fe7713ddcd023d5b9bacadf074d68092e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot easily be terminated. Set a flag to exit the stats period
 * loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB testing is in progress */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting to forward.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket is already discovered.
 * If it is, return zero; otherwise return a positive value.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Set up the default forwarding lcores configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
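	/*
	 * Walk all possible lcores: record each newly seen NUMA socket,
	 * and collect every enabled lcore except the master (which runs
	 * the command line) as a candidate forwarding lcore.
	 */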
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

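	/*
	 * With --mp-anon the pool is created empty and populated from
	 * anonymous (not necessarily physically contiguous) memory;
	 * otherwise the standard rte_pktmbuf_pool_create() helper is used.
	 */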
	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it is not greater than the maximum number of
 * RX queues of every port.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it is not greater than the maximum number of
 * TX queues of every port.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create the mbuf pools.
	 * If NUMA support is disabled, create a single mbuf pool in
	 * socket 0 memory by default.
	 * Otherwise, create an mbuf pool in the memory of each discovered
	 * socket.
	 *
	 * Size for the maximum values of nb_rxd and nb_txd here, so that
	 * nb_rxd and nb_txd can be re-configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
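		/*
		 * Worst-case sizing: enough mbufs for a full RX ring and a
		 * full TX ring, the per-lcore mempool caches and one burst
		 * in flight, for every possible port.
		 */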
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set the socket id according to the NUMA configuration */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

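	/*
	 * One stream is needed per queue of each port; use the larger of
	 * the RX and TX queue counts.
	 */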
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

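	/*
	 * Two layouts: a compact one when no queue stats mapping is
	 * enabled, and a wide one (with per-register rows below) otherwise.
	 */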
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * the loop once the 1-second period expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
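	/* Poll every stream assigned to this lcore until told to stop. */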
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
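	/*
	 * Run the forwarding loop on a copy marked as stopped, so that
	 * exactly one burst is sent before the loop exits.
	 */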
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
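		/*
		 * In interactive mode, never launch forwarding on the
		 * lcore that runs the command line.
		 */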
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

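	/*
	 * Compact the list in place: once an invalid port id is met,
	 * shift the remaining valid ids down over it.
	 */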
	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
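		/*
		 * Snapshot the current HW counters so that
		 * stop_packet_forwarding() can report per-run deltas.
		 */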
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
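		/*
		 * Send the requested number of txonly bursts first, to
		 * start the traffic in loopback configurations.
		 */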
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
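		/*
		 * Subtract the snapshot taken when forwarding started, so
		 * that only this run's counters are reported.
		 */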
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All (non-slave) ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
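		/*
		 * Take the port from STOPPED to the transient HANDLING
		 * state; ports in any other state are skipped.
		 */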
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation "
							"on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start port; set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1835 
1836 void
1837 close_port(portid_t pid)
1838 {
1839 	portid_t pi;
1840 	struct rte_port *port;
1841 
1842 	if (port_id_is_invalid(pid, ENABLED_WARN))
1843 		return;
1844 
1845 	printf("Closing ports...\n");
1846 
1847 	RTE_ETH_FOREACH_DEV(pi) {
1848 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1849 			continue;
1850 
1851 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1852 			printf("Please remove port %d from forwarding configuration.\n", pi);
1853 			continue;
1854 		}
1855 
1856 		if (port_is_bonding_slave(pi)) {
1857 			printf("Please remove port %d from bonded device.\n", pi);
1858 			continue;
1859 		}
1860 
1861 		port = &ports[pi];
1862 		if (rte_atomic16_cmpset(&(port->port_status),
1863 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1864 			printf("Port %d is already closed\n", pi);
1865 			continue;
1866 		}
1867 
1868 		if (rte_atomic16_cmpset(&(port->port_status),
1869 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1870 			printf("Port %d is not stopped\n", pi);
1871 			continue;
1872 		}
1873 
1874 		if (port->flow_list)
1875 			port_flow_flush(pi);
1876 		rte_eth_dev_close(pi);
1877 
1878 		if (rte_atomic16_cmpset(&(port->port_status),
1879 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1880 			printf("Port %d cannot be set to closed\n", pi);
1881 	}
1882 
1883 	printf("Done\n");
1884 }
1885 
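/*
 * Reset the given port (or all ports for RTE_PORT_ALL) through
 * rte_eth_dev_reset(). On success the port is flagged for a full
 * reconfiguration, so the next start_port() call re-runs the device and
 * queue setup.
 */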
1886 void
1887 reset_port(portid_t pid)
1888 {
1889 	int diag;
1890 	portid_t pi;
1891 	struct rte_port *port;
1892 
1893 	if (port_id_is_invalid(pid, ENABLED_WARN))
1894 		return;
1895 
1896 	printf("Resetting ports...\n");
1897 
1898 	RTE_ETH_FOREACH_DEV(pi) {
1899 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1900 			continue;
1901 
1902 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1903 			printf("Please remove port %d from forwarding "
1904 			       "configuration.\n", pi);
1905 			continue;
1906 		}
1907 
1908 		if (port_is_bonding_slave(pi)) {
1909 			printf("Please remove port %d from bonded device.\n",
1910 			       pi);
1911 			continue;
1912 		}
1913 
1914 		diag = rte_eth_dev_reset(pi);
1915 		if (diag == 0) {
1916 			port = &ports[pi];
1917 			port->need_reconfig = 1;
1918 			port->need_reconfig_queues = 1;
1919 		} else {
1920 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1921 		}
1922 	}
1923 
1924 	printf("Done\n");
1925 }
1926 
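/*
 * Register eth_dev_event_callback() for hotplug notifications. Passing a
 * NULL device name to rte_dev_event_callback_register() subscribes to
 * events from all devices, which is the behavior wanted here.
 */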
1927 static int
1928 eth_dev_event_callback_register(void)
1929 {
1930 	int ret;
1931 
1932 	/* register the device event callback */
1933 	ret = rte_dev_event_callback_register(NULL,
1934 		eth_dev_event_callback, NULL);
1935 	if (ret) {
1936 		printf("Failed to register device event callback\n");
1937 		return -1;
1938 	}
1939 
1940 	return 0;
1941 }
1942 
1943 
1944 static int
1945 eth_dev_event_callback_unregister(void)
1946 {
1947 	int ret;
1948 
1949 	/* unregister the device event callback */
1950 	ret = rte_dev_event_callback_unregister(NULL,
1951 		eth_dev_event_callback, NULL);
1952 	if (ret < 0) {
1953 		printf("Failed to unregister device event callback\n");
1954 		return -1;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
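/*
 * Hot-plug a new port identified by a PCI address or a virtual device
 * string. Illustrative identifiers only, e.g. from the testpmd prompt:
 *
 *   testpmd> port attach 0000:02:00.0
 *   testpmd> port attach net_pcap0,iface=eth0
 */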
1960 void
1961 attach_port(char *identifier)
1962 {
1963 	portid_t pi = 0;
1964 	unsigned int socket_id;
1965 
1966 	printf("Attaching a new port...\n");
1967 
1968 	if (identifier == NULL) {
1969 		printf("Invalid parameter specified\n");
1970 		return;
1971 	}
1972 
1973 	if (rte_eth_dev_attach(identifier, &pi))
1974 		return;
1975 
1976 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1977 	/* if socket_id is invalid, set to 0 */
1978 	if (check_socket_id(socket_id) < 0)
1979 		socket_id = 0;
1980 	reconfig(pi, socket_id);
1981 	rte_eth_promiscuous_enable(pi);
1982 
1983 	nb_ports = rte_eth_dev_count_avail();
1984 
1985 	ports[pi].port_status = RTE_PORT_STOPPED;
1986 
1987 	update_fwd_ports(pi);
1988 
1989 	printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
1990 	printf("Done\n");
1991 }
1992 
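/*
 * Detach a previously closed port and remove its underlying device; see
 * close_port() above for the required prior state.
 */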
1993 void
1994 detach_port(portid_t port_id)
1995 {
1996 	char name[RTE_ETH_NAME_MAX_LEN];
1997 
1998 	printf("Detaching a port...\n");
1999 
2000 	if (!port_is_closed(port_id)) {
2001 		printf("Please close port first\n");
2002 		return;
2003 	}
2004 
2005 	if (ports[port_id].flow_list)
2006 		port_flow_flush(port_id);
2007 
2008 	if (rte_eth_dev_detach(port_id, name)) {
2009 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2010 		return;
2011 	}
2012 
2013 	nb_ports = rte_eth_dev_count_avail();
2014 
2015 	update_fwd_ports(RTE_MAX_ETHPORTS);
2016 
2017 	printf("Port %u is detached. Total ports: %d\n",
2018 			port_id, nb_ports);
2019 	printf("Done\n");
2021 }
2022 
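/*
 * Tear everything down on exit: stop forwarding if it is still running,
 * stop and close every port, and, when hot-plug monitoring was enabled,
 * shut down the device event machinery.
 */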
2023 void
2024 pmd_test_exit(void)
2025 {
2026 	struct rte_device *device;
2027 	portid_t pt_id;
2028 	int ret;
2029 
2030 	if (test_done == 0)
2031 		stop_packet_forwarding();
2032 
2033 	if (ports != NULL) {
2034 		no_link_check = 1;
2035 		RTE_ETH_FOREACH_DEV(pt_id) {
2036 			printf("\nShutting down port %d...\n", pt_id);
2037 			fflush(stdout);
2038 			stop_port(pt_id);
2039 			close_port(pt_id);
2040 
2041 			/*
2042 			 * This is a workaround to fix a virtio-user issue that
2043 			 * requires calling a clean-up routine to remove the
2044 			 * existing socket.
2045 			 * This workaround is valid only for testpmd; a fix
2046 			 * valid for all applications is needed.
2047 			 * TODO: Implement proper resource cleanup
2048 			 */
2049 			device = rte_eth_devices[pt_id].device;
2050 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2051 				detach_port(pt_id);
2052 		}
2053 	}
2054 
2055 	if (hot_plug) {
2056 		ret = rte_dev_event_monitor_stop();
2057 		if (ret)
2058 			RTE_LOG(ERR, EAL,
2059 				"Failed to stop the device event monitor.\n");
2060 
2061 		ret = eth_dev_event_callback_unregister();
2062 		if (ret)
2063 			RTE_LOG(ERR, EAL,
2064 				"Failed to unregister the device event callback.\n");
2065 	}
2066 
2067 	printf("\nBye...\n");
2068 }
2069 
2070 typedef void (*cmd_func_t)(void);
2071 struct pmd_test_command {
2072 	const char *cmd_name;
2073 	cmd_func_t cmd_func;
2074 };
2075 
2076 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2077 
2078 /* Check the link status of all ports for up to 9 s, and print the final status */
2079 static void
2080 check_all_ports_link_status(uint32_t port_mask)
2081 {
2082 #define CHECK_INTERVAL 100 /* 100ms */
2083 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2084 	portid_t portid;
2085 	uint8_t count, all_ports_up, print_flag = 0;
2086 	struct rte_eth_link link;
2087 
2088 	printf("Checking link statuses...\n");
2089 	fflush(stdout);
2090 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2091 		all_ports_up = 1;
2092 		RTE_ETH_FOREACH_DEV(portid) {
2093 			if ((port_mask & (1 << portid)) == 0)
2094 				continue;
2095 			memset(&link, 0, sizeof(link));
2096 			rte_eth_link_get_nowait(portid, &link);
2097 			/* print link status if flag set */
2098 			if (print_flag == 1) {
2099 				if (link.link_status)
2100 					printf(
2101 					"Port %d Link Up. speed %u Mbps - %s\n",
2102 					portid, link.link_speed,
2103 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2104 					("full-duplex") : ("half-duplex"));
2105 				else
2106 					printf("Port %d Link Down\n", portid);
2107 				continue;
2108 			}
2109 			/* clear all_ports_up flag if any link down */
2110 			if (link.link_status == ETH_LINK_DOWN) {
2111 				all_ports_up = 0;
2112 				break;
2113 			}
2114 		}
2115 		/* after finally printing all link status, get out */
2116 		if (print_flag == 1)
2117 			break;
2118 
2119 		if (all_ports_up == 0) {
2120 			fflush(stdout);
2121 			rte_delay_ms(CHECK_INTERVAL);
2122 		}
2123 
2124 		/* set the print_flag if all ports up or timeout */
2125 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2126 			print_flag = 1;
2127 		}
2128 
2129 		if (lsc_interrupt)
2130 			break;
2131 	}
2132 }
2133 
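/*
 * Deferred handler for device-removal (RMV) interrupts. It runs from an
 * EAL alarm (armed in eth_event_callback() below) rather than directly in
 * the interrupt thread, because stopping, closing and detaching a port is
 * not safe from interrupt context. Link checking is suppressed while the
 * removed port is stopped, since its link can no longer be queried.
 */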
2134 static void
2135 rmv_event_callback(void *arg)
2136 {
2137 	int need_to_start = 0;
2138 	int org_no_link_check = no_link_check;
2139 	portid_t port_id = (intptr_t)arg;
2140 
2141 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2142 
2143 	if (!test_done && port_is_forwarding(port_id)) {
2144 		need_to_start = 1;
2145 		stop_packet_forwarding();
2146 	}
2147 	no_link_check = 1;
2148 	stop_port(port_id);
2149 	no_link_check = org_no_link_check;
2150 	close_port(port_id);
2151 	detach_port(port_id);
2152 	if (need_to_start)
2153 		start_packet_forwarding(0);
2154 }
2155 
2156 /* This function is used by the interrupt thread */
2157 static int
2158 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2159 		  void *ret_param)
2160 {
2161 	static const char * const event_desc[] = {
2162 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2163 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2164 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2165 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2166 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2167 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2168 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2169 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2170 		[RTE_ETH_EVENT_NEW] = "device probed",
2171 		[RTE_ETH_EVENT_DESTROY] = "device released",
2172 		[RTE_ETH_EVENT_MAX] = NULL,
2173 	};
2174 
2175 	RTE_SET_USED(param);
2176 	RTE_SET_USED(ret_param);
2177 
2178 	if (type >= RTE_ETH_EVENT_MAX) {
2179 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2180 			port_id, __func__, type);
2181 		fflush(stderr);
2182 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2183 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2184 			event_desc[type]);
2185 		fflush(stdout);
2186 	}
2187 
2188 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2189 		return 0;
2190 
2191 	switch (type) {
2192 	case RTE_ETH_EVENT_INTR_RMV:
2193 		if (rte_eal_alarm_set(100000,
2194 				rmv_event_callback, (void *)(intptr_t)port_id))
2195 			fprintf(stderr, "Could not set up deferred device removal\n");
2196 		break;
2197 	default:
2198 		break;
2199 	}
2200 	return 0;
2201 }
2202 
2203 /* This function is used by the interrupt thread */
2204 static void
2205 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2206 			     __rte_unused void *arg)
2207 {
2208 	if (type >= RTE_DEV_EVENT_MAX) {
2209 		fprintf(stderr, "%s called upon invalid event %d\n",
2210 			__func__, type);
2211 		fflush(stderr);
2212 	}
2213 
2214 	switch (type) {
2215 	case RTE_DEV_EVENT_REMOVE:
2216 		RTE_LOG(ERR, EAL, "Device %s has been removed!\n",
2217 			device_name);
2218 		/* TODO: Once failure handling is complete, stop packet
2219 		 * forwarding, then stop, close and detach the port.
2220 		 */
2221 		break;
2222 	case RTE_DEV_EVENT_ADD:
2223 		RTE_LOG(ERR, EAL, "Device %s has been added!\n",
2224 			device_name);
2225 		/* TODO: Once kernel driver binding is complete,
2226 		 * attach the port.
2227 		 */
2228 		break;
2229 	default:
2230 		break;
2231 	}
2232 }
2233 
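/*
 * Apply the --tx-queue-stats-mapping pairs that match this port. Each pair
 * binds a Tx queue to one of the per-queue stats counters that some NICs
 * (ixgbe-class devices, for example) expose. PMDs without this feature
 * return -ENOTSUP, which the caller below treats as non-fatal.
 */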
2234 static int
2235 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2236 {
2237 	uint16_t i;
2238 	int diag;
2239 	uint8_t mapping_found = 0;
2240 
2241 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2242 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2243 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2244 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2245 					tx_queue_stats_mappings[i].queue_id,
2246 					tx_queue_stats_mappings[i].stats_counter_id);
2247 			if (diag != 0)
2248 				return diag;
2249 			mapping_found = 1;
2250 		}
2251 	}
2252 	if (mapping_found)
2253 		port->tx_queue_stats_mapping_enabled = 1;
2254 	return 0;
2255 }
2256 
2257 static int
2258 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2259 {
2260 	uint16_t i;
2261 	int diag;
2262 	uint8_t mapping_found = 0;
2263 
2264 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2265 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2266 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2267 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2268 					rx_queue_stats_mappings[i].queue_id,
2269 					rx_queue_stats_mappings[i].stats_counter_id);
2270 			if (diag != 0)
2271 				return diag;
2272 			mapping_found = 1;
2273 		}
2274 	}
2275 	if (mapping_found)
2276 		port->rx_queue_stats_mapping_enabled = 1;
2277 	return 0;
2278 }
2279 
2280 static void
2281 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2282 {
2283 	int diag = 0;
2284 
2285 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2286 	if (diag != 0) {
2287 		if (diag == -ENOTSUP) {
2288 			port->tx_queue_stats_mapping_enabled = 0;
2289 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2290 		}
2291 		else
2292 			rte_exit(EXIT_FAILURE,
2293 					"set_tx_queue_stats_mapping_registers "
2294 					"failed for port id=%d diag=%d\n",
2295 					pi, diag);
2296 	}
2297 
2298 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2299 	if (diag != 0) {
2300 		if (diag == -ENOTSUP) {
2301 			port->rx_queue_stats_mapping_enabled = 0;
2302 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2303 		}
2304 		else
2305 			rte_exit(EXIT_FAILURE,
2306 					"set_rx_queue_stats_mapping_registers "
2307 					"failed for port id=%d diag=%d\n",
2308 					pi, diag);
2309 	}
2310 }
2311 
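/*
 * Seed each queue's Rx/Tx configuration from the PMD defaults reported in
 * dev_info, then overlay only the knobs the user actually set on the
 * command line (e.g. --rxpt/--rxht/--rxwt, --txpt/--txht/--txwt,
 * --rxfreet, --txfreet, --txrst); RTE_PMD_PARAM_UNSET marks values that
 * were never given and therefore keep the driver default.
 */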
2312 static void
2313 rxtx_port_config(struct rte_port *port)
2314 {
2315 	uint16_t qid;
2316 
2317 	for (qid = 0; qid < nb_rxq; qid++) {
2318 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2319 
2320 		/* Check if any Rx parameters have been passed */
2321 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2322 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2323 
2324 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2325 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2326 
2327 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2328 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2329 
2330 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2331 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2332 
2333 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2334 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2335 
2336 		port->nb_rx_desc[qid] = nb_rxd;
2337 	}
2338 
2339 	for (qid = 0; qid < nb_txq; qid++) {
2340 		port->tx_conf[qid] = port->dev_info.default_txconf;
2341 
2342 		/* Check if any Tx parameters have been passed */
2343 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2344 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2345 
2346 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2347 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2348 
2349 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2350 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2351 
2352 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2353 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2354 
2355 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2356 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2357 
2358 		port->nb_tx_desc[qid] = nb_txd;
2359 	}
2360 }
2361 
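/*
 * Build the default per-port configuration. RSS is enabled only when more
 * than one Rx queue is requested, and the requested hash functions are
 * masked by what the PMD reports in flow_type_rss_offloads; LSC/RMV
 * interrupts are requested only when both testpmd and the device support
 * them.
 */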
2362 void
2363 init_port_config(void)
2364 {
2365 	portid_t pid;
2366 	struct rte_port *port;
2367 
2368 	RTE_ETH_FOREACH_DEV(pid) {
2369 		port = &ports[pid];
2370 		port->dev_conf.fdir_conf = fdir_conf;
2371 		rte_eth_dev_info_get(pid, &port->dev_info);
2372 		if (nb_rxq > 1) {
2373 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2374 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2375 				rss_hf & port->dev_info.flow_type_rss_offloads;
2376 		} else {
2377 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2378 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2379 		}
2380 
2381 		if (port->dcb_flag == 0) {
2382 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2383 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2384 			else
2385 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2386 		}
2387 
2388 		rxtx_port_config(port);
2389 
2390 		rte_eth_macaddr_get(pid, &port->eth_addr);
2391 
2392 		map_port_queue_stats_mapping_registers(pid, port);
2393 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2394 		rte_pmd_ixgbe_bypass_init(pid);
2395 #endif
2396 
2397 		if (lsc_interrupt &&
2398 		    (rte_eth_devices[pid].data->dev_flags &
2399 		     RTE_ETH_DEV_INTR_LSC))
2400 			port->dev_conf.intr_conf.lsc = 1;
2401 		if (rmv_interrupt &&
2402 		    (rte_eth_devices[pid].data->dev_flags &
2403 		     RTE_ETH_DEV_INTR_RMV))
2404 			port->dev_conf.intr_conf.rmv = 1;
2405 	}
2406 }
2407 
2408 void set_port_slave_flag(portid_t slave_pid)
2409 {
2410 	struct rte_port *port;
2411 
2412 	port = &ports[slave_pid];
2413 	port->slave_flag = 1;
2414 }
2415 
2416 void clear_port_slave_flag(portid_t slave_pid)
2417 {
2418 	struct rte_port *port;
2419 
2420 	port = &ports[slave_pid];
2421 	port->slave_flag = 0;
2422 }
2423 
2424 uint8_t port_is_bonding_slave(portid_t slave_pid)
2425 {
2426 	struct rte_port *port;
2427 
2428 	port = &ports[slave_pid];
2429 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2430 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2431 		return 1;
2432 	return 0;
2433 }
2434 
2435 const uint16_t vlan_tags[] = {
2436 		0,  1,  2,  3,  4,  5,  6,  7,
2437 		8,  9, 10, 11,  12, 13, 14, 15,
2438 		16, 17, 18, 19, 20, 21, 22, 23,
2439 		24, 25, 26, 27, 28, 29, 30, 31
2440 };
2441 
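/*
 * The dcb_tc[] assignments below map the eight user priorities to traffic
 * classes round-robin via i % num_tcs. For example, with num_tcs ==
 * ETH_4_TCS the resulting mapping is:
 *
 *   user priority:  0 1 2 3 4 5 6 7
 *   traffic class:  0 1 2 3 0 1 2 3
 */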
2442 static int
2443 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2444 		 enum dcb_mode_enable dcb_mode,
2445 		 enum rte_eth_nb_tcs num_tcs,
2446 		 uint8_t pfc_en)
2447 {
2448 	uint8_t i;
2449 
2450 	/*
2451 	 * Build up the correct configuration for DCB+VT based on the VLAN tags array
2452 	 * given above, and the number of traffic classes available for use.
2453 	 */
2454 	if (dcb_mode == DCB_VT_ENABLED) {
2455 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2456 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2457 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2458 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2459 
2460 		/* VMDQ+DCB RX and TX configurations */
2461 		vmdq_rx_conf->enable_default_pool = 0;
2462 		vmdq_rx_conf->default_pool = 0;
2463 		vmdq_rx_conf->nb_queue_pools =
2464 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2465 		vmdq_tx_conf->nb_queue_pools =
2466 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2467 
2468 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2469 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2470 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2471 			vmdq_rx_conf->pool_map[i].pools =
2472 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2473 		}
2474 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2475 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2476 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2477 		}
2478 
2479 		/* set DCB mode of RX and TX of multiple queues */
2480 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2481 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2482 	} else {
2483 		struct rte_eth_dcb_rx_conf *rx_conf =
2484 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2485 		struct rte_eth_dcb_tx_conf *tx_conf =
2486 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2487 
2488 		rx_conf->nb_tcs = num_tcs;
2489 		tx_conf->nb_tcs = num_tcs;
2490 
2491 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2492 			rx_conf->dcb_tc[i] = i % num_tcs;
2493 			tx_conf->dcb_tc[i] = i % num_tcs;
2494 		}
2495 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2496 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2497 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2498 	}
2499 
2500 	if (pfc_en)
2501 		eth_conf->dcb_capability_en =
2502 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2503 	else
2504 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2505 
2506 	return 0;
2507 }
2508 
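/*
 * Switch one port into DCB (optionally DCB+VT) mode: translate the request
 * into an rte_eth_conf via get_eth_dcb_conf(), reconfigure the device,
 * resize nb_rxq/nb_txq to match the TC/pool layout, and enable VLAN
 * filtering for the vlan_tags[] set above. For illustration, the prompt
 * command wired to this is of the form:
 *
 *   testpmd> port config 0 dcb vt off 4 pfc on
 */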
2509 int
2510 init_port_dcb_config(portid_t pid,
2511 		     enum dcb_mode_enable dcb_mode,
2512 		     enum rte_eth_nb_tcs num_tcs,
2513 		     uint8_t pfc_en)
2514 {
2515 	struct rte_eth_conf port_conf;
2516 	struct rte_port *rte_port;
2517 	int retval;
2518 	uint16_t i;
2519 
2520 	rte_port = &ports[pid];
2521 
2522 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2523 	/* Enter DCB configuration status */
2524 	dcb_config = 1;
2525 
2526 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2527 	port_conf.txmode = rte_port->dev_conf.txmode;
2528 
2529 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2530 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2531 	if (retval < 0)
2532 		return retval;
2533 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2534 
2535 	/* Re-configure the device. */
2536 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2537 
2538 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2539 
2540 	/* If dev_info.vmdq_pool_base is greater than 0,
2541 	 * the queue id of vmdq pools is started after pf queues.
2542 	 */
2543 	if (dcb_mode == DCB_VT_ENABLED &&
2544 	    rte_port->dev_info.vmdq_pool_base > 0) {
2545 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2546 			" for port %d.\n", pid);
2547 		return -1;
2548 	}
2549 
2550 	/* Assume the ports in testpmd have the same dcb capability
2551 	 * and have the same number of Rx and Tx queues in DCB mode
2552 	 */
2553 	if (dcb_mode == DCB_VT_ENABLED) {
2554 		if (rte_port->dev_info.max_vfs > 0) {
2555 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2556 			nb_txq = rte_port->dev_info.nb_tx_queues;
2557 		} else {
2558 			nb_rxq = rte_port->dev_info.max_rx_queues;
2559 			nb_txq = rte_port->dev_info.max_tx_queues;
2560 		}
2561 	} else {
2562 		/* If VT is disabled, use all PF queues */
2563 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2564 			nb_rxq = rte_port->dev_info.max_rx_queues;
2565 			nb_txq = rte_port->dev_info.max_tx_queues;
2566 		} else {
2567 			nb_rxq = (queueid_t)num_tcs;
2568 			nb_txq = (queueid_t)num_tcs;
2569 
2570 		}
2571 	}
2572 	rx_free_thresh = 64;
2573 
2574 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2575 
2576 	rxtx_port_config(rte_port);
2577 	/* VLAN filter */
2578 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2579 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2580 		rx_vft_set(pid, vlan_tags[i], 1);
2581 
2582 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2583 	map_port_queue_stats_mapping_registers(pid, rte_port);
2584 
2585 	rte_port->dcb_flag = 1;
2586 
2587 	return 0;
2588 }
2589 
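/*
 * Allocate the global ports[] array for the maximum possible port count
 * (RTE_MAX_ETHPORTS), so that hot-plugged ports fit without reallocation.
 */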
2590 static void
2591 init_port(void)
2592 {
2593 	/* Configuration of Ethernet ports. */
2594 	ports = rte_zmalloc("testpmd: ports",
2595 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2596 			    RTE_CACHE_LINE_SIZE);
2597 	if (ports == NULL) {
2598 		rte_exit(EXIT_FAILURE,
2599 				"rte_zmalloc(%d struct rte_port) failed\n",
2600 				RTE_MAX_ETHPORTS);
2601 	}
2602 }
2603 
2604 static void
2605 force_quit(void)
2606 {
2607 	pmd_test_exit();
2608 	prompt_exit();
2609 }
2610 
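/*
 * Redraw the statistics screen. clr and top_left below are the ANSI escape
 * sequences ESC[2J (clear screen) and ESC[1;1H (cursor home), so each
 * refresh overwrites the previous one in place.
 */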
2611 static void
2612 print_stats(void)
2613 {
2614 	uint8_t i;
2615 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2616 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2617 
2618 	/* Clear screen and move to top left */
2619 	printf("%s%s", clr, top_left);
2620 
2621 	printf("\nPort statistics ====================================");
2622 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2623 		nic_stats_display(fwd_ports_ids[i]);
2624 }
2625 
2626 static void
2627 signal_handler(int signum)
2628 {
2629 	if (signum == SIGINT || signum == SIGTERM) {
2630 		printf("\nSignal %d received, preparing to exit...\n",
2631 				signum);
2632 #ifdef RTE_LIBRTE_PDUMP
2633 		/* uninitialize packet capture framework */
2634 		rte_pdump_uninit();
2635 #endif
2636 #ifdef RTE_LIBRTE_LATENCY_STATS
2637 		rte_latencystats_uninit();
2638 #endif
2639 		force_quit();
2640 		/* Set flag to indicate forced termination. */
2641 		f_quit = 1;
2642 		/* exit with the expected status */
2643 		signal(signum, SIG_DFL);
2644 		kill(getpid(), signum);
2645 	}
2646 }
2647 
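/*
 * Typical invocation, for reference (values are examples only); EAL
 * arguments come first, testpmd arguments follow the "--" separator:
 *
 *   ./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *
 * rte_eal_init() consumes the EAL arguments and returns how many it
 * parsed, which is why argc/argv are advanced by `diag` before
 * launch_args_parse() runs.
 */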
2648 int
2649 main(int argc, char** argv)
2650 {
2651 	int diag;
2652 	portid_t port_id;
2653 	int ret;
2654 
2655 	signal(SIGINT, signal_handler);
2656 	signal(SIGTERM, signal_handler);
2657 
2658 	diag = rte_eal_init(argc, argv);
2659 	if (diag < 0)
2660 		rte_panic("Cannot init EAL\n");
2661 
2662 	testpmd_logtype = rte_log_register("testpmd");
2663 	if (testpmd_logtype < 0)
2664 		rte_panic("Cannot register log type");
2665 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2666 
2667 #ifdef RTE_LIBRTE_PDUMP
2668 	/* initialize packet capture framework */
2669 	rte_pdump_init(NULL);
2670 #endif
2671 
2672 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2673 	if (nb_ports == 0)
2674 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2675 
2676 	/* allocate port structures, and init them */
2677 	init_port();
2678 
2679 	set_def_fwd_config();
2680 	if (nb_lcores == 0)
2681 		rte_panic("Empty set of forwarding logical cores - check the "
2682 			  "core mask supplied in the command parameters\n");
2683 
2684 	/* Bitrate/latency stats disabled by default */
2685 #ifdef RTE_LIBRTE_BITRATE
2686 	bitrate_enabled = 0;
2687 #endif
2688 #ifdef RTE_LIBRTE_LATENCY_STATS
2689 	latencystats_enabled = 0;
2690 #endif
2691 
2692 	/* on FreeBSD, mlockall() is disabled by default */
2693 #ifdef RTE_EXEC_ENV_BSDAPP
2694 	do_mlockall = 0;
2695 #else
2696 	do_mlockall = 1;
2697 #endif
2698 
2699 	argc -= diag;
2700 	argv += diag;
2701 	if (argc > 1)
2702 		launch_args_parse(argc, argv);
2703 
2704 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2705 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2706 			strerror(errno));
2707 	}
2708 
2709 	if (tx_first && interactive)
2710 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2711 				"interactive mode.\n");
2712 
2713 	if (tx_first && lsc_interrupt) {
2714 		printf("Warning: lsc_interrupt needs to be off when "
2715 				"using tx_first. Disabling.\n");
2716 		lsc_interrupt = 0;
2717 	}
2718 
2719 	if (!nb_rxq && !nb_txq)
2720 		printf("Warning: Either Rx or Tx queue count should be non-zero\n");
2721 
2722 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2723 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2724 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2725 		       nb_rxq, nb_txq);
2726 
2727 	init_config();
2728 
2729 	if (hot_plug) {
2730 		/* enable hot plug monitoring */
2731 		ret = rte_dev_event_monitor_start();
2732 		if (ret) {
2733 			rte_errno = EINVAL;
2734 			return -1;
2735 		}
2736 		eth_dev_event_callback_register();
2737 
2738 	}
2739 
2740 	if (start_port(RTE_PORT_ALL) != 0)
2741 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2742 
2743 	/* set all ports to promiscuous mode by default */
2744 	RTE_ETH_FOREACH_DEV(port_id)
2745 		rte_eth_promiscuous_enable(port_id);
2746 
2747 	/* Init metrics library */
2748 	rte_metrics_init(rte_socket_id());
2749 
2750 #ifdef RTE_LIBRTE_LATENCY_STATS
2751 	if (latencystats_enabled != 0) {
2752 		int ret = rte_latencystats_init(1, NULL);
2753 		if (ret)
2754 			printf("Warning: latencystats init()"
2755 				" returned error %d\n", ret);
2756 		printf("Latencystats running on lcore %d\n",
2757 			latencystats_lcore_id);
2758 	}
2759 #endif
2760 
2761 	/* Setup bitrate stats */
2762 #ifdef RTE_LIBRTE_BITRATE
2763 	if (bitrate_enabled != 0) {
2764 		bitrate_data = rte_stats_bitrate_create();
2765 		if (bitrate_data == NULL)
2766 			rte_exit(EXIT_FAILURE,
2767 				"Could not allocate bitrate data.\n");
2768 		rte_stats_bitrate_reg(bitrate_data);
2769 	}
2770 #endif
2771 
2772 #ifdef RTE_LIBRTE_CMDLINE
2773 	if (strlen(cmdline_filename) != 0)
2774 		cmdline_read_from_file(cmdline_filename);
2775 
2776 	if (interactive == 1) {
2777 		if (auto_start) {
2778 			printf("Start automatic packet forwarding\n");
2779 			start_packet_forwarding(0);
2780 		}
2781 		prompt();
2782 		pmd_test_exit();
2783 	} else
2784 #endif
2785 	{
2786 		char c;
2787 		int rc;
2788 
2789 		f_quit = 0;
2790 
2791 		printf("No interactive command line; starting packet forwarding\n");
2792 		start_packet_forwarding(tx_first);
2793 		if (stats_period != 0) {
2794 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2795 			uint64_t timer_period;
2796 
2797 			/* Convert to number of cycles */
2798 			timer_period = stats_period * rte_get_timer_hz();
2799 
2800 			while (f_quit == 0) {
2801 				cur_time = rte_get_timer_cycles();
2802 				diff_time += cur_time - prev_time;
2803 
2804 				if (diff_time >= timer_period) {
2805 					print_stats();
2806 					/* Reset the timer */
2807 					diff_time = 0;
2808 				}
2809 				/* Sleep to avoid unnecessary checks */
2810 				prev_time = cur_time;
2811 				sleep(1);
2812 			}
2813 		}
2814 
2815 		printf("Press enter to exit\n");
2816 		rc = read(0, &c, 1);
2817 		pmd_test_exit();
2818 		if (rc < 0)
2819 			return 1;
2820 	}
2821 
2822 	return 0;
2823 }
2824