/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set a flag to exit the stats-period loop after SIGINT or
 * SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before forwarding starts.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
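
/*
 * Sketch of how this mask is consumed (illustrative only; the exact check
 * lives in eth_event_callback(), declared below and defined later in this
 * file): an event of type event_type is printed when
 *     event_print_mask & (UINT32_C(1) << event_type)
 * is nonzero.
 */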
/*
 * Decide whether all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has not been discovered yet.
 * Return a positive value if the socket is new, zero if it is already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
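
/*
 * Callers treat a nonzero return as "socket not seen yet":
 * set_default_fwd_lcores_config() below uses it to fill socket_ids[],
 * and check_socket_id() uses it to validate user-supplied socket IDs.
 */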

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
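
/*
 * Usage sketch (illustrative, not a new call site): with NUMA enabled,
 * init_config() below invokes this once per discovered socket, e.g.
 *     mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_ids[i]);
 * so each socket presumably gets its own pool, named per socket ID via
 * mbuf_poolname_build().
 */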

/*
 * Check whether the given socket ID is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID with the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}
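
/*
 * Note: this check (and its TX counterpart below) is presumably run when
 * the --rxq/--txq command-line options or the interactive "port config"
 * commands are parsed, so an out-of-range queue count is rejected before
 * any queue setup is attempted.
 */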

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID with the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of each
	 * discovered socket.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
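
	/*
	 * Sizing sketch (assuming the defaults in testpmd.h at this
	 * revision: RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250 -- check the header
	 * for the actual values): with 4 lcores this gives
	 *     2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool,
	 * then multiplied by RTE_MAX_ETHPORTS to cover the worst case.
	 */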

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* Set the socket ID according to whether NUMA is used. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
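
/*
 * Worked example: with 2 probed ports, nb_rxq = 4 and nb_txq = 2,
 * q = RTE_MAX(4, 2) = 4, so 2 * 4 = 8 forwarding streams are allocated.
 */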

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

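	/*
	 * Drain each RX queue in two passes, sleeping 10 ms in between,
	 * so packets still in flight when the first pass empties a queue
	 * are caught by the second pass.
	 */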
	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * the loop after the 1-second timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}
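
/*
 * The loop above calls the engine's per-stream forwarding function for
 * every stream assigned to this lcore until fc->stopped is set, which
 * stop_packet_forwarding() does when the user stops forwarding.
 */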

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
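
/*
 * Note: the temporary lcore copy has stopped = 1 before the loop runs,
 * so run_pkt_fwd_on_lcore()'s do-while executes exactly one pass over
 * the streams, i.e. a single TX burst per stream.
 */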

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No unstarted port was found: all ports are started. */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up a TX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d TX queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up an RX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d RX queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; roll the status back. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need to check link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to set up event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
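
/*
 * Note: passing RTE_PORT_ALL starts every probed port; the loop above
 * skips a port only when a specific pid is given and does not match.
 */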

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1823 
1824 void
1825 close_port(portid_t pid)
1826 {
1827 	portid_t pi;
1828 	struct rte_port *port;
1829 
1830 	if (port_id_is_invalid(pid, ENABLED_WARN))
1831 		return;
1832 
1833 	printf("Closing ports...\n");
1834 
1835 	RTE_ETH_FOREACH_DEV(pi) {
1836 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1837 			continue;
1838 
1839 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1840 			printf("Please remove port %d from forwarding configuration.\n", pi);
1841 			continue;
1842 		}
1843 
1844 		if (port_is_bonding_slave(pi)) {
1845 			printf("Please remove port %d from bonded device.\n", pi);
1846 			continue;
1847 		}
1848 
1849 		port = &ports[pi];
1850 		if (rte_atomic16_cmpset(&(port->port_status),
1851 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1852 			printf("Port %d is already closed\n", pi);
1853 			continue;
1854 		}
1855 
1856 		if (rte_atomic16_cmpset(&(port->port_status),
1857 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1858 			printf("Port %d is not stopped\n", pi);
1859 			continue;
1860 		}
1861 
1862 		if (port->flow_list)
1863 			port_flow_flush(pi);
1864 		rte_eth_dev_close(pi);
1865 
1866 		if (rte_atomic16_cmpset(&(port->port_status),
1867 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1868 			printf("Port %d cannot be set to closed\n", pi);
1869 	}
1870 
1871 	printf("Done\n");
1872 }
1873 
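/*
 * Reset one port, or all of them, through rte_eth_dev_reset(). On
 * success the port is only flagged for reconfiguration; the actual
 * device and queue setup happens on the next start_port() call.
 */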
1874 void
1875 reset_port(portid_t pid)
1876 {
1877 	int diag;
1878 	portid_t pi;
1879 	struct rte_port *port;
1880 
1881 	if (port_id_is_invalid(pid, ENABLED_WARN))
1882 		return;
1883 
1884 	printf("Resetting ports...\n");
1885 
1886 	RTE_ETH_FOREACH_DEV(pi) {
1887 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1888 			continue;
1889 
1890 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1891 			printf("Please remove port %d from forwarding "
1892 			       "configuration.\n", pi);
1893 			continue;
1894 		}
1895 
1896 		if (port_is_bonding_slave(pi)) {
1897 			printf("Please remove port %d from bonded device.\n",
1898 			       pi);
1899 			continue;
1900 		}
1901 
1902 		diag = rte_eth_dev_reset(pi);
1903 		if (diag == 0) {
1904 			port = &ports[pi];
1905 			port->need_reconfig = 1;
1906 			port->need_reconfig_queues = 1;
1907 		} else {
1908 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1909 		}
1910 	}
1911 
1912 	printf("Done\n");
1913 }
1914 
1915 static int
1916 eth_dev_event_callback_register(void)
1917 {
1918 	int ret;
1919 
1920 	/* register the device event callback */
1921 	ret = rte_dev_event_callback_register(NULL,
1922 		eth_dev_event_callback, NULL);
1923 	if (ret) {
1924 		printf("Failed to register device event callback\n");
1925 		return -1;
1926 	}
1927 
1928 	return 0;
1929 }
1930 
1932 static int
1933 eth_dev_event_callback_unregister(void)
1934 {
1935 	int ret;
1936 
1937 	/* unregister the device event callback */
1938 	ret = rte_dev_event_callback_unregister(NULL,
1939 		eth_dev_event_callback, NULL);
1940 	if (ret < 0) {
1941 		printf("Failed to unregister device event callback\n");
1942 		return -1;
1943 	}
1944 
1945 	return 0;
1946 }
1947 
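/*
 * Hot-plug a port described by a devargs-style identifier such as a
 * PCI address or a vdev string. A sketch of the expected call, with a
 * hypothetical vdev argument as issued by the "port attach" command:
 *
 *	attach_port("net_pcap0,iface=eth0");
 */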
1948 void
1949 attach_port(char *identifier)
1950 {
1951 	portid_t pi = 0;
1952 	unsigned int socket_id;
1953 
1954 	printf("Attaching a new port...\n");
1955 
1956 	if (identifier == NULL) {
1957 		printf("Invalid device identifier specified\n");
1958 		return;
1959 	}
1960 
1961 	if (rte_eth_dev_attach(identifier, &pi))
1962 		return;
1963 
1964 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1965 	/* if socket_id is invalid, set to 0 */
1966 	if (check_socket_id(socket_id) < 0)
1967 		socket_id = 0;
1968 	reconfig(pi, socket_id);
1969 	rte_eth_promiscuous_enable(pi);
1970 
1971 	nb_ports = rte_eth_dev_count_avail();
1972 
1973 	ports[pi].port_status = RTE_PORT_STOPPED;
1974 
1975 	update_fwd_ports(pi);
1976 
1977 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1978 	printf("Done\n");
1979 }
1980 
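/*
 * Hot-unplug a port. The port must already be closed; its flow rules
 * are flushed before rte_eth_dev_detach() releases the underlying
 * device. Typical prompt usage (assuming the standard command set):
 * "port close 0" followed by "port detach 0".
 */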
1981 void
1982 detach_port(portid_t port_id)
1983 {
1984 	char name[RTE_ETH_NAME_MAX_LEN];
1985 
1986 	printf("Detaching a port...\n");
1987 
1988 	if (!port_is_closed(port_id)) {
1989 		printf("Please close port first\n");
1990 		return;
1991 	}
1992 
1993 	if (ports[port_id].flow_list)
1994 		port_flow_flush(port_id);
1995 
1996 	if (rte_eth_dev_detach(port_id, name)) {
1997 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
1998 		return;
1999 	}
2000 
2001 	nb_ports = rte_eth_dev_count_avail();
2002 
2003 	update_fwd_ports(RTE_MAX_ETHPORTS);
2004 
2005 	printf("Port %u is detached. Total number of ports is now %d\n",
2006 			port_id, nb_ports);
2007 	printf("Done\n");
2009 }
2010 
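/*
 * Final cleanup on exit: stop forwarding if it is still running, stop
 * and close every port with link-status checks disabled, and, when
 * hot-plug was enabled, tear down the device event monitor and its
 * callback.
 */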
2011 void
2012 pmd_test_exit(void)
2013 {
2014 	struct rte_device *device;
2015 	portid_t pt_id;
2016 	int ret;
2017 
2018 	if (test_done == 0)
2019 		stop_packet_forwarding();
2020 
2021 	if (ports != NULL) {
2022 		no_link_check = 1;
2023 		RTE_ETH_FOREACH_DEV(pt_id) {
2024 			printf("\nShutting down port %d...\n", pt_id);
2025 			fflush(stdout);
2026 			stop_port(pt_id);
2027 			close_port(pt_id);
2028 
2029 			/*
2030 			 * This is a workaround for a virtio-user issue that
2031 			 * requires calling a clean-up routine to remove the
2032 			 * existing socket.
2033 			 * This workaround is valid only for testpmd; a fix
2034 			 * valid for all applications is needed.
2035 			 * TODO: Implement proper resource cleanup.
2036 			 */
2037 			device = rte_eth_devices[pt_id].device;
2038 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2039 				detach_port(pt_id);
2040 		}
2041 	}
2042 
2043 	if (hot_plug) {
2044 		ret = rte_dev_event_monitor_stop();
2045 		if (ret)
2046 			RTE_LOG(ERR, EAL,
2047 				"Failed to stop the device event monitor.\n");
2048 
2049 		ret = eth_dev_event_callback_unregister();
2050 		if (ret)
2051 			RTE_LOG(ERR, EAL,
2052 				"Failed to unregister the device event callback.\n");
2053 	}
2054 
2055 	printf("\nBye...\n");
2056 }
2057 
2058 typedef void (*cmd_func_t)(void);
2059 struct pmd_test_command {
2060 	const char *cmd_name;
2061 	cmd_func_t cmd_func;
2062 };
2063 
2064 #define PMD_TEST_CMD_NB RTE_DIM(pmd_test_menu)
2065 
2066 /* Check the link status of all ports for up to 9 s and print the final result. */
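/*
 * Polling scheme: every CHECK_INTERVAL ms the link of each selected
 * port is read with rte_eth_link_get_nowait(); once every link is up,
 * or after MAX_CHECK_TIME attempts, one last pass prints the per-port
 * status and the loop exits.
 */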
2067 static void
2068 check_all_ports_link_status(uint32_t port_mask)
2069 {
2070 #define CHECK_INTERVAL 100 /* 100ms */
2071 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2072 	portid_t portid;
2073 	uint8_t count, all_ports_up, print_flag = 0;
2074 	struct rte_eth_link link;
2075 
2076 	printf("Checking link statuses...\n");
2077 	fflush(stdout);
2078 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2079 		all_ports_up = 1;
2080 		RTE_ETH_FOREACH_DEV(portid) {
2081 			if ((port_mask & (1 << portid)) == 0)
2082 				continue;
2083 			memset(&link, 0, sizeof(link));
2084 			rte_eth_link_get_nowait(portid, &link);
2085 			/* print link status if flag set */
2086 			if (print_flag == 1) {
2087 				if (link.link_status)
2088 					printf(
2089 					"Port %d Link Up. speed %u Mbps - %s\n",
2090 					portid, link.link_speed,
2091 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2092 					("full-duplex") : ("half-duplex"));
2093 				else
2094 					printf("Port %d Link Down\n", portid);
2095 				continue;
2096 			}
2097 			/* clear all_ports_up flag if any link down */
2098 			if (link.link_status == ETH_LINK_DOWN) {
2099 				all_ports_up = 0;
2100 				break;
2101 			}
2102 		}
2103 		/* after finally printing all link status, get out */
2104 		if (print_flag == 1)
2105 			break;
2106 
2107 		if (all_ports_up == 0) {
2108 			fflush(stdout);
2109 			rte_delay_ms(CHECK_INTERVAL);
2110 		}
2111 
2112 		/* set the print_flag if all ports up or timeout */
2113 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2114 			print_flag = 1;
2115 		}
2116 
2117 		if (lsc_interrupt)
2118 			break;
2119 	}
2120 }
2121 
2122 static void
2123 rmv_event_callback(void *arg)
2124 {
2125 	int need_to_start = 0;
2126 	int org_no_link_check = no_link_check;
2127 	portid_t port_id = (intptr_t)arg;
2128 
2129 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2130 
2131 	if (!test_done && port_is_forwarding(port_id)) {
2132 		need_to_start = 1;
2133 		stop_packet_forwarding();
2134 	}
2135 	no_link_check = 1;
2136 	stop_port(port_id);
2137 	no_link_check = org_no_link_check;
2138 	close_port(port_id);
2139 	detach_port(port_id);
2140 	if (need_to_start)
2141 		start_packet_forwarding(0);
2142 }
2143 
2144 /* This function is used by the interrupt thread */
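/*
 * It is registered in start_port() for every ethdev event type and
 * logs the events enabled in event_print_mask. RTE_ETH_EVENT_INTR_RMV
 * is not handled inline: the removal is deferred to
 * rmv_event_callback() through a 100 ms EAL alarm so the teardown runs
 * outside interrupt context.
 */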
2145 static int
2146 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2147 		  void *ret_param)
2148 {
2149 	static const char * const event_desc[] = {
2150 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2151 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2152 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2153 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2154 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2155 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2156 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2157 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2158 		[RTE_ETH_EVENT_NEW] = "device probed",
2159 		[RTE_ETH_EVENT_DESTROY] = "device released",
2160 		[RTE_ETH_EVENT_MAX] = NULL,
2161 	};
2162 
2163 	RTE_SET_USED(param);
2164 	RTE_SET_USED(ret_param);
2165 
2166 	if (type >= RTE_ETH_EVENT_MAX) {
2167 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2168 			port_id, __func__, type);
2169 		fflush(stderr);
2170 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2171 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2172 			event_desc[type]);
2173 		fflush(stdout);
2174 	}
2175 
2176 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2177 		return 0;
2178 
2179 	switch (type) {
2180 	case RTE_ETH_EVENT_INTR_RMV:
2181 		if (rte_eal_alarm_set(100000,
2182 				rmv_event_callback, (void *)(intptr_t)port_id))
2183 			fprintf(stderr, "Could not set up deferred device removal\n");
2184 		break;
2185 	default:
2186 		break;
2187 	}
2188 	return 0;
2189 }
2190 
2191 /* This function is used by the interrupt thread */
2192 static void
2193 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2194 			     __rte_unused void *arg)
2195 {
2196 	if (type >= RTE_DEV_EVENT_MAX) {
2197 		fprintf(stderr, "%s called upon invalid event %d\n",
2198 			__func__, type);
2199 		fflush(stderr);
2200 	}
2201 
2202 	switch (type) {
2203 	case RTE_DEV_EVENT_REMOVE:
2204 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2205 			device_name);
2206 		/* TODO: Once failure handling is finished, stop packet
2207 		 * forwarding, then stop, close and detach the port.
2208 		 */
2209 		break;
2210 	case RTE_DEV_EVENT_ADD:
2211 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2212 			device_name);
2213 		/* TODO: Once the kernel driver binding is finished,
2214 		 * attach the port.
2215 		 */
2216 		break;
2217 	default:
2218 		break;
2219 	}
2220 }
2221 
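/*
 * Apply the Tx queue-to-stats-counter mappings collected from the
 * command line (--tx-queue-stats-mapping) to one port; the Rx twin
 * below does the same for --rx-queue-stats-mapping. Returns the
 * negative ethdev error code on failure, 0 otherwise.
 */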
2222 static int
2223 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2224 {
2225 	uint16_t i;
2226 	int diag;
2227 	uint8_t mapping_found = 0;
2228 
2229 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2230 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2231 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2232 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2233 					tx_queue_stats_mappings[i].queue_id,
2234 					tx_queue_stats_mappings[i].stats_counter_id);
2235 			if (diag != 0)
2236 				return diag;
2237 			mapping_found = 1;
2238 		}
2239 	}
2240 	if (mapping_found)
2241 		port->tx_queue_stats_mapping_enabled = 1;
2242 	return 0;
2243 }
2244 
2245 static int
2246 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2247 {
2248 	uint16_t i;
2249 	int diag;
2250 	uint8_t mapping_found = 0;
2251 
2252 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2253 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2254 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2255 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2256 					rx_queue_stats_mappings[i].queue_id,
2257 					rx_queue_stats_mappings[i].stats_counter_id);
2258 			if (diag != 0)
2259 				return diag;
2260 			mapping_found = 1;
2261 		}
2262 	}
2263 	if (mapping_found)
2264 		port->rx_queue_stats_mapping_enabled = 1;
2265 	return 0;
2266 }
2267 
2268 static void
2269 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2270 {
2271 	int diag = 0;
2272 
2273 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2274 	if (diag != 0) {
2275 		if (diag == -ENOTSUP) {
2276 			port->tx_queue_stats_mapping_enabled = 0;
2277 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2278 		}
2279 		else
2280 			rte_exit(EXIT_FAILURE,
2281 					"set_tx_queue_stats_mapping_registers "
2282 					"failed for port id=%d diag=%d\n",
2283 					pi, diag);
2284 	}
2285 
2286 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2287 	if (diag != 0) {
2288 		if (diag == -ENOTSUP) {
2289 			port->rx_queue_stats_mapping_enabled = 0;
2290 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2291 		}
2292 		else
2293 			rte_exit(EXIT_FAILURE,
2294 					"set_rx_queue_stats_mapping_registers "
2295 					"failed for port id=%d diag=%d\n",
2296 					pi, diag);
2297 	}
2298 }
2299 
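/*
 * Seed each queue's Rx/Tx configuration from the PMD defaults reported
 * in dev_info, then override only the thresholds the user set
 * explicitly; values left at RTE_PMD_PARAM_UNSET keep the driver
 * defaults.
 */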
2300 static void
2301 rxtx_port_config(struct rte_port *port)
2302 {
2303 	uint16_t qid;
2304 
2305 	for (qid = 0; qid < nb_rxq; qid++) {
2306 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2307 
2308 		/* Check if any Rx parameters have been passed */
2309 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2310 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2311 
2312 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2313 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2314 
2315 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2316 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2317 
2318 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2319 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2320 
2321 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2322 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2323 
2324 		port->nb_rx_desc[qid] = nb_rxd;
2325 	}
2326 
2327 	for (qid = 0; qid < nb_txq; qid++) {
2328 		port->tx_conf[qid] = port->dev_info.default_txconf;
2329 
2330 		/* Check if any Tx parameters have been passed */
2331 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2332 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2333 
2334 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2335 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2336 
2337 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2338 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2339 
2340 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2341 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2342 
2343 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2344 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2345 
2346 		port->nb_tx_desc[qid] = nb_txd;
2347 	}
2348 }
2349 
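/*
 * Default per-port configuration: RSS is enabled only when more than
 * one Rx queue is used, and LSC/RMV interrupts are requested only when
 * both the user asked for them and the device advertises the matching
 * RTE_ETH_DEV_INTR_* flag.
 */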
2350 void
2351 init_port_config(void)
2352 {
2353 	portid_t pid;
2354 	struct rte_port *port;
2355 
2356 	RTE_ETH_FOREACH_DEV(pid) {
2357 		port = &ports[pid];
2358 		port->dev_conf.fdir_conf = fdir_conf;
2359 		rte_eth_dev_info_get(pid, &port->dev_info);
2360 		if (nb_rxq > 1) {
2361 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2362 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2363 				rss_hf & port->dev_info.flow_type_rss_offloads;
2364 		} else {
2365 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2366 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2367 		}
2368 
2369 		if (port->dcb_flag == 0) {
2370 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2371 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2372 			else
2373 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2374 		}
2375 
2376 		rxtx_port_config(port);
2377 
2378 		rte_eth_macaddr_get(pid, &port->eth_addr);
2379 
2380 		map_port_queue_stats_mapping_registers(pid, port);
2381 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2382 		rte_pmd_ixgbe_bypass_init(pid);
2383 #endif
2384 
2385 		if (lsc_interrupt &&
2386 		    (rte_eth_devices[pid].data->dev_flags &
2387 		     RTE_ETH_DEV_INTR_LSC))
2388 			port->dev_conf.intr_conf.lsc = 1;
2389 		if (rmv_interrupt &&
2390 		    (rte_eth_devices[pid].data->dev_flags &
2391 		     RTE_ETH_DEV_INTR_RMV))
2392 			port->dev_conf.intr_conf.rmv = 1;
2393 
2394 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2395 		/* Detect softnic port */
2396 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2397 			port->softnic_enable = 1;
2398 			memset(&port->softport, 0, sizeof(struct softnic_port));
2399 
2400 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2401 				port->softport.tm_flag = 1;
2402 		}
2403 #endif
2404 	}
2405 }
2406 
2407 void set_port_slave_flag(portid_t slave_pid)
2408 {
2409 	struct rte_port *port;
2410 
2411 	port = &ports[slave_pid];
2412 	port->slave_flag = 1;
2413 }
2414 
2415 void clear_port_slave_flag(portid_t slave_pid)
2416 {
2417 	struct rte_port *port;
2418 
2419 	port = &ports[slave_pid];
2420 	port->slave_flag = 0;
2421 }
2422 
2423 uint8_t port_is_bonding_slave(portid_t slave_pid)
2424 {
2425 	struct rte_port *port;
2426 
2427 	port = &ports[slave_pid];
2428 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2429 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2430 		return 1;
2431 	return 0;
2432 }
2433 
2434 const uint16_t vlan_tags[] = {
2435 		0,  1,  2,  3,  4,  5,  6,  7,
2436 		8,  9, 10, 11,  12, 13, 14, 15,
2437 		16, 17, 18, 19, 20, 21, 22, 23,
2438 		24, 25, 26, 27, 28, 29, 30, 31
2439 };
2440 
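/*
 * Build an rte_eth_conf for DCB, either combined with virtualization
 * (VMDQ+DCB, mapping the VLAN tags above onto pools) or as plain DCB
 * with RSS. With ETH_4_TCS, for instance, 32 pools are used and user
 * priority i is mapped to traffic class i % 4.
 */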
2441 static int
2442 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2443 		 enum dcb_mode_enable dcb_mode,
2444 		 enum rte_eth_nb_tcs num_tcs,
2445 		 uint8_t pfc_en)
2446 {
2447 	uint8_t i;
2448 
2449 	/*
2450 	 * Build the configuration for DCB+VT based on the vlan_tags array
2451 	 * given above and on the number of traffic classes available for use.
2452 	 */
2453 	if (dcb_mode == DCB_VT_ENABLED) {
2454 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2455 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2456 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2457 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2458 
2459 		/* VMDQ+DCB RX and TX configurations */
2460 		vmdq_rx_conf->enable_default_pool = 0;
2461 		vmdq_rx_conf->default_pool = 0;
2462 		vmdq_rx_conf->nb_queue_pools =
2463 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2464 		vmdq_tx_conf->nb_queue_pools =
2465 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2466 
2467 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2468 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2469 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2470 			vmdq_rx_conf->pool_map[i].pools =
2471 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2472 		}
2473 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2474 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2475 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2476 		}
2477 
2478 		/* set DCB mode of RX and TX of multiple queues */
2479 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2480 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2481 	} else {
2482 		struct rte_eth_dcb_rx_conf *rx_conf =
2483 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2484 		struct rte_eth_dcb_tx_conf *tx_conf =
2485 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2486 
2487 		rx_conf->nb_tcs = num_tcs;
2488 		tx_conf->nb_tcs = num_tcs;
2489 
2490 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2491 			rx_conf->dcb_tc[i] = i % num_tcs;
2492 			tx_conf->dcb_tc[i] = i % num_tcs;
2493 		}
2494 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2495 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2496 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2497 	}
2498 
2499 	if (pfc_en)
2500 		eth_conf->dcb_capability_en =
2501 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2502 	else
2503 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2504 
2505 	return 0;
2506 }
2507 
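/*
 * Switch one port into DCB mode: build the DCB configuration,
 * reconfigure the device (so the port is expected to be stopped),
 * resize nb_rxq/nb_txq to match the selected mode and enable VLAN
 * filtering for every tag in vlan_tags[].
 */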
2508 int
2509 init_port_dcb_config(portid_t pid,
2510 		     enum dcb_mode_enable dcb_mode,
2511 		     enum rte_eth_nb_tcs num_tcs,
2512 		     uint8_t pfc_en)
2513 {
2514 	struct rte_eth_conf port_conf;
2515 	struct rte_port *rte_port;
2516 	int retval;
2517 	uint16_t i;
2518 
2519 	rte_port = &ports[pid];
2520 
2521 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2522 	/* Enter DCB configuration status */
2523 	dcb_config = 1;
2524 
2525 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2526 	port_conf.txmode = rte_port->dev_conf.txmode;
2527 
2528 	/* Set the DCB configuration for VT mode and for non-VT mode */
2529 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2530 	if (retval < 0)
2531 		return retval;
2532 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2533 
2534 	/* Re-configure the device. */
2535 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2536 
2537 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2538 
2539 	/* If dev_info.vmdq_pool_base is greater than 0,
2540 	 * the queue id of vmdq pools is started after pf queues.
2541 	 */
2542 	if (dcb_mode == DCB_VT_ENABLED &&
2543 	    rte_port->dev_info.vmdq_pool_base > 0) {
2544 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2545 			" for port %d.\n", pid);
2546 		return -1;
2547 	}
2548 
2549 	/* Assume the ports in testpmd have the same DCB capability
2550 	 * and the same number of rxq and txq in DCB mode.
2551 	 */
2552 	if (dcb_mode == DCB_VT_ENABLED) {
2553 		if (rte_port->dev_info.max_vfs > 0) {
2554 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2555 			nb_txq = rte_port->dev_info.nb_tx_queues;
2556 		} else {
2557 			nb_rxq = rte_port->dev_info.max_rx_queues;
2558 			nb_txq = rte_port->dev_info.max_tx_queues;
2559 		}
2560 	} else {
2561 		/* If VT is disabled, use all PF queues */
2562 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2563 			nb_rxq = rte_port->dev_info.max_rx_queues;
2564 			nb_txq = rte_port->dev_info.max_tx_queues;
2565 		} else {
2566 			nb_rxq = (queueid_t)num_tcs;
2567 			nb_txq = (queueid_t)num_tcs;
2569 		}
2570 	}
2571 	rx_free_thresh = 64;
2572 
2573 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2574 
2575 	rxtx_port_config(rte_port);
2576 	/* VLAN filter */
2577 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2578 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2579 		rx_vft_set(pid, vlan_tags[i], 1);
2580 
2581 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2582 	map_port_queue_stats_mapping_registers(pid, rte_port);
2583 
2584 	rte_port->dcb_flag = 1;
2585 
2586 	return 0;
2587 }
2588 
2589 static void
2590 init_port(void)
2591 {
2592 	/* Configuration of Ethernet ports. */
2593 	ports = rte_zmalloc("testpmd: ports",
2594 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2595 			    RTE_CACHE_LINE_SIZE);
2596 	if (ports == NULL) {
2597 		rte_exit(EXIT_FAILURE,
2598 				"rte_zmalloc(%d struct rte_port) failed\n",
2599 				RTE_MAX_ETHPORTS);
2600 	}
2601 }
2602 
2603 static void
2604 force_quit(void)
2605 {
2606 	pmd_test_exit();
2607 	prompt_exit();
2608 }
2609 
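/*
 * Redraw the statistics of all forwarded ports. The two arrays below
 * are ANSI escape sequences: ESC "[2J" clears the screen and
 * ESC "[1;1H" moves the cursor to the top-left corner.
 */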
2610 static void
2611 print_stats(void)
2612 {
2613 	uint8_t i;
2614 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2615 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2616 
2617 	/* Clear screen and move to top left */
2618 	printf("%s%s", clr, top_left);
2619 
2620 	printf("\nPort statistics ====================================");
2621 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2622 		nic_stats_display(fwd_ports_ids[i]);
2623 }
2624 
2625 static void
2626 signal_handler(int signum)
2627 {
2628 	if (signum == SIGINT || signum == SIGTERM) {
2629 		printf("\nSignal %d received, preparing to exit...\n",
2630 				signum);
2631 #ifdef RTE_LIBRTE_PDUMP
2632 		/* uninitialize packet capture framework */
2633 		rte_pdump_uninit();
2634 #endif
2635 #ifdef RTE_LIBRTE_LATENCY_STATS
2636 		rte_latencystats_uninit();
2637 #endif
2638 		force_quit();
2639 		/* Set flag to indicate the force termination. */
2640 		f_quit = 1;
2641 		/* exit with the expected status */
2642 		signal(signum, SIG_DFL);
2643 		kill(getpid(), signum);
2644 	}
2645 }
2646 
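/*
 * Start-up sequence: EAL init, optional pdump/metrics/bitrate/latency
 * setup, port allocation and configuration, start of all ports in
 * promiscuous mode, then either the interactive prompt or
 * non-interactive forwarding with an optional periodic statistics
 * display.
 */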
2647 int
2648 main(int argc, char** argv)
2649 {
2650 	int diag;
2651 	portid_t port_id;
2652 	int ret;
2653 
2654 	signal(SIGINT, signal_handler);
2655 	signal(SIGTERM, signal_handler);
2656 
2657 	diag = rte_eal_init(argc, argv);
2658 	if (diag < 0)
2659 		rte_panic("Cannot init EAL\n");
2660 
2661 	testpmd_logtype = rte_log_register("testpmd");
2662 	if (testpmd_logtype < 0)
2663 		rte_panic("Cannot register log type");
2664 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2665 
2666 #ifdef RTE_LIBRTE_PDUMP
2667 	/* initialize packet capture framework */
2668 	rte_pdump_init(NULL);
2669 #endif
2670 
2671 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2672 	if (nb_ports == 0)
2673 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2674 
2675 	/* allocate port structures, and init them */
2676 	init_port();
2677 
2678 	set_def_fwd_config();
2679 	if (nb_lcores == 0)
2680 		rte_panic("Empty set of forwarding logical cores - check the "
2681 			  "core mask supplied in the command parameters\n");
2682 
2683 	/* Bitrate/latency stats disabled by default */
2684 #ifdef RTE_LIBRTE_BITRATE
2685 	bitrate_enabled = 0;
2686 #endif
2687 #ifdef RTE_LIBRTE_LATENCY_STATS
2688 	latencystats_enabled = 0;
2689 #endif
2690 
2691 	/* on FreeBSD, mlockall() is disabled by default */
2692 #ifdef RTE_EXEC_ENV_BSDAPP
2693 	do_mlockall = 0;
2694 #else
2695 	do_mlockall = 1;
2696 #endif
2697 
2698 	argc -= diag;
2699 	argv += diag;
2700 	if (argc > 1)
2701 		launch_args_parse(argc, argv);
2702 
2703 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2704 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2705 			strerror(errno));
2706 	}
2707 
2708 	if (tx_first && interactive)
2709 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2710 				"interactive mode.\n");
2711 
2712 	if (tx_first && lsc_interrupt) {
2713 		printf("Warning: lsc_interrupt needs to be off when "
2714 				"using tx_first. Disabling.\n");
2715 		lsc_interrupt = 0;
2716 	}
2717 
2718 	if (!nb_rxq && !nb_txq)
2719 		printf("Warning: Either rx or tx queues should be non-zero\n");
2720 
2721 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2722 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2723 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2724 		       nb_rxq, nb_txq);
2725 
2726 	init_config();
2727 
2728 	if (hot_plug) {
2729 		/* enable hot plug monitoring */
2730 		ret = rte_dev_event_monitor_start();
2731 		if (ret) {
2732 			rte_errno = EINVAL;
2733 			return -1;
2734 		}
2735 		eth_dev_event_callback_register();
2737 	}
2738 
2739 	if (start_port(RTE_PORT_ALL) != 0)
2740 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2741 
2742 	/* set all ports to promiscuous mode by default */
2743 	RTE_ETH_FOREACH_DEV(port_id)
2744 		rte_eth_promiscuous_enable(port_id);
2745 
2746 	/* Init metrics library */
2747 	rte_metrics_init(rte_socket_id());
2748 
2749 #ifdef RTE_LIBRTE_LATENCY_STATS
2750 	if (latencystats_enabled != 0) {
2751 		int ret = rte_latencystats_init(1, NULL);
2752 		if (ret)
2753 			printf("Warning: latencystats init()"
2754 				" returned error %d\n", ret);
2755 		printf("Latencystats running on lcore %d\n",
2756 			latencystats_lcore_id);
2757 	}
2758 #endif
2759 
2760 	/* Setup bitrate stats */
2761 #ifdef RTE_LIBRTE_BITRATE
2762 	if (bitrate_enabled != 0) {
2763 		bitrate_data = rte_stats_bitrate_create();
2764 		if (bitrate_data == NULL)
2765 			rte_exit(EXIT_FAILURE,
2766 				"Could not allocate bitrate data.\n");
2767 		rte_stats_bitrate_reg(bitrate_data);
2768 	}
2769 #endif
2770 
2771 #ifdef RTE_LIBRTE_CMDLINE
2772 	if (strlen(cmdline_filename) != 0)
2773 		cmdline_read_from_file(cmdline_filename);
2774 
2775 	if (interactive == 1) {
2776 		if (auto_start) {
2777 			printf("Start automatic packet forwarding\n");
2778 			start_packet_forwarding(0);
2779 		}
2780 		prompt();
2781 		pmd_test_exit();
2782 	} else
2783 #endif
2784 	{
2785 		char c;
2786 		int rc;
2787 
2788 		f_quit = 0;
2789 
2790 		printf("No command line given, starting packet forwarding\n");
2791 		start_packet_forwarding(tx_first);
2792 		if (stats_period != 0) {
2793 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2794 			uint64_t timer_period;
2795 
2796 			/* Convert to number of cycles */
2797 			timer_period = stats_period * rte_get_timer_hz();
2798 
2799 			while (f_quit == 0) {
2800 				cur_time = rte_get_timer_cycles();
2801 				diff_time += cur_time - prev_time;
2802 
2803 				if (diff_time >= timer_period) {
2804 					print_stats();
2805 					/* Reset the timer */
2806 					diff_time = 0;
2807 				}
2808 				/* Sleep to avoid unnecessary checks */
2809 				prev_time = cur_time;
2810 				sleep(1);
2811 			}
2812 		}
2813 
2814 		printf("Press enter to exit\n");
2815 		rc = read(0, &c, 1);
2816 		pmd_test_exit();
2817 		if (rc < 0)
2818 			return 1;
2819 	}
2820 
2821 	return 0;
2822 }
2823