xref: /dpdk/app/test-pmd/testpmd.c (revision c73a9071877a1b80d01517b9c6205b9b3b503f59)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63 
64 #include "testpmd.h"
65 
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68 
69 /* Use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
73 char cmdline_filename[PATH_MAX] = {0};
74 
75 /*
76  * NUMA support configuration.
77  * When set, the NUMA support attempts to dispatch the allocation of the
78  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79  * probed ports among the CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
82 uint8_t numa_support = 1; /**< numa enabled by default */
83 
84 /*
85  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86  * not configured.
87  */
88 uint8_t socket_num = UMA_NO_CONFIG;
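
/*
 * For reference, these defaults correspond to testpmd command-line options:
 * NUMA dispatching is disabled with --no-numa, the UMA socket is selected
 * with --socket-num=N, and per-port/per-ring placement is tuned with
 * --port-numa-config and --ring-numa-config (see the warning printed by
 * check_socket_id() below).
 */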
89 
90 /*
91  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
92  */
93 uint8_t mp_anon = 0;
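/* Anonymous mapping is requested with the --mp-anon command-line option. */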
94 
95 /*
96  * Store the specified sockets on which the memory pools used by the
97  * ports are allocated.
98  */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100 
101 /*
102  * Store the specified sockets on which the RX rings used by the
103  * ports are allocated.
104  */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106 
107 /*
108  * Store the specified sockets on which the TX rings used by the
109  * ports are allocated.
110  */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112 
113 /*
114  * Record the Ethernet address of peer target ports to which packets are
115  * forwarded.
116  * Must be instantiated with the Ethernet addresses of peer traffic
117  * generator ports.
118  */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121 
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;	       /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129 
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
139 
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142 
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
145 
146 /*
147  * Forwarding engines.
148  */
149 struct fwd_engine * fwd_engines[] = {
150 	&io_fwd_engine,
151 	&mac_fwd_engine,
152 	&mac_swap_engine,
153 	&flow_gen_engine,
154 	&rx_only_engine,
155 	&tx_only_engine,
156 	&csum_fwd_engine,
157 	&icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 	&softnic_tm_engine,
160 	&softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163 	&ieee1588_fwd_engine,
164 #endif
165 	NULL,
166 };
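
/*
 * For reference: the active engine is selected at run time with the
 * "set fwd <mode>" interactive command or the --forward-mode=<mode>
 * command-line option, which updates cur_fwd_eng below.
 */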
167 
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178 
179 /*
180  * In a container, the process running with the 'stats-period' option cannot
181  * be terminated. Set this flag to exit the stats-period loop once SIGINT/SIGTERM is received.
182  */
183 uint8_t f_quit;
184 
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
200 /* Whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
202 
203 /* Whether DCB is in testing status */
204 uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
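
/*
 * A value of 0 is intended to let each PMD pick its preferred ring size;
 * explicit sizes can still be requested with the --rxd=N and --txd=N
 * command-line options.
 */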
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX queues before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
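/* Flushing is skipped when testpmd is started with --no-flush-rx. */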
268 
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273 
274 /*
275  * Avoid checking the link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290 
291 /*
292  * Display or mask Ethernet events.
293  * Defaults to all events except VF_MBOX.
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
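
/*
 * A minimal sketch of how this mask is meant to be consumed (the actual
 * check lives in eth_event_callback()):
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("Port %" PRIu16 ": event %d\n", port_id, type);
 */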
302 /*
303  * Decide if all memory is locked for performance.
304  */
305 int do_mlockall = 0;
306 
307 /*
308  * NIC bypass mode configuration options.
309  */
310 
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
314 #endif
315 
316 
317 #ifdef RTE_LIBRTE_LATENCY_STATS
318 
319 /*
320  * Set when latency stats are enabled on the command line.
321  */
322 uint8_t latencystats_enabled;
323 
324 /*
325  * Lcore ID to service latency statistics.
326  */
327 lcoreid_t latencystats_lcore_id = -1;
328 
329 #endif
330 
331 /*
332  * Ethernet device configuration.
333  */
334 struct rte_eth_rxmode rx_mode = {
335 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
337 	.ignore_offload_bitfield = 1,
338 };
339 
340 struct rte_eth_txmode tx_mode = {
341 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 };
343 
344 struct rte_fdir_conf fdir_conf = {
345 	.mode = RTE_FDIR_MODE_NONE,
346 	.pballoc = RTE_FDIR_PBALLOC_64K,
347 	.status = RTE_FDIR_REPORT_STATUS,
348 	.mask = {
349 		.vlan_tci_mask = 0x0,
350 		.ipv4_mask     = {
351 			.src_ip = 0xFFFFFFFF,
352 			.dst_ip = 0xFFFFFFFF,
353 		},
354 		.ipv6_mask     = {
355 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357 		},
358 		.src_port_mask = 0xFFFF,
359 		.dst_port_mask = 0xFFFF,
360 		.mac_addr_byte_mask = 0xFF,
361 		.tunnel_type_mask = 1,
362 		.tunnel_id_mask = 0xFFFFFFFF,
363 	},
364 	.drop_queue = 127,
365 };
366 
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368 
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371 
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374 
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377 
378 /*
379  * Display zero values by default for xstats
380  */
381 uint8_t xstats_hide_zero;
382 
383 unsigned int num_sockets = 0;
384 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385 
386 #ifdef RTE_LIBRTE_BITRATE
387 /* Bitrate statistics */
388 struct rte_stats_bitrates *bitrate_data;
389 lcoreid_t bitrate_lcore_id;
390 uint8_t bitrate_enabled;
391 #endif
392 
393 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
394 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395 
396 /* Forward function declarations */
397 static void map_port_queue_stats_mapping_registers(portid_t pi,
398 						   struct rte_port *port);
399 static void check_all_ports_link_status(uint32_t port_mask);
400 static int eth_event_callback(portid_t port_id,
401 			      enum rte_eth_event_type type,
402 			      void *param, void *ret_param);
403 static void eth_dev_event_callback(char *device_name,
404 				enum rte_dev_event_type type,
405 				void *param);
406 static int eth_dev_event_callback_register(void);
407 static int eth_dev_event_callback_unregister(void);
408 
409 
410 /*
411  * Check if all the ports are started.
412  * If yes, return positive value. If not, return zero.
413  */
414 static int all_ports_started(void);
415 
416 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
417 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418 
419 /*
420  * Helper function to check if a socket ID has not been discovered yet.
421  * If it is new, return a positive value; otherwise return zero.
422  */
423 int
424 new_socket_id(unsigned int socket_id)
425 {
426 	unsigned int i;
427 
428 	for (i = 0; i < num_sockets; i++) {
429 		if (socket_ids[i] == socket_id)
430 			return 0;
431 	}
432 	return 1;
433 }
434 
435 /*
436  * Setup default configuration.
437  */
438 static void
439 set_default_fwd_lcores_config(void)
440 {
441 	unsigned int i;
442 	unsigned int nb_lc;
443 	unsigned int sock_num;
444 
445 	nb_lc = 0;
446 	for (i = 0; i < RTE_MAX_LCORE; i++) {
447 		sock_num = rte_lcore_to_socket_id(i);
448 		if (new_socket_id(sock_num)) {
449 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
450 				rte_exit(EXIT_FAILURE,
451 					 "Total sockets greater than %u\n",
452 					 RTE_MAX_NUMA_NODES);
453 			}
454 			socket_ids[num_sockets++] = sock_num;
455 		}
456 		if (!rte_lcore_is_enabled(i))
457 			continue;
458 		if (i == rte_get_master_lcore())
459 			continue;
460 		fwd_lcores_cpuids[nb_lc++] = i;
461 	}
462 	nb_lcores = (lcoreid_t) nb_lc;
463 	nb_cfg_lcores = nb_lcores;
464 	nb_fwd_lcores = 1;
465 }
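
/*
 * Note that nb_fwd_lcores defaults to 1 here; it is typically raised with
 * the --nb-cores=N command-line option or the "set nb-cores N" interactive
 * command.
 */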
466 
467 static void
468 set_def_peer_eth_addrs(void)
469 {
470 	portid_t i;
471 
472 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
473 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
474 		peer_eth_addrs[i].addr_bytes[5] = i;
475 	}
476 }
477 
478 static void
479 set_default_fwd_ports_config(void)
480 {
481 	portid_t pt_id;
482 	int i = 0;
483 
484 	RTE_ETH_FOREACH_DEV(pt_id)
485 		fwd_ports_ids[i++] = pt_id;
486 
487 	nb_cfg_ports = nb_ports;
488 	nb_fwd_ports = nb_ports;
489 }
490 
491 void
492 set_def_fwd_config(void)
493 {
494 	set_default_fwd_lcores_config();
495 	set_def_peer_eth_addrs();
496 	set_default_fwd_ports_config();
497 }
498 
499 /*
500  * Configuration initialisation done once at init time.
501  */
502 static void
503 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
504 		 unsigned int socket_id)
505 {
506 	char pool_name[RTE_MEMPOOL_NAMESIZE];
507 	struct rte_mempool *rte_mp = NULL;
508 	uint32_t mb_size;
509 
510 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
511 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 
513 	TESTPMD_LOG(INFO,
514 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
515 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
516 
517 	if (mp_anon != 0) {
518 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
519 			mb_size, (unsigned) mb_mempool_cache,
520 			sizeof(struct rte_pktmbuf_pool_private),
521 			socket_id, 0);
522 		if (rte_mp == NULL)
523 			goto err;
524 
525 		if (rte_mempool_populate_anon(rte_mp) == 0) {
526 			rte_mempool_free(rte_mp);
527 			rte_mp = NULL;
528 			goto err;
529 		}
530 		rte_pktmbuf_pool_init(rte_mp, NULL);
531 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532 	} else {
533 		/* wrapper to rte_mempool_create() */
534 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
535 				rte_mbuf_best_mempool_ops());
536 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538 	}
539 
540 err:
541 	if (rte_mp == NULL) {
542 		rte_exit(EXIT_FAILURE,
543 			"Creation of mbuf pool for socket %u failed: %s\n",
544 			socket_id, rte_strerror(rte_errno));
545 	} else if (verbose_level > 0) {
546 		rte_mempool_dump(stdout, rte_mp);
547 	}
548 }
549 
550 /*
551  * Check whether the given socket ID is valid in NUMA mode;
552  * return 0 if valid, -1 otherwise.
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557 	static int warning_once = 0;
558 
559 	if (new_socket_id(socket_id)) {
560 		if (!warning_once && numa_support)
561 			printf("Warning: NUMA should be configured manually by"
562 			       " using --port-numa-config and"
563 			       " --ring-numa-config parameters along with"
564 			       " --numa.\n");
565 		warning_once = 1;
566 		return -1;
567 	}
568 	return 0;
569 }
570 
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid returns the port ID which has the minimal value of
574  * max_rx_queues among all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580 	portid_t pi;
581 	struct rte_eth_dev_info dev_info;
582 
583 	RTE_ETH_FOREACH_DEV(pi) {
584 		rte_eth_dev_info_get(pi, &dev_info);
585 		if (dev_info.max_rx_queues < allowed_max_rxq) {
586 			allowed_max_rxq = dev_info.max_rx_queues;
587 			*pid = pi;
588 		}
589 	}
590 	return allowed_max_rxq;
591 }
592 
593 /*
594  * Check whether the input rxq is valid.
595  * It is valid if it does not exceed the maximum number of
596  * RX queues of any port.
597  * Return 0 if valid, -1 otherwise.
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602 	queueid_t allowed_max_rxq;
603 	portid_t pid = 0;
604 
605 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 	if (rxq > allowed_max_rxq) {
607 		printf("Fail: input rxq (%u) can't be greater "
608 		       "than max_rx_queues (%u) of port %u\n",
609 		       rxq,
610 		       allowed_max_rxq,
611 		       pid);
612 		return -1;
613 	}
614 	return 0;
615 }
616 
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid returns the port ID which has the minimal value of
620  * max_tx_queues among all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
626 	portid_t pi;
627 	struct rte_eth_dev_info dev_info;
628 
629 	RTE_ETH_FOREACH_DEV(pi) {
630 		rte_eth_dev_info_get(pi, &dev_info);
631 		if (dev_info.max_tx_queues < allowed_max_txq) {
632 			allowed_max_txq = dev_info.max_tx_queues;
633 			*pid = pi;
634 		}
635 	}
636 	return allowed_max_txq;
637 }
638 
639 /*
640  * Check input txq is valid or not.
641  * If input txq is not greater than any of maximum number
642  * of TX queues of all ports, it is valid.
643  * if valid, return 0, else return -1
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648 	queueid_t allowed_max_txq;
649 	portid_t pid = 0;
650 
651 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 	if (txq > allowed_max_txq) {
653 		printf("Fail: input txq (%u) can't be greater "
654 		       "than max_tx_queues (%u) of port %u\n",
655 		       txq,
656 		       allowed_max_txq,
657 		       pid);
658 		return -1;
659 	}
660 	return 0;
661 }
662 
663 static void
664 init_config(void)
665 {
666 	portid_t pid;
667 	struct rte_port *port;
668 	struct rte_mempool *mbp;
669 	unsigned int nb_mbuf_per_pool;
670 	lcoreid_t  lc_id;
671 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 	struct rte_gro_param gro_param;
673 	uint32_t gso_types;
674 	int k;
675 
676 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
677 
678 	if (numa_support) {
679 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
681 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
682 	}
683 
684 	/* Configuration of logical cores. */
685 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
686 				sizeof(struct fwd_lcore *) * nb_lcores,
687 				RTE_CACHE_LINE_SIZE);
688 	if (fwd_lcores == NULL) {
689 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
690 							"failed\n", nb_lcores);
691 	}
692 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
693 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
694 					       sizeof(struct fwd_lcore),
695 					       RTE_CACHE_LINE_SIZE);
696 		if (fwd_lcores[lc_id] == NULL) {
697 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
698 								"failed\n");
699 		}
700 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
701 	}
702 
703 	RTE_ETH_FOREACH_DEV(pid) {
704 		port = &ports[pid];
705 		/* Apply default TxRx configuration for all ports */
706 		port->dev_conf.txmode = tx_mode;
707 		port->dev_conf.rxmode = rx_mode;
708 		rte_eth_dev_info_get(pid, &port->dev_info);
709 		if (!(port->dev_info.tx_offload_capa &
710 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
711 			port->dev_conf.txmode.offloads &=
712 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
713 		if (numa_support) {
714 			if (port_numa[pid] != NUMA_NO_CONFIG)
715 				port_per_socket[port_numa[pid]]++;
716 			else {
717 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
718 
719 				/* if socket_id is invalid, set to 0 */
720 				if (check_socket_id(socket_id) < 0)
721 					socket_id = 0;
722 				port_per_socket[socket_id]++;
723 			}
724 		}
725 
726 		/* Apply Rx offloads configuration */
727 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
728 			port->rx_conf[k].offloads =
729 				port->dev_conf.rxmode.offloads;
730 		/* Apply Tx offloads configuration */
731 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
732 			port->tx_conf[k].offloads =
733 				port->dev_conf.txmode.offloads;
734 
735 		/* set flag to initialize port/queue */
736 		port->need_reconfig = 1;
737 		port->need_reconfig_queues = 1;
738 	}
739 
740 	/*
741 	 * Create mbuf pools.
742 	 * If NUMA support is disabled, create a single mbuf pool in
743 	 * socket 0 memory by default.
744 	 * Otherwise, create an mbuf pool in the memory of each detected
745 	 * socket.
746 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
747 	 * and nb_txd can be configured at run time.
748 	 */
749 	if (param_total_num_mbufs)
750 		nb_mbuf_per_pool = param_total_num_mbufs;
751 	else {
752 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
753 			(nb_lcores * mb_mempool_cache) +
754 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
755 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
756 	}
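	/*
	 * Worked example, assuming the usual testpmd.h defaults
	 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512) with 4 lcores and a 250-entry mempool cache:
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool, multiplied by
	 * RTE_MAX_ETHPORTS.
	 */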
757 
758 	if (numa_support) {
759 		uint8_t i;
760 
761 		for (i = 0; i < num_sockets; i++)
762 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
763 					 socket_ids[i]);
764 	} else {
765 		if (socket_num == UMA_NO_CONFIG)
766 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
767 		else
768 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
769 						 socket_num);
770 	}
771 
772 	init_port_config();
773 
774 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
775 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
776 	/*
777 	 * Record which mbuf pool each logical core should use, if needed.
778 	 */
779 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
780 		mbp = mbuf_pool_find(
781 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
782 
783 		if (mbp == NULL)
784 			mbp = mbuf_pool_find(0);
785 		fwd_lcores[lc_id]->mbp = mbp;
786 		/* initialize GSO context */
787 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
788 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
789 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
790 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
791 			ETHER_CRC_LEN;
792 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
793 	}
794 
795 	/* Configuration of packet forwarding streams. */
796 	if (init_fwd_streams() < 0)
797 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
798 
799 	fwd_config_setup();
800 
801 	/* create a gro context for each lcore */
802 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
803 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
804 	gro_param.max_item_per_flow = MAX_PKT_BURST;
805 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
806 		gro_param.socket_id = rte_lcore_to_socket_id(
807 				fwd_lcores_cpuids[lc_id]);
808 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
809 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
810 			rte_exit(EXIT_FAILURE,
811 					"rte_gro_ctx_create() failed\n");
812 		}
813 	}
814 }
815 
816 
817 void
818 reconfig(portid_t new_port_id, unsigned socket_id)
819 {
820 	struct rte_port *port;
821 
822 	/* Reconfiguration of Ethernet ports. */
823 	port = &ports[new_port_id];
824 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
825 
826 	/* set flag to initialize port/queue */
827 	port->need_reconfig = 1;
828 	port->need_reconfig_queues = 1;
829 	port->socket_id = socket_id;
830 
831 	init_port_config();
832 }
833 
834 
835 int
836 init_fwd_streams(void)
837 {
838 	portid_t pid;
839 	struct rte_port *port;
840 	streamid_t sm_id, nb_fwd_streams_new;
841 	queueid_t q;
842 
843 	/* Set the socket ID according to whether NUMA is enabled */
844 	RTE_ETH_FOREACH_DEV(pid) {
845 		port = &ports[pid];
846 		if (nb_rxq > port->dev_info.max_rx_queues) {
847 			printf("Fail: nb_rxq(%d) is greater than "
848 				"max_rx_queues(%d)\n", nb_rxq,
849 				port->dev_info.max_rx_queues);
850 			return -1;
851 		}
852 		if (nb_txq > port->dev_info.max_tx_queues) {
853 			printf("Fail: nb_txq(%d) is greater than "
854 				"max_tx_queues(%d)\n", nb_txq,
855 				port->dev_info.max_tx_queues);
856 			return -1;
857 		}
858 		if (numa_support) {
859 			if (port_numa[pid] != NUMA_NO_CONFIG)
860 				port->socket_id = port_numa[pid];
861 			else {
862 				port->socket_id = rte_eth_dev_socket_id(pid);
863 
864 				/* if socket_id is invalid, set to 0 */
865 				if (check_socket_id(port->socket_id) < 0)
866 					port->socket_id = 0;
867 			}
868 		}
869 		else {
870 			if (socket_num == UMA_NO_CONFIG)
871 				port->socket_id = 0;
872 			else
873 				port->socket_id = socket_num;
874 		}
875 	}
876 
877 	q = RTE_MAX(nb_rxq, nb_txq);
878 	if (q == 0) {
879 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
880 		return -1;
881 	}
882 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
883 	if (nb_fwd_streams_new == nb_fwd_streams)
884 		return 0;
885 	/* clear the old */
886 	if (fwd_streams != NULL) {
887 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
888 			if (fwd_streams[sm_id] == NULL)
889 				continue;
890 			rte_free(fwd_streams[sm_id]);
891 			fwd_streams[sm_id] = NULL;
892 		}
893 		rte_free(fwd_streams);
894 		fwd_streams = NULL;
895 	}
896 
897 	/* init new */
898 	nb_fwd_streams = nb_fwd_streams_new;
899 	if (nb_fwd_streams) {
900 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
901 			sizeof(struct fwd_stream *) * nb_fwd_streams,
902 			RTE_CACHE_LINE_SIZE);
903 		if (fwd_streams == NULL)
904 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
905 				 " (struct fwd_stream *)) failed\n",
906 				 nb_fwd_streams);
907 
908 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
909 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
910 				" struct fwd_stream", sizeof(struct fwd_stream),
911 				RTE_CACHE_LINE_SIZE);
912 			if (fwd_streams[sm_id] == NULL)
913 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
914 					 "(struct fwd_stream) failed\n");
915 		}
916 	}
917 
918 	return 0;
919 }
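
/*
 * Example of the stream count computed above: with 2 ports, nb_rxq = 4 and
 * nb_txq = 1, nb_fwd_streams = 2 * max(4, 1) = 8.
 */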
920 
921 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
922 static void
923 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
924 {
925 	unsigned int total_burst;
926 	unsigned int nb_burst;
927 	unsigned int burst_stats[3];
928 	uint16_t pktnb_stats[3];
929 	uint16_t nb_pkt;
930 	int burst_percent[3];
931 
932 	/*
933 	 * First compute the total number of packet bursts and the
934 	 * two highest numbers of bursts of the same number of packets.
935 	 */
936 	total_burst = 0;
937 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
938 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
939 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
940 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
941 		if (nb_burst == 0)
942 			continue;
943 		total_burst += nb_burst;
944 		if (nb_burst > burst_stats[0]) {
945 			burst_stats[1] = burst_stats[0];
946 			pktnb_stats[1] = pktnb_stats[0];
947 			burst_stats[0] = nb_burst;
948 			pktnb_stats[0] = nb_pkt;
949 		} else if (nb_burst > burst_stats[1]) {
			/* track the second-highest burst size too,
			 * as the comment above promises */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
950 	}
951 	if (total_burst == 0)
952 		return;
953 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
954 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
955 	       burst_percent[0], (int) pktnb_stats[0]);
956 	if (burst_stats[0] == total_burst) {
957 		printf("]\n");
958 		return;
959 	}
960 	if (burst_stats[0] + burst_stats[1] == total_burst) {
961 		printf(" + %d%% of %d pkts]\n",
962 		       100 - burst_percent[0], pktnb_stats[1]);
963 		return;
964 	}
965 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
966 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
967 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
968 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
969 		return;
970 	}
971 	printf(" + %d%% of %d pkts + %d%% of others]\n",
972 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
973 }
974 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
975 
976 static void
977 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
978 {
979 	struct rte_port *port;
980 	uint8_t i;
981 
982 	static const char *fwd_stats_border = "----------------------";
983 
984 	port = &ports[port_id];
985 	printf("\n  %s Forward statistics for port %-2d %s\n",
986 	       fwd_stats_border, port_id, fwd_stats_border);
987 
988 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
989 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
990 		       "%-"PRIu64"\n",
991 		       stats->ipackets, stats->imissed,
992 		       (uint64_t) (stats->ipackets + stats->imissed));
993 
994 		if (cur_fwd_eng == &csum_fwd_engine)
995 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
996 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
997 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
998 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
999 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1000 		}
1001 
1002 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1003 		       "%-"PRIu64"\n",
1004 		       stats->opackets, port->tx_dropped,
1005 		       (uint64_t) (stats->opackets + port->tx_dropped));
1006 	}
1007 	else {
1008 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1009 		       "%14"PRIu64"\n",
1010 		       stats->ipackets, stats->imissed,
1011 		       (uint64_t) (stats->ipackets + stats->imissed));
1012 
1013 		if (cur_fwd_eng == &csum_fwd_engine)
1014 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1015 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1016 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1017 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1018 			printf("  RX-nombufs:             %14"PRIu64"\n",
1019 			       stats->rx_nombuf);
1020 		}
1021 
1022 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1023 		       "%14"PRIu64"\n",
1024 		       stats->opackets, port->tx_dropped,
1025 		       (uint64_t) (stats->opackets + port->tx_dropped));
1026 	}
1027 
1028 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1029 	if (port->rx_stream)
1030 		pkt_burst_stats_display("RX",
1031 			&port->rx_stream->rx_burst_stats);
1032 	if (port->tx_stream)
1033 		pkt_burst_stats_display("TX",
1034 			&port->tx_stream->tx_burst_stats);
1035 #endif
1036 
1037 	if (port->rx_queue_stats_mapping_enabled) {
1038 		printf("\n");
1039 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1040 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1041 			       "     RX-errors:%14"PRIu64
1042 			       "    RX-bytes:%14"PRIu64"\n",
1043 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1044 		}
1045 		printf("\n");
1046 	}
1047 	if (port->tx_queue_stats_mapping_enabled) {
1048 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1049 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1050 			       "                                 TX-bytes:%14"PRIu64"\n",
1051 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1052 		}
1053 	}
1054 
1055 	printf("  %s--------------------------------%s\n",
1056 	       fwd_stats_border, fwd_stats_border);
1057 }
1058 
1059 static void
1060 fwd_stream_stats_display(streamid_t stream_id)
1061 {
1062 	struct fwd_stream *fs;
1063 	static const char *fwd_top_stats_border = "-------";
1064 
1065 	fs = fwd_streams[stream_id];
1066 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1067 	    (fs->fwd_dropped == 0))
1068 		return;
1069 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1070 	       "TX Port=%2d/Queue=%2d %s\n",
1071 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1072 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1073 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1074 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1075 
1076 	/* if checksum mode */
1077 	if (cur_fwd_eng == &csum_fwd_engine) {
1078 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
1079 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1080 	}
1081 
1082 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1083 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1084 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1085 #endif
1086 }
1087 
1088 static void
1089 flush_fwd_rx_queues(void)
1090 {
1091 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1092 	portid_t  rxp;
1093 	portid_t port_id;
1094 	queueid_t rxq;
1095 	uint16_t  nb_rx;
1096 	uint16_t  i;
1097 	uint8_t   j;
1098 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1099 	uint64_t timer_period;
1100 
1101 	/* convert to number of cycles */
1102 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1103 
1104 	for (j = 0; j < 2; j++) {
1105 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1106 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1107 				port_id = fwd_ports_ids[rxp];
1108 				/*
1109 				 * testpmd can get stuck in the do-while loop
1110 				 * below if rte_eth_rx_burst() always returns
1111 				 * packets. A timer is added to exit this loop
1112 				 * after a 1-second expiry.
1113 				 */
1114 				prev_tsc = rte_rdtsc();
1115 				do {
1116 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1117 						pkts_burst, MAX_PKT_BURST);
1118 					for (i = 0; i < nb_rx; i++)
1119 						rte_pktmbuf_free(pkts_burst[i]);
1120 
1121 					cur_tsc = rte_rdtsc();
1122 					diff_tsc = cur_tsc - prev_tsc;
1123 					timer_tsc += diff_tsc;
1124 				} while ((nb_rx > 0) &&
1125 					(timer_tsc < timer_period));
1126 				timer_tsc = 0;
1127 			}
1128 		}
1129 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1130 	}
1131 }
1132 
1133 static void
1134 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1135 {
1136 	struct fwd_stream **fsm;
1137 	streamid_t nb_fs;
1138 	streamid_t sm_id;
1139 #ifdef RTE_LIBRTE_BITRATE
1140 	uint64_t tics_per_1sec;
1141 	uint64_t tics_datum;
1142 	uint64_t tics_current;
1143 	uint16_t idx_port;
1144 
1145 	tics_datum = rte_rdtsc();
1146 	tics_per_1sec = rte_get_timer_hz();
1147 #endif
1148 	fsm = &fwd_streams[fc->stream_idx];
1149 	nb_fs = fc->stream_nb;
1150 	do {
1151 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1152 			(*pkt_fwd)(fsm[sm_id]);
1153 #ifdef RTE_LIBRTE_BITRATE
1154 		if (bitrate_enabled != 0 &&
1155 				bitrate_lcore_id == rte_lcore_id()) {
1156 			tics_current = rte_rdtsc();
1157 			if (tics_current - tics_datum >= tics_per_1sec) {
1158 				/* Periodic bitrate calculation */
1159 				RTE_ETH_FOREACH_DEV(idx_port)
1160 					rte_stats_bitrate_calc(bitrate_data,
1161 						idx_port);
1162 				tics_datum = tics_current;
1163 			}
1164 		}
1165 #endif
1166 #ifdef RTE_LIBRTE_LATENCY_STATS
1167 		if (latencystats_enabled != 0 &&
1168 				latencystats_lcore_id == rte_lcore_id())
1169 			rte_latencystats_update();
1170 #endif
1171 
1172 	} while (! fc->stopped);
1173 }
1174 
1175 static int
1176 start_pkt_forward_on_core(void *fwd_arg)
1177 {
1178 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1179 			     cur_fwd_config.fwd_eng->packet_fwd);
1180 	return 0;
1181 }
1182 
1183 /*
1184  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1185  * Used to start communication flows in network loopback test configurations.
1186  */
1187 static int
1188 run_one_txonly_burst_on_core(void *fwd_arg)
1189 {
1190 	struct fwd_lcore *fwd_lc;
1191 	struct fwd_lcore tmp_lcore;
1192 
1193 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1194 	tmp_lcore = *fwd_lc;
1195 	tmp_lcore.stopped = 1;
1196 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1197 	return 0;
1198 }
1199 
1200 /*
1201  * Launch packet forwarding:
1202  *     - Setup per-port forwarding context.
1203  *     - launch logical cores with their forwarding configuration.
1204  */
1205 static void
1206 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1207 {
1208 	port_fwd_begin_t port_fwd_begin;
1209 	unsigned int i;
1210 	unsigned int lc_id;
1211 	int diag;
1212 
1213 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1214 	if (port_fwd_begin != NULL) {
1215 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1216 			(*port_fwd_begin)(fwd_ports_ids[i]);
1217 	}
1218 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1219 		lc_id = fwd_lcores_cpuids[i];
1220 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1221 			fwd_lcores[i]->stopped = 0;
1222 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1223 						     fwd_lcores[i], lc_id);
1224 			if (diag != 0)
1225 				printf("launch lcore %u failed - diag=%d\n",
1226 				       lc_id, diag);
1227 		}
1228 	}
1229 }
1230 
1231 /*
1232  * Update the forward ports list.
1233  */
1234 void
1235 update_fwd_ports(portid_t new_pid)
1236 {
1237 	unsigned int i;
1238 	unsigned int new_nb_fwd_ports = 0;
1239 	int move = 0;
1240 
1241 	for (i = 0; i < nb_fwd_ports; ++i) {
1242 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1243 			move = 1;
1244 		else if (move)
1245 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1246 		else
1247 			new_nb_fwd_ports++;
1248 	}
1249 	if (new_pid < RTE_MAX_ETHPORTS)
1250 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1251 
1252 	nb_fwd_ports = new_nb_fwd_ports;
1253 	nb_cfg_ports = new_nb_fwd_ports;
1254 }
1255 
1256 /*
1257  * Launch packet forwarding configuration.
1258  */
1259 void
1260 start_packet_forwarding(int with_tx_first)
1261 {
1262 	port_fwd_begin_t port_fwd_begin;
1263 	port_fwd_end_t  port_fwd_end;
1264 	struct rte_port *port;
1265 	unsigned int i;
1266 	portid_t   pt_id;
1267 	streamid_t sm_id;
1268 
1269 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1270 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1271 
1272 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1273 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1274 
1275 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1276 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1277 		(!nb_rxq || !nb_txq))
1278 		rte_exit(EXIT_FAILURE,
1279 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1280 			cur_fwd_eng->fwd_mode_name);
1281 
1282 	if (all_ports_started() == 0) {
1283 		printf("Not all ports were started\n");
1284 		return;
1285 	}
1286 	if (test_done == 0) {
1287 		printf("Packet forwarding already started\n");
1288 		return;
1289 	}
1290 
1291 
1292 	if (dcb_test) {
1293 		for (i = 0; i < nb_fwd_ports; i++) {
1294 			pt_id = fwd_ports_ids[i];
1295 			port = &ports[pt_id];
1296 			if (!port->dcb_flag) {
1297 				printf("In DCB mode, all forwarding ports must "
1298                                        "be configured in this mode.\n");
1299 				return;
1300 			}
1301 		}
1302 		if (nb_fwd_lcores == 1) {
1303 			printf("In DCB mode, the number of forwarding cores "
1304                                "should be larger than 1.\n");
1305 			return;
1306 		}
1307 	}
1308 	test_done = 0;
1309 
1310 	fwd_config_setup();
1311 
1312 	if (!no_flush_rx)
1313 		flush_fwd_rx_queues();
1314 
1315 	pkt_fwd_config_display(&cur_fwd_config);
1316 	rxtx_config_display();
1317 
1318 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1319 		pt_id = fwd_ports_ids[i];
1320 		port = &ports[pt_id];
1321 		rte_eth_stats_get(pt_id, &port->stats);
1322 		port->tx_dropped = 0;
1323 
1324 		map_port_queue_stats_mapping_registers(pt_id, port);
1325 	}
1326 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1327 		fwd_streams[sm_id]->rx_packets = 0;
1328 		fwd_streams[sm_id]->tx_packets = 0;
1329 		fwd_streams[sm_id]->fwd_dropped = 0;
1330 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1331 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1332 
1333 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1334 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1335 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1336 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1337 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1338 #endif
1339 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1340 		fwd_streams[sm_id]->core_cycles = 0;
1341 #endif
1342 	}
1343 	if (with_tx_first) {
1344 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1345 		if (port_fwd_begin != NULL) {
1346 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1347 				(*port_fwd_begin)(fwd_ports_ids[i]);
1348 		}
1349 		while (with_tx_first--) {
1350 			launch_packet_forwarding(
1351 					run_one_txonly_burst_on_core);
1352 			rte_eal_mp_wait_lcore();
1353 		}
1354 		port_fwd_end = tx_only_engine.port_fwd_end;
1355 		if (port_fwd_end != NULL) {
1356 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1357 				(*port_fwd_end)(fwd_ports_ids[i]);
1358 		}
1359 	}
1360 	launch_packet_forwarding(start_pkt_forward_on_core);
1361 }
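
/*
 * For reference: this function backs the interactive "start" command;
 * with_tx_first is non-zero for "start tx_first [n]", which sends n bursts
 * with the TXONLY engine before the configured engine takes over.
 */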
1362 
1363 void
1364 stop_packet_forwarding(void)
1365 {
1366 	struct rte_eth_stats stats;
1367 	struct rte_port *port;
1368 	port_fwd_end_t  port_fwd_end;
1369 	int i;
1370 	portid_t   pt_id;
1371 	streamid_t sm_id;
1372 	lcoreid_t  lc_id;
1373 	uint64_t total_recv;
1374 	uint64_t total_xmit;
1375 	uint64_t total_rx_dropped;
1376 	uint64_t total_tx_dropped;
1377 	uint64_t total_rx_nombuf;
1378 	uint64_t tx_dropped;
1379 	uint64_t rx_bad_ip_csum;
1380 	uint64_t rx_bad_l4_csum;
1381 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1382 	uint64_t fwd_cycles;
1383 #endif
1384 
1385 	static const char *acc_stats_border = "+++++++++++++++";
1386 
1387 	if (test_done) {
1388 		printf("Packet forwarding not started\n");
1389 		return;
1390 	}
1391 	printf("Telling cores to stop...");
1392 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1393 		fwd_lcores[lc_id]->stopped = 1;
1394 	printf("\nWaiting for lcores to finish...\n");
1395 	rte_eal_mp_wait_lcore();
1396 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1397 	if (port_fwd_end != NULL) {
1398 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1399 			pt_id = fwd_ports_ids[i];
1400 			(*port_fwd_end)(pt_id);
1401 		}
1402 	}
1403 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1404 	fwd_cycles = 0;
1405 #endif
1406 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1407 		if (cur_fwd_config.nb_fwd_streams >
1408 		    cur_fwd_config.nb_fwd_ports) {
1409 			fwd_stream_stats_display(sm_id);
1410 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1411 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1412 		} else {
1413 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1414 				fwd_streams[sm_id];
1415 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1416 				fwd_streams[sm_id];
1417 		}
1418 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1419 		tx_dropped = (uint64_t) (tx_dropped +
1420 					 fwd_streams[sm_id]->fwd_dropped);
1421 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1422 
1423 		rx_bad_ip_csum =
1424 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1425 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1426 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1427 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1428 							rx_bad_ip_csum;
1429 
1430 		rx_bad_l4_csum =
1431 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1432 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1433 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1434 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1435 							rx_bad_l4_csum;
1436 
1437 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1438 		fwd_cycles = (uint64_t) (fwd_cycles +
1439 					 fwd_streams[sm_id]->core_cycles);
1440 #endif
1441 	}
1442 	total_recv = 0;
1443 	total_xmit = 0;
1444 	total_rx_dropped = 0;
1445 	total_tx_dropped = 0;
1446 	total_rx_nombuf  = 0;
1447 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1448 		pt_id = fwd_ports_ids[i];
1449 
1450 		port = &ports[pt_id];
1451 		rte_eth_stats_get(pt_id, &stats);
1452 		stats.ipackets -= port->stats.ipackets;
1453 		port->stats.ipackets = 0;
1454 		stats.opackets -= port->stats.opackets;
1455 		port->stats.opackets = 0;
1456 		stats.ibytes   -= port->stats.ibytes;
1457 		port->stats.ibytes = 0;
1458 		stats.obytes   -= port->stats.obytes;
1459 		port->stats.obytes = 0;
1460 		stats.imissed  -= port->stats.imissed;
1461 		port->stats.imissed = 0;
1462 		stats.oerrors  -= port->stats.oerrors;
1463 		port->stats.oerrors = 0;
1464 		stats.rx_nombuf -= port->stats.rx_nombuf;
1465 		port->stats.rx_nombuf = 0;
1466 
1467 		total_recv += stats.ipackets;
1468 		total_xmit += stats.opackets;
1469 		total_rx_dropped += stats.imissed;
1470 		total_tx_dropped += port->tx_dropped;
1471 		total_rx_nombuf  += stats.rx_nombuf;
1472 
1473 		fwd_port_stats_display(pt_id, &stats);
1474 	}
1475 
1476 	printf("\n  %s Accumulated forward statistics for all ports"
1477 	       "%s\n",
1478 	       acc_stats_border, acc_stats_border);
1479 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1480 	       "%-"PRIu64"\n"
1481 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1482 	       "%-"PRIu64"\n",
1483 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1484 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1485 	if (total_rx_nombuf > 0)
1486 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1487 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1488 	       "%s\n",
1489 	       acc_stats_border, acc_stats_border);
1490 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1491 	if (total_recv > 0)
1492 		printf("\n  CPU cycles/packet=%u (total cycles="
1493 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1494 		       (unsigned int)(fwd_cycles / total_recv),
1495 		       fwd_cycles, total_recv);
1496 #endif
1497 	printf("\nDone.\n");
1498 	test_done = 1;
1499 }
1500 
1501 void
1502 dev_set_link_up(portid_t pid)
1503 {
1504 	if (rte_eth_dev_set_link_up(pid) < 0)
1505 		printf("\nFailed to set link up.\n");
1506 }
1507 
1508 void
1509 dev_set_link_down(portid_t pid)
1510 {
1511 	if (rte_eth_dev_set_link_down(pid) < 0)
1512 		printf("\nFailed to set link down.\n");
1513 }
1514 
1515 static int
1516 all_ports_started(void)
1517 {
1518 	portid_t pi;
1519 	struct rte_port *port;
1520 
1521 	RTE_ETH_FOREACH_DEV(pi) {
1522 		port = &ports[pi];
1523 		/* Check if there is a port which is not started */
1524 		if ((port->port_status != RTE_PORT_STARTED) &&
1525 			(port->slave_flag == 0))
1526 			return 0;
1527 	}
1528 
1529 	/* All ports are started */
1530 	return 1;
1531 }
1532 
1533 int
1534 port_is_stopped(portid_t port_id)
1535 {
1536 	struct rte_port *port = &ports[port_id];
1537 
1538 	if ((port->port_status != RTE_PORT_STOPPED) &&
1539 	    (port->slave_flag == 0))
1540 		return 0;
1541 	return 1;
1542 }
1543 
1544 int
1545 all_ports_stopped(void)
1546 {
1547 	portid_t pi;
1548 
1549 	RTE_ETH_FOREACH_DEV(pi) {
1550 		if (!port_is_stopped(pi))
1551 			return 0;
1552 	}
1553 
1554 	return 1;
1555 }
1556 
1557 int
1558 port_is_started(portid_t port_id)
1559 {
1560 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1561 		return 0;
1562 
1563 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1564 		return 0;
1565 
1566 	return 1;
1567 }
1568 
1569 static int
1570 port_is_closed(portid_t port_id)
1571 {
1572 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1573 		return 0;
1574 
1575 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1576 		return 0;
1577 
1578 	return 1;
1579 }
1580 
1581 int
1582 start_port(portid_t pid)
1583 {
1584 	int diag, need_check_link_status = -1;
1585 	portid_t pi;
1586 	queueid_t qi;
1587 	struct rte_port *port;
1588 	struct ether_addr mac_addr;
1589 	enum rte_eth_event_type event_type;
1590 
1591 	if (port_id_is_invalid(pid, ENABLED_WARN))
1592 		return 0;
1593 
1594 	if (dcb_config)
1595 		dcb_test = 1;
1596 	RTE_ETH_FOREACH_DEV(pi) {
1597 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1598 			continue;
1599 
1600 		need_check_link_status = 0;
1601 		port = &ports[pi];
1602 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1603 						 RTE_PORT_HANDLING) == 0) {
1604 			printf("Port %d is not stopped\n", pi);
1605 			continue;
1606 		}
1607 
1608 		if (port->need_reconfig > 0) {
1609 			port->need_reconfig = 0;
1610 
1611 			if (flow_isolate_all) {
1612 				int ret = port_flow_isolate(pi, 1);
1613 				if (ret) {
1614 					printf("Failed to apply isolated"
1615 					       " mode on port %d\n", pi);
1616 					return -1;
1617 				}
1618 			}
1619 
1620 			printf("Configuring Port %d (socket %u)\n", pi,
1621 					port->socket_id);
1622 			/* configure port */
1623 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1624 						&(port->dev_conf));
1625 			if (diag != 0) {
1626 				if (rte_atomic16_cmpset(&(port->port_status),
1627 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1628 					printf("Port %d can not be set back "
1629 							"to stopped\n", pi);
1630 				printf("Fail to configure port %d\n", pi);
1631 				/* try to reconfigure port next time */
1632 				port->need_reconfig = 1;
1633 				return -1;
1634 			}
1635 		}
1636 		if (port->need_reconfig_queues > 0) {
1637 			port->need_reconfig_queues = 0;
1638 			/* setup tx queues */
1639 			for (qi = 0; qi < nb_txq; qi++) {
1640 				port->tx_conf[qi].txq_flags =
1641 					ETH_TXQ_FLAGS_IGNORE;
1642 				if ((numa_support) &&
1643 					(txring_numa[pi] != NUMA_NO_CONFIG))
1644 					diag = rte_eth_tx_queue_setup(pi, qi,
1645 						port->nb_tx_desc[qi],
1646 						txring_numa[pi],
1647 						&(port->tx_conf[qi]));
1648 				else
1649 					diag = rte_eth_tx_queue_setup(pi, qi,
1650 						port->nb_tx_desc[qi],
1651 						port->socket_id,
1652 						&(port->tx_conf[qi]));
1653 
1654 				if (diag == 0)
1655 					continue;
1656 
1657 				/* Fail to setup tx queue, return */
1658 				if (rte_atomic16_cmpset(&(port->port_status),
1659 							RTE_PORT_HANDLING,
1660 							RTE_PORT_STOPPED) == 0)
1661 					printf("Port %d can not be set back "
1662 							"to stopped\n", pi);
1663 				printf("Fail to configure port %d tx queues\n",
1664 				       pi);
1665 				/* try to reconfigure queues next time */
1666 				port->need_reconfig_queues = 1;
1667 				return -1;
1668 			}
1669 			for (qi = 0; qi < nb_rxq; qi++) {
1670 				/* setup rx queues */
1671 				if ((numa_support) &&
1672 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1673 					struct rte_mempool * mp =
1674 						mbuf_pool_find(rxring_numa[pi]);
1675 					if (mp == NULL) {
1676 						printf("Failed to setup RX queue:"
1677 							"No mempool allocation"
1678 							" on the socket %d\n",
1679 							rxring_numa[pi]);
1680 						return -1;
1681 					}
1682 
1683 					diag = rte_eth_rx_queue_setup(pi, qi,
1684 					     port->nb_rx_desc[pi],
1685 					     rxring_numa[pi],
1686 					     &(port->rx_conf[qi]),
1687 					     mp);
1688 				} else {
1689 					struct rte_mempool *mp =
1690 						mbuf_pool_find(port->socket_id);
1691 					if (mp == NULL) {
1692 						printf("Failed to setup RX queue:"
1693 							"No mempool allocation"
1694 							" on the socket %d\n",
1695 							port->socket_id);
1696 						return -1;
1697 					}
1698 					diag = rte_eth_rx_queue_setup(pi, qi,
1699 					     port->nb_rx_desc[pi],
1700 					     port->socket_id,
1701 					     &(port->rx_conf[qi]),
1702 					     mp);
1703 				}
1704 				if (diag == 0)
1705 					continue;
1706 
1707 				/* Fail to setup rx queue, return */
1708 				if (rte_atomic16_cmpset(&(port->port_status),
1709 							RTE_PORT_HANDLING,
1710 							RTE_PORT_STOPPED) == 0)
1711 					printf("Port %d can not be set back "
1712 							"to stopped\n", pi);
1713 				printf("Fail to configure port %d rx queues\n",
1714 				       pi);
1715 				/* try to reconfigure queues next time */
1716 				port->need_reconfig_queues = 1;
1717 				return -1;
1718 			}
1719 		}
1720 
1721 		/* start port */
1722 		if (rte_eth_dev_start(pi) < 0) {
1723 			printf("Fail to start port %d\n", pi);
1724 
1725 			/* Fail to setup rx queue, return */
1726 			if (rte_atomic16_cmpset(&(port->port_status),
1727 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1728 				printf("Port %d can not be set back to "
1729 							"stopped\n", pi);
1730 			continue;
1731 		}
1732 
1733 		if (rte_atomic16_cmpset(&(port->port_status),
1734 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1735 			printf("Port %d can not be set into started\n", pi);
1736 
1737 		rte_eth_macaddr_get(pi, &mac_addr);
1738 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1739 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1740 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1741 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1742 
1743 		/* At least one port started; need to check link status */
1744 		need_check_link_status = 1;
1745 	}
1746 
1747 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1748 	     event_type < RTE_ETH_EVENT_MAX;
1749 	     event_type++) {
1750 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1751 						event_type,
1752 						eth_event_callback,
1753 						NULL);
1754 		if (diag) {
1755 			printf("Failed to setup event callback for event %d\n",
1756 				event_type);
1757 			return -1;
1758 		}
1759 	}
1760 
1761 	if (need_check_link_status == 1 && !no_link_check)
1762 		check_all_ports_link_status(RTE_PORT_ALL);
1763 	else if (need_check_link_status == 0)
1764 		printf("Please stop the ports first\n");
1765 
1766 	printf("Done\n");
1767 	return 0;
1768 }
1769 
1770 void
1771 stop_port(portid_t pid)
1772 {
1773 	portid_t pi;
1774 	struct rte_port *port;
1775 	int need_check_link_status = 0;
1776 
1777 	if (dcb_test) {
1778 		dcb_test = 0;
1779 		dcb_config = 0;
1780 	}
1781 
1782 	if (port_id_is_invalid(pid, ENABLED_WARN))
1783 		return;
1784 
1785 	printf("Stopping ports...\n");
1786 
1787 	RTE_ETH_FOREACH_DEV(pi) {
1788 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1789 			continue;
1790 
1791 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1792 			printf("Please remove port %d from forwarding configuration.\n", pi);
1793 			continue;
1794 		}
1795 
1796 		if (port_is_bonding_slave(pi)) {
1797 			printf("Please remove port %d from bonded device.\n", pi);
1798 			continue;
1799 		}
1800 
1801 		port = &ports[pi];
1802 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1803 						RTE_PORT_HANDLING) == 0)
1804 			continue;
1805 
1806 		rte_eth_dev_stop(pi);
1807 
1808 		if (rte_atomic16_cmpset(&(port->port_status),
1809 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1810 			printf("Port %d can not be set into stopped\n", pi);
1811 		need_check_link_status = 1;
1812 	}
1813 	if (need_check_link_status && !no_link_check)
1814 		check_all_ports_link_status(RTE_PORT_ALL);
1815 
1816 	printf("Done\n");
1817 }
1818 
1819 void
1820 close_port(portid_t pid)
1821 {
1822 	portid_t pi;
1823 	struct rte_port *port;
1824 
1825 	if (port_id_is_invalid(pid, ENABLED_WARN))
1826 		return;
1827 
1828 	printf("Closing ports...\n");
1829 
1830 	RTE_ETH_FOREACH_DEV(pi) {
1831 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1832 			continue;
1833 
1834 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1835 			printf("Please remove port %d from forwarding configuration.\n", pi);
1836 			continue;
1837 		}
1838 
1839 		if (port_is_bonding_slave(pi)) {
1840 			printf("Please remove port %d from bonded device.\n", pi);
1841 			continue;
1842 		}
1843 
1844 		port = &ports[pi];
1845 		if (rte_atomic16_cmpset(&(port->port_status),
1846 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1847 			printf("Port %d is already closed\n", pi);
1848 			continue;
1849 		}
1850 
1851 		if (rte_atomic16_cmpset(&(port->port_status),
1852 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1853 			printf("Port %d is not stopped\n", pi);
1854 			continue;
1855 		}
1856 
1857 		if (port->flow_list)
1858 			port_flow_flush(pi);
1859 		rte_eth_dev_close(pi);
1860 
1861 		if (rte_atomic16_cmpset(&(port->port_status),
1862 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1863 			printf("Port %d cannot be set to closed\n", pi);
1864 	}
1865 
1866 	printf("Done\n");
1867 }
1868 
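/*
 * Reset the given port, or every port when pid is RTE_PORT_ALL, and mark it
 * so that both the device and its queues are reconfigured on the next start.
 */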
1869 void
1870 reset_port(portid_t pid)
1871 {
1872 	int diag;
1873 	portid_t pi;
1874 	struct rte_port *port;
1875 
1876 	if (port_id_is_invalid(pid, ENABLED_WARN))
1877 		return;
1878 
1879 	printf("Resetting ports...\n");
1880 
1881 	RTE_ETH_FOREACH_DEV(pi) {
1882 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1883 			continue;
1884 
1885 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1886 			printf("Please remove port %d from forwarding "
1887 			       "configuration.\n", pi);
1888 			continue;
1889 		}
1890 
1891 		if (port_is_bonding_slave(pi)) {
1892 			printf("Please remove port %d from bonded device.\n",
1893 			       pi);
1894 			continue;
1895 		}
1896 
1897 		diag = rte_eth_dev_reset(pi);
1898 		if (diag == 0) {
1899 			port = &ports[pi];
1900 			port->need_reconfig = 1;
1901 			port->need_reconfig_queues = 1;
1902 		} else {
1903 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1904 		}
1905 	}
1906 
1907 	printf("Done\n");
1908 }
1909 
1910 static int
1911 eth_dev_event_callback_register(void)
1912 {
1913 	int ret;
1914 
1915 	/* register the device event callback */
1916 	ret = rte_dev_event_callback_register(NULL,
1917 		eth_dev_event_callback, NULL);
1918 	if (ret) {
1919 		printf("Failed to register device event callback\n");
1920 		return -1;
1921 	}
1922 
1923 	return 0;
1924 }
1925 
1927 static int
1928 eth_dev_event_callback_unregister(void)
1929 {
1930 	int ret;
1931 
1932 	/* unregister the device event callback */
1933 	ret = rte_dev_event_callback_unregister(NULL,
1934 		eth_dev_event_callback, NULL);
1935 	if (ret < 0) {
1936 		printf("Failed to unregister device event callback\n");
1937 		return -1;
1938 	}
1939 
1940 	return 0;
1941 }
1942 
1943 void
1944 attach_port(char *identifier)
1945 {
1946 	portid_t pi = 0;
1947 	unsigned int socket_id;
1948 
1949 	printf("Attaching a new port...\n");
1950 
1951 	if (identifier == NULL) {
1952 		printf("Invalid parameter specified\n");
1953 		return;
1954 	}
1955 
1956 	if (rte_eth_dev_attach(identifier, &pi))
1957 		return;
1958 
1959 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1960 	/* if socket_id is invalid, set to 0 */
1961 	if (check_socket_id(socket_id) < 0)
1962 		socket_id = 0;
1963 	reconfig(pi, socket_id);
1964 	rte_eth_promiscuous_enable(pi);
1965 
1966 	nb_ports = rte_eth_dev_count_avail();
1967 
1968 	ports[pi].port_status = RTE_PORT_STOPPED;
1969 
1970 	update_fwd_ports(pi);
1971 
1972 	printf("Port %d is attached. Total number of ports is now %d\n",
			pi, nb_ports);
1973 	printf("Done\n");
1974 }
1975 
1976 void
1977 detach_port(portid_t port_id)
1978 {
1979 	char name[RTE_ETH_NAME_MAX_LEN];
1980 
1981 	printf("Detaching a port...\n");
1982 
1983 	if (!port_is_closed(port_id)) {
1984 		printf("Please close port first\n");
1985 		return;
1986 	}
1987 
1988 	if (ports[port_id].flow_list)
1989 		port_flow_flush(port_id);
1990 
1991 	if (rte_eth_dev_detach(port_id, name)) {
1992 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
1993 		return;
1994 	}
1995 
1996 	nb_ports = rte_eth_dev_count_avail();
1997 
1998 	update_fwd_ports(RTE_MAX_ETHPORTS);
1999 
2000 	printf("Port %u is detached. Total number of ports is now %d\n",
2001 			port_id, nb_ports);
2002 	printf("Done\n");
2004 }
2005 
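/*
 * Final cleanup on exit: stop forwarding, stop and close every port and,
 * when hot-plug is enabled, tear down the device event monitoring.
 */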
2006 void
2007 pmd_test_exit(void)
2008 {
2009 	portid_t pt_id;
2010 	int ret;
2011 
2012 	if (test_done == 0)
2013 		stop_packet_forwarding();
2014 
2015 	if (ports != NULL) {
2016 		no_link_check = 1;
2017 		RTE_ETH_FOREACH_DEV(pt_id) {
2018 			printf("\nShutting down port %d...\n", pt_id);
2019 			fflush(stdout);
2020 			stop_port(pt_id);
2021 			close_port(pt_id);
2022 		}
2023 	}
2024 
2025 	if (hot_plug) {
2026 		ret = rte_dev_event_monitor_stop();
2027 		if (ret)
2028 			RTE_LOG(ERR, EAL,
2029 				"Failed to stop the device event monitor.\n");
2030 
2031 		ret = eth_dev_event_callback_unregister();
2032 		if (ret)
2033 			RTE_LOG(ERR, EAL,
2034 				"Failed to unregister all event callbacks.\n");
2035 	}
2036 
2037 	printf("\nBye...\n");
2038 }
2039 
2040 typedef void (*cmd_func_t)(void);
2041 struct pmd_test_command {
2042 	const char *cmd_name;
2043 	cmd_func_t cmd_func;
2044 };
2045 
2046 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
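/*
 * Illustrative only: PMD_TEST_CMD_NB assumes a command table such as the
 * hypothetical one below is defined elsewhere:
 *
 *	static struct pmd_test_command pmd_test_menu[] = {
 *		{ "help", print_help },
 *	};
 */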
2047 
2048 /* Poll the link status of all ports for up to 9 s and print the final result. */
2049 static void
2050 check_all_ports_link_status(uint32_t port_mask)
2051 {
2052 #define CHECK_INTERVAL 100 /* 100ms */
2053 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2054 	portid_t portid;
2055 	uint8_t count, all_ports_up, print_flag = 0;
2056 	struct rte_eth_link link;
2057 
2058 	printf("Checking link statuses...\n");
2059 	fflush(stdout);
2060 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2061 		all_ports_up = 1;
2062 		RTE_ETH_FOREACH_DEV(portid) {
2063 			if ((port_mask & (1 << portid)) == 0)
2064 				continue;
2065 			memset(&link, 0, sizeof(link));
2066 			rte_eth_link_get_nowait(portid, &link);
2067 			/* print link status if flag set */
2068 			if (print_flag == 1) {
2069 				if (link.link_status)
2070 				if (link.link_status)
2071 					printf(
2072 					"Port %d Link Up. speed %u Mbps - %s\n",
2073 					portid, link.link_speed,
2074 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2075 					"full-duplex" : "half-duplex");
2076 					printf("Port %d Link Down\n", portid);
2077 				continue;
2078 			}
2079 			/* clear all_ports_up flag if any link down */
2080 			if (link.link_status == ETH_LINK_DOWN) {
2081 				all_ports_up = 0;
2082 				break;
2083 			}
2084 		}
2085 		/* after finally printing all link status, get out */
2086 		if (print_flag == 1)
2087 			break;
2088 
2089 		if (all_ports_up == 0) {
2090 			fflush(stdout);
2091 			rte_delay_ms(CHECK_INTERVAL);
2092 		}
2093 
2094 		/* set the print_flag if all ports up or timeout */
2095 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2096 			print_flag = 1;
2097 		}
2098 
2099 		if (lsc_interrupt)
2100 			break;
2101 	}
2102 }
2103 
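/*
 * Deferred handler for a device removal interrupt: pause forwarding if the
 * port is still in use, then stop, close and detach the port, resuming
 * forwarding afterwards if it had to be paused.
 */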
2104 static void
2105 rmv_event_callback(void *arg)
2106 {
2107 	int need_to_start = 0;
2108 	int org_no_link_check = no_link_check;
2109 	portid_t port_id = (intptr_t)arg;
2110 
2111 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2112 
2113 	if (!test_done && port_is_forwarding(port_id)) {
2114 		need_to_start = 1;
2115 		stop_packet_forwarding();
2116 	}
2117 	no_link_check = 1;
2118 	stop_port(port_id);
2119 	no_link_check = org_no_link_check;
2120 	close_port(port_id);
2121 	detach_port(port_id);
2122 	if (need_to_start)
2123 		start_packet_forwarding(0);
2124 }
2125 
2126 /* This function is used by the interrupt thread */
2127 static int
2128 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2129 		  void *ret_param)
2130 {
2131 	static const char * const event_desc[] = {
2132 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2133 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2134 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2135 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2136 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2137 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2138 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2139 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2140 		[RTE_ETH_EVENT_NEW] = "device probed",
2141 		[RTE_ETH_EVENT_DESTROY] = "device released",
2142 		[RTE_ETH_EVENT_MAX] = NULL,
2143 	};
2144 
2145 	RTE_SET_USED(param);
2146 	RTE_SET_USED(ret_param);
2147 
2148 	if (type >= RTE_ETH_EVENT_MAX) {
2149 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2150 			port_id, __func__, type);
2151 		fflush(stderr);
2152 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2153 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2154 			event_desc[type]);
2155 		fflush(stdout);
2156 	}
2157 
2158 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2159 		return 0;
2160 
2161 	switch (type) {
2162 	case RTE_ETH_EVENT_INTR_RMV:
2163 		if (rte_eal_alarm_set(100000,
2164 				rmv_event_callback, (void *)(intptr_t)port_id))
2165 			fprintf(stderr, "Could not set up deferred device removal\n");
2166 		break;
2167 	default:
2168 		break;
2169 	}
2170 	return 0;
2171 }
2172 
2173 /* This function is used by the interrupt thread */
2174 static void
2175 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2176 			     __rte_unused void *arg)
2177 {
2178 	if (type >= RTE_DEV_EVENT_MAX) {
2179 		fprintf(stderr, "%s called upon invalid event %d\n",
2180 			__func__, type);
2181 		fflush(stderr);
2182 	}
2183 
2184 	switch (type) {
2185 	case RTE_DEV_EVENT_REMOVE:
2186 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2187 			device_name);
2188 		/* TODO: once failure handling is finished, stop packet
2189 		 * forwarding, then stop, close and detach the port.
2190 		 */
2191 		break;
2192 	case RTE_DEV_EVENT_ADD:
2193 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2194 			device_name);
2195 		/* TODO: once the kernel driver binding is finished,
2196 		 * attach the port.
2197 		 */
2198 		break;
2199 	default:
2200 		break;
2201 	}
2202 }
2203 
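/*
 * Program the user-supplied Tx queue to stats-counter mappings of one port
 * into the device. Returns 0 on success, or the error code returned by the
 * ethdev call.
 */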
2204 static int
2205 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2206 {
2207 	uint16_t i;
2208 	int diag;
2209 	uint8_t mapping_found = 0;
2210 
2211 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2212 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2213 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2214 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2215 					tx_queue_stats_mappings[i].queue_id,
2216 					tx_queue_stats_mappings[i].stats_counter_id);
2217 			if (diag != 0)
2218 				return diag;
2219 			mapping_found = 1;
2220 		}
2221 	}
2222 	if (mapping_found)
2223 		port->tx_queue_stats_mapping_enabled = 1;
2224 	return 0;
2225 }
2226 
2227 static int
2228 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2229 {
2230 	uint16_t i;
2231 	int diag;
2232 	uint8_t mapping_found = 0;
2233 
2234 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2235 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2236 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2237 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2238 					rx_queue_stats_mappings[i].queue_id,
2239 					rx_queue_stats_mappings[i].stats_counter_id);
2240 			if (diag != 0)
2241 				return diag;
2242 			mapping_found = 1;
2243 		}
2244 	}
2245 	if (mapping_found)
2246 		port->rx_queue_stats_mapping_enabled = 1;
2247 	return 0;
2248 }
2249 
2250 static void
2251 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2252 {
2253 	int diag = 0;
2254 
2255 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2256 	if (diag != 0) {
2257 		if (diag == -ENOTSUP) {
2258 			port->tx_queue_stats_mapping_enabled = 0;
2259 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2260 		} else
2262 			rte_exit(EXIT_FAILURE,
2263 					"set_tx_queue_stats_mapping_registers "
2264 					"failed for port id=%d diag=%d\n",
2265 					pi, diag);
2266 	}
2267 
2268 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2269 	if (diag != 0) {
2270 		if (diag == -ENOTSUP) {
2271 			port->rx_queue_stats_mapping_enabled = 0;
2272 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2273 		} else
2275 			rte_exit(EXIT_FAILURE,
2276 					"set_rx_queue_stats_mapping_registers "
2277 					"failed for port id=%d diag=%d\n",
2278 					pi, diag);
2279 	}
2280 }
2281 
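/*
 * Seed the per-queue Rx/Tx configuration of a port from the PMD defaults,
 * then apply the thresholds given on the command line; any value left at
 * RTE_PMD_PARAM_UNSET keeps the PMD default.
 */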
2282 static void
2283 rxtx_port_config(struct rte_port *port)
2284 {
2285 	uint16_t qid;
2286 
2287 	for (qid = 0; qid < nb_rxq; qid++) {
2288 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2289 
2290 		/* Check if any Rx parameters have been passed */
2291 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2292 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2293 
2294 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2295 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2296 
2297 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2298 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2299 
2300 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2301 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2302 
2303 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2304 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2305 
2306 		port->nb_rx_desc[qid] = nb_rxd;
2307 	}
2308 
2309 	for (qid = 0; qid < nb_txq; qid++) {
2310 		port->tx_conf[qid] = port->dev_info.default_txconf;
2311 
2312 		/* Check if any Tx parameters have been passed */
2313 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2314 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2315 
2316 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2317 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2318 
2319 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2320 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2321 
2322 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2323 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2324 
2325 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2326 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2327 
2328 		port->nb_tx_desc[qid] = nb_txd;
2329 	}
2330 }
2331 
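/*
 * Build the default configuration of every probed port: RSS when more than
 * one Rx queue is used, queue thresholds, MAC address and, when requested,
 * LSC/RMV interrupts.
 */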
2332 void
2333 init_port_config(void)
2334 {
2335 	portid_t pid;
2336 	struct rte_port *port;
2337 	struct rte_eth_dev_info dev_info;
2338 
2339 	RTE_ETH_FOREACH_DEV(pid) {
2340 		port = &ports[pid];
2341 		port->dev_conf.fdir_conf = fdir_conf;
2342 		if (nb_rxq > 1) {
2343 			rte_eth_dev_info_get(pid, &dev_info);
2344 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2345 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2346 				rss_hf & dev_info.flow_type_rss_offloads;
2347 		} else {
2348 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2349 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2350 		}
2351 
2352 		if (port->dcb_flag == 0) {
2353 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2354 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2355 			else
2356 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2357 		}
2358 
2359 		rxtx_port_config(port);
2360 
2361 		rte_eth_macaddr_get(pid, &port->eth_addr);
2362 
2363 		map_port_queue_stats_mapping_registers(pid, port);
2364 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2365 		rte_pmd_ixgbe_bypass_init(pid);
2366 #endif
2367 
2368 		if (lsc_interrupt &&
2369 		    (rte_eth_devices[pid].data->dev_flags &
2370 		     RTE_ETH_DEV_INTR_LSC))
2371 			port->dev_conf.intr_conf.lsc = 1;
2372 		if (rmv_interrupt &&
2373 		    (rte_eth_devices[pid].data->dev_flags &
2374 		     RTE_ETH_DEV_INTR_RMV))
2375 			port->dev_conf.intr_conf.rmv = 1;
2376 
2377 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2378 		/* Detect softnic port */
2379 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2380 			port->softnic_enable = 1;
2381 			memset(&port->softport, 0, sizeof(struct softnic_port));
2382 
2383 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2384 				port->softport.tm_flag = 1;
2385 		}
2386 #endif
2387 	}
2388 }
2389 
2390 void set_port_slave_flag(portid_t slave_pid)
2391 {
2392 	struct rte_port *port;
2393 
2394 	port = &ports[slave_pid];
2395 	port->slave_flag = 1;
2396 }
2397 
2398 void clear_port_slave_flag(portid_t slave_pid)
2399 {
2400 	struct rte_port *port;
2401 
2402 	port = &ports[slave_pid];
2403 	port->slave_flag = 0;
2404 }
2405 
2406 uint8_t port_is_bonding_slave(portid_t slave_pid)
2407 {
2408 	struct rte_port *port;
2409 
2410 	port = &ports[slave_pid];
2411 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2412 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2413 		return 1;
2414 	return 0;
2415 }
2416 
2417 const uint16_t vlan_tags[] = {
2418 		0,  1,  2,  3,  4,  5,  6,  7,
2419 		8,  9, 10, 11,  12, 13, 14, 15,
2420 		16, 17, 18, 19, 20, 21, 22, 23,
2421 		24, 25, 26, 27, 28, 29, 30, 31
2422 };
2423 
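/*
 * Fill eth_conf with a DCB configuration: DCB combined with VMDq when
 * dcb_mode is DCB_VT_ENABLED, otherwise plain DCB with RSS on the Rx side.
 */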
2424 static int
2425 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2426 		 enum dcb_mode_enable dcb_mode,
2427 		 enum rte_eth_nb_tcs num_tcs,
2428 		 uint8_t pfc_en)
2429 {
2430 	uint8_t i;
2431 
2432 	/*
2433 	 * Build the DCB+VT configuration from the vlan_tags[] array given
2434 	 * above and from the number of traffic classes available for use.
2435 	 */
2436 	if (dcb_mode == DCB_VT_ENABLED) {
2437 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2438 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2439 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2440 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2441 
2442 		/* VMDQ+DCB RX and TX configurations */
2443 		vmdq_rx_conf->enable_default_pool = 0;
2444 		vmdq_rx_conf->default_pool = 0;
2445 		vmdq_rx_conf->nb_queue_pools =
2446 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2447 		vmdq_tx_conf->nb_queue_pools =
2448 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2449 
2450 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2451 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2452 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2453 			vmdq_rx_conf->pool_map[i].pools =
2454 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2455 		}
2456 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2457 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2458 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2459 		}
2460 
2461 		/* set DCB mode of RX and TX of multiple queues */
2462 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2463 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2464 	} else {
2465 		struct rte_eth_dcb_rx_conf *rx_conf =
2466 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2467 		struct rte_eth_dcb_tx_conf *tx_conf =
2468 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2469 
2470 		rx_conf->nb_tcs = num_tcs;
2471 		tx_conf->nb_tcs = num_tcs;
2472 
2473 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2474 			rx_conf->dcb_tc[i] = i % num_tcs;
2475 			tx_conf->dcb_tc[i] = i % num_tcs;
2476 		}
2477 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2478 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2479 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2480 	}
2481 
2482 	if (pfc_en)
2483 		eth_conf->dcb_capability_en =
2484 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2485 	else
2486 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2487 
2488 	return 0;
2489 }
2490 
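/*
 * Reconfigure one port for DCB: build the DCB configuration, reconfigure
 * the device with it, adjust the global Rx/Tx queue counts to match, and
 * install a VLAN filter for every entry of vlan_tags[].
 */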
2491 int
2492 init_port_dcb_config(portid_t pid,
2493 		     enum dcb_mode_enable dcb_mode,
2494 		     enum rte_eth_nb_tcs num_tcs,
2495 		     uint8_t pfc_en)
2496 {
2497 	struct rte_eth_conf port_conf;
2498 	struct rte_port *rte_port;
2499 	int retval;
2500 	uint16_t i;
2501 
2502 	rte_port = &ports[pid];
2503 
2504 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2505 	/* Enter DCB configuration status */
2506 	dcb_config = 1;
2507 
2508 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2509 	port_conf.txmode = rte_port->dev_conf.txmode;
2510 
2511 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
2512 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2513 	if (retval < 0)
2514 		return retval;
2515 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2516 
2517 	/* re-configure the device */
2518 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;
2519 
2520 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2521 
2522 	/* If dev_info.vmdq_pool_base is greater than 0,
2523 	 * the queue id of vmdq pools is started after pf queues.
2524 	 */
2525 	if (dcb_mode == DCB_VT_ENABLED &&
2526 	    rte_port->dev_info.vmdq_pool_base > 0) {
2527 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2528 			" for port %d.\n", pid);
2529 		return -1;
2530 	}
2531 
2532 	/* Assume the ports in testpmd have the same DCB capability
2533 	 * and the same number of rxq and txq in DCB mode
2534 	 */
2535 	if (dcb_mode == DCB_VT_ENABLED) {
2536 		if (rte_port->dev_info.max_vfs > 0) {
2537 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2538 			nb_txq = rte_port->dev_info.nb_tx_queues;
2539 		} else {
2540 			nb_rxq = rte_port->dev_info.max_rx_queues;
2541 			nb_txq = rte_port->dev_info.max_tx_queues;
2542 		}
2543 	} else {
2544 		/*if vt is disabled, use all pf queues */
2545 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2546 			nb_rxq = rte_port->dev_info.max_rx_queues;
2547 			nb_txq = rte_port->dev_info.max_tx_queues;
2548 		} else {
2549 			nb_rxq = (queueid_t)num_tcs;
2550 			nb_txq = (queueid_t)num_tcs;
2552 		}
2553 	}
2554 	rx_free_thresh = 64;
2555 
2556 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2557 
2558 	rxtx_port_config(rte_port);
2559 	/* VLAN filter */
2560 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2561 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2562 		rx_vft_set(pid, vlan_tags[i], 1);
2563 
2564 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2565 	map_port_queue_stats_mapping_registers(pid, rte_port);
2566 
2567 	rte_port->dcb_flag = 1;
2568 
2569 	return 0;
2570 }
2571 
2572 static void
2573 init_port(void)
2574 {
2575 	/* Configuration of Ethernet ports. */
2576 	ports = rte_zmalloc("testpmd: ports",
2577 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2578 			    RTE_CACHE_LINE_SIZE);
2579 	if (ports == NULL) {
2580 		rte_exit(EXIT_FAILURE,
2581 				"rte_zmalloc(%d struct rte_port) failed\n",
2582 				RTE_MAX_ETHPORTS);
2583 	}
2584 }
2585 
2586 static void
2587 force_quit(void)
2588 {
2589 	pmd_test_exit();
2590 	prompt_exit();
2591 }
2592 
2593 static void
2594 print_stats(void)
2595 {
2596 	uint8_t i;
2597 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2598 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2599 
2600 	/* Clear screen and move to top left */
2601 	printf("%s%s", clr, top_left);
2602 
2603 	printf("\nPort statistics ====================================");
2604 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2605 		nic_stats_display(fwd_ports_ids[i]);
2606 }
2607 
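/*
 * SIGINT/SIGTERM handler: release the packet capture and latency-stats
 * frameworks, stop and close all ports, then re-raise the signal with the
 * default handler so the process exits with the conventional status.
 */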
2608 static void
2609 signal_handler(int signum)
2610 {
2611 	if (signum == SIGINT || signum == SIGTERM) {
2612 		printf("\nSignal %d received, preparing to exit...\n",
2613 				signum);
2614 #ifdef RTE_LIBRTE_PDUMP
2615 		/* uninitialize packet capture framework */
2616 		rte_pdump_uninit();
2617 #endif
2618 #ifdef RTE_LIBRTE_LATENCY_STATS
2619 		rte_latencystats_uninit();
2620 #endif
2621 		force_quit();
2622 		/* Set flag to indicate forced termination. */
2623 		f_quit = 1;
2624 		/* exit with the expected status */
2625 		signal(signum, SIG_DFL);
2626 		kill(getpid(), signum);
2627 	}
2628 }
2629 
2630 int
2631 main(int argc, char** argv)
2632 {
2633 	int diag;
2634 	portid_t port_id;
2635 	int ret;
2636 
2637 	signal(SIGINT, signal_handler);
2638 	signal(SIGTERM, signal_handler);
2639 
2640 	diag = rte_eal_init(argc, argv);
2641 	if (diag < 0)
2642 		rte_panic("Cannot init EAL\n");
2643 
2644 	testpmd_logtype = rte_log_register("testpmd");
2645 	if (testpmd_logtype < 0)
2646 		rte_panic("Cannot register log type");
2647 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2648 
2649 #ifdef RTE_LIBRTE_PDUMP
2650 	/* initialize packet capture framework */
2651 	rte_pdump_init(NULL);
2652 #endif
2653 
2654 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2655 	if (nb_ports == 0)
2656 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2657 
2658 	/* allocate port structures, and init them */
2659 	init_port();
2660 
2661 	set_def_fwd_config();
2662 	if (nb_lcores == 0)
2663 		rte_panic("Empty set of forwarding logical cores - check the "
2664 			  "core mask supplied in the command parameters\n");
2665 
2666 	/* Bitrate/latency stats disabled by default */
2667 #ifdef RTE_LIBRTE_BITRATE
2668 	bitrate_enabled = 0;
2669 #endif
2670 #ifdef RTE_LIBRTE_LATENCY_STATS
2671 	latencystats_enabled = 0;
2672 #endif
2673 
2674 	/* on FreeBSD, mlockall() is disabled by default */
2675 #ifdef RTE_EXEC_ENV_BSDAPP
2676 	do_mlockall = 0;
2677 #else
2678 	do_mlockall = 1;
2679 #endif
2680 
2681 	argc -= diag;
2682 	argv += diag;
2683 	if (argc > 1)
2684 		launch_args_parse(argc, argv);
2685 
2686 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2687 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2688 			strerror(errno));
2689 	}
2690 
2691 	if (tx_first && interactive)
2692 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2693 				"interactive mode.\n");
2694 
2695 	if (tx_first && lsc_interrupt) {
2696 		printf("Warning: lsc_interrupt needs to be off when "
2697 				"using tx_first. Disabling.\n");
2698 		lsc_interrupt = 0;
2699 	}
2700 
2701 	if (!nb_rxq && !nb_txq)
2702 		printf("Warning: either the rx or tx queue count should be non-zero\n");
2703 
2704 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2705 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2706 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2707 		       nb_rxq, nb_txq);
2708 
2709 	init_config();
2710 
2711 	if (hot_plug) {
2712 		/* enable hot plug monitoring */
2713 		ret = rte_dev_event_monitor_start();
2714 		if (ret) {
2715 			rte_errno = EINVAL;
2716 			return -1;
2717 		}
2718 		eth_dev_event_callback_register();
2719 
2720 	}
2721 
2722 	if (start_port(RTE_PORT_ALL) != 0)
2723 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2724 
2725 	/* set all ports to promiscuous mode by default */
2726 	RTE_ETH_FOREACH_DEV(port_id)
2727 		rte_eth_promiscuous_enable(port_id);
2728 
2729 	/* Init metrics library */
2730 	rte_metrics_init(rte_socket_id());
2731 
2732 #ifdef RTE_LIBRTE_LATENCY_STATS
2733 	if (latencystats_enabled != 0) {
2734 		int ret = rte_latencystats_init(1, NULL);
2735 		if (ret)
2736 			printf("Warning: latencystats init()"
2737 				" returned error %d\n", ret);
2738 		printf("Latencystats running on lcore %d\n",
2739 			latencystats_lcore_id);
2740 	}
2741 #endif
2742 
2743 	/* Setup bitrate stats */
2744 #ifdef RTE_LIBRTE_BITRATE
2745 	if (bitrate_enabled != 0) {
2746 		bitrate_data = rte_stats_bitrate_create();
2747 		if (bitrate_data == NULL)
2748 			rte_exit(EXIT_FAILURE,
2749 				"Could not allocate bitrate data.\n");
2750 		rte_stats_bitrate_reg(bitrate_data);
2751 	}
2752 #endif
2753 
2754 #ifdef RTE_LIBRTE_CMDLINE
2755 	if (strlen(cmdline_filename) != 0)
2756 		cmdline_read_from_file(cmdline_filename);
2757 
2758 	if (interactive == 1) {
2759 		if (auto_start) {
2760 			printf("Start automatic packet forwarding\n");
2761 			start_packet_forwarding(0);
2762 		}
2763 		prompt();
2764 		pmd_test_exit();
2765 	} else
2766 #endif
2767 	{
2768 		char c;
2769 		int rc;
2770 
2771 		f_quit = 0;
2772 
2773 		printf("No command-line core given, starting packet forwarding\n");
2774 		start_packet_forwarding(tx_first);
2775 		if (stats_period != 0) {
2776 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2777 			uint64_t timer_period;
2778 
2779 			/* Convert to number of cycles */
2780 			timer_period = stats_period * rte_get_timer_hz();
2781 
2782 			while (f_quit == 0) {
2783 				cur_time = rte_get_timer_cycles();
2784 				diff_time += cur_time - prev_time;
2785 
2786 				if (diff_time >= timer_period) {
2787 					print_stats();
2788 					/* Reset the timer */
2789 					diff_time = 0;
2790 				}
2791 				/* Sleep to avoid unnecessary checks */
2792 				prev_time = cur_time;
2793 				sleep(1);
2794 			}
2795 		}
2796 
2797 		printf("Press enter to exit\n");
2798 		rc = read(0, &c, 1);
2799 		pmd_test_exit();
2800 		if (rc < 0)
2801 			return 1;
2802 	}
2803 
2804 	return 0;
2805 }
2806