xref: /dpdk/app/test-pmd/testpmd.c (revision 1f84c4695a567edc8cd8dc2572aa320cdeb9c358)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63 
64 #include "testpmd.h"
65 
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68 
69 /* Use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
73 char cmdline_filename[PATH_MAX] = {0};
74 
75 /*
76  * NUMA support configuration.
77  * When set, NUMA support dispatches the allocation of the RX and TX
78  * memory rings and of the DMA memory buffers (mbufs) for the probed
79  * ports among CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
82 uint8_t numa_support = 1; /**< numa enabled by default */
83 
84 /*
85  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86  * not configured.
87  */
88 uint8_t socket_num = UMA_NO_CONFIG;
89 
90 /*
91  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
92  */
93 uint8_t mp_anon = 0;
94 
95 /*
96  * Store the sockets on which the memory pools used by the ports
97  * are allocated.
98  */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100 
101 /*
102  * Store the sockets on which the RX rings used by the ports
103  * are allocated.
104  */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106 
107 /*
108  * Store the sockets on which the TX rings used by the ports
109  * are allocated.
110  */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112 
113 /*
114  * Record the Ethernet address of peer target ports to which packets are
115  * forwarded.
116  * Must be instantiated with the Ethernet addresses of peer traffic generator
117  * ports.
118  */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121 
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;	       /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129 
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
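
/*
 * Example of the constraint chain above (hypothetical values): if 8
 * forwarding lcores are available, nb_lcores = 8; "set corelist 0,1,2,3"
 * would give nb_cfg_lcores = 4; "set nbcore 2" would then give
 * nb_fwd_lcores = 2, preserving nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */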
139 
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142 
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
145 
146 /*
147  * Forwarding engines.
148  */
149 struct fwd_engine * fwd_engines[] = {
150 	&io_fwd_engine,
151 	&mac_fwd_engine,
152 	&mac_swap_engine,
153 	&flow_gen_engine,
154 	&rx_only_engine,
155 	&tx_only_engine,
156 	&csum_fwd_engine,
157 	&icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 	&softnic_tm_engine,
160 	&softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163 	&ieee1588_fwd_engine,
164 #endif
165 	NULL,
166 };
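
/*
 * fwd_engines[] is NULL-terminated, so an engine is found by scanning up
 * to the sentinel. A minimal lookup sketch (hypothetical helper; the
 * actual mode-selection logic lives elsewhere in testpmd):
 */
static __rte_unused struct fwd_engine *
find_fwd_engine(const char *name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}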
167 
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178 
179 /*
180  * In a container, a process run with the 'stats-period' option cannot be
181  * terminated normally; this flag makes the stats loop exit on SIGINT/SIGTERM.
182  */
183 uint8_t f_quit;
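
/*
 * A sketch of the SIGINT/SIGTERM handler that raises this flag (the real
 * handler, installed in main() later in this file, also performs some
 * extra cleanup):
 *
 *	static void
 *	signal_handler(int signum)
 *	{
 *		if (signum == SIGINT || signum == SIGTERM)
 *			f_quit = 1;
 *	}
 */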
184 
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
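
/*
 * Example: the CLI command "set txpkts 64,128" would yield
 * tx_pkt_seg_lengths = {64, 128}, tx_pkt_nb_segs = 2 and
 * tx_pkt_length = 192 (hypothetical values for illustration).
 */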
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
200 /* whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
202 
203 /* Whether DCB testing is in progress */
204 uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX streams before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273 
274 /*
275  * Avoid checking the link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290 
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
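
/*
 * Each bit in event_print_mask selects one rte_eth_event_type for display.
 * A minimal sketch of the membership test (hypothetical helper; the real
 * check is performed where events are handled):
 */
static __rte_unused int
event_is_displayed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}
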
302 /*
303  * Decide if all memory is locked for performance.
304  */
305 int do_mlockall = 0;
306 
307 /*
308  * NIC bypass mode configuration options.
309  */
310 
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
314 #endif
315 
316 
317 #ifdef RTE_LIBRTE_LATENCY_STATS
318 
319 /*
320  * Set when latency stats are enabled on the command line.
321  */
322 uint8_t latencystats_enabled;
323 
324 /*
325  * Lcore ID to serive latency statistics.
326  */
327 lcoreid_t latencystats_lcore_id = -1;
328 
329 #endif
330 
331 /*
332  * Ethernet device configuration.
333  */
334 struct rte_eth_rxmode rx_mode = {
335 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
337 	.ignore_offload_bitfield = 1,
338 };
339 
340 struct rte_eth_txmode tx_mode = {
341 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 };
343 
344 struct rte_fdir_conf fdir_conf = {
345 	.mode = RTE_FDIR_MODE_NONE,
346 	.pballoc = RTE_FDIR_PBALLOC_64K,
347 	.status = RTE_FDIR_REPORT_STATUS,
348 	.mask = {
349 		.vlan_tci_mask = 0x0,
350 		.ipv4_mask     = {
351 			.src_ip = 0xFFFFFFFF,
352 			.dst_ip = 0xFFFFFFFF,
353 		},
354 		.ipv6_mask     = {
355 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357 		},
358 		.src_port_mask = 0xFFFF,
359 		.dst_port_mask = 0xFFFF,
360 		.mac_addr_byte_mask = 0xFF,
361 		.tunnel_type_mask = 1,
362 		.tunnel_id_mask = 0xFFFFFFFF,
363 	},
364 	.drop_queue = 127,
365 };
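
/*
 * fdir_conf is not handed to the PMD on its own: testpmd copies it into
 * each port's device configuration, which rte_eth_dev_configure() then
 * consumes. A sketch of that flow (with port/pid as set up in
 * init_port_config()):
 *
 *	port->dev_conf.fdir_conf = fdir_conf;
 *	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port->dev_conf);
 */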
366 
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368 
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371 
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374 
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377 
378 /*
379  * Display zero values by default for xstats
380  */
381 uint8_t xstats_hide_zero;
382 
383 unsigned int num_sockets = 0;
384 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385 
386 #ifdef RTE_LIBRTE_BITRATE
387 /* Bitrate statistics */
388 struct rte_stats_bitrates *bitrate_data;
389 lcoreid_t bitrate_lcore_id;
390 uint8_t bitrate_enabled;
391 #endif
392 
393 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
394 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395 
396 /* Forward function declarations */
397 static void map_port_queue_stats_mapping_registers(portid_t pi,
398 						   struct rte_port *port);
399 static void check_all_ports_link_status(uint32_t port_mask);
400 static int eth_event_callback(portid_t port_id,
401 			      enum rte_eth_event_type type,
402 			      void *param, void *ret_param);
403 static void eth_dev_event_callback(char *device_name,
404 				enum rte_dev_event_type type,
405 				void *param);
406 static int eth_dev_event_callback_register(void);
407 static int eth_dev_event_callback_unregister(void);
408 
409 
410 /*
411  * Check if all the ports are started.
412  * If yes, return positive value. If not, return zero.
413  */
414 static int all_ports_started(void);
415 
416 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
417 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418 
419 /*
420  * Helper function to check if a socket ID has already been discovered.
421  * Return a positive value if the socket ID is new, zero if it is already known.
422  */
423 int
424 new_socket_id(unsigned int socket_id)
425 {
426 	unsigned int i;
427 
428 	for (i = 0; i < num_sockets; i++) {
429 		if (socket_ids[i] == socket_id)
430 			return 0;
431 	}
432 	return 1;
433 }
434 
435 /*
436  * Setup default configuration.
437  */
438 static void
439 set_default_fwd_lcores_config(void)
440 {
441 	unsigned int i;
442 	unsigned int nb_lc;
443 	unsigned int sock_num;
444 
445 	nb_lc = 0;
446 	for (i = 0; i < RTE_MAX_LCORE; i++) {
447 		sock_num = rte_lcore_to_socket_id(i);
448 		if (new_socket_id(sock_num)) {
449 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
450 				rte_exit(EXIT_FAILURE,
451 					 "Total sockets greater than %u\n",
452 					 RTE_MAX_NUMA_NODES);
453 			}
454 			socket_ids[num_sockets++] = sock_num;
455 		}
456 		if (!rte_lcore_is_enabled(i))
457 			continue;
458 		if (i == rte_get_master_lcore())
459 			continue;
460 		fwd_lcores_cpuids[nb_lc++] = i;
461 	}
462 	nb_lcores = (lcoreid_t) nb_lc;
463 	nb_cfg_lcores = nb_lcores;
464 	nb_fwd_lcores = 1;
465 }
466 
467 static void
468 set_def_peer_eth_addrs(void)
469 {
470 	portid_t i;
471 
472 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
473 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
474 		peer_eth_addrs[i].addr_bytes[5] = i;
475 	}
476 }
477 
478 static void
479 set_default_fwd_ports_config(void)
480 {
481 	portid_t pt_id;
482 	int i = 0;
483 
484 	RTE_ETH_FOREACH_DEV(pt_id)
485 		fwd_ports_ids[i++] = pt_id;
486 
487 	nb_cfg_ports = nb_ports;
488 	nb_fwd_ports = nb_ports;
489 }
490 
491 void
492 set_def_fwd_config(void)
493 {
494 	set_default_fwd_lcores_config();
495 	set_def_peer_eth_addrs();
496 	set_default_fwd_ports_config();
497 }
498 
499 /*
500  * Configuration initialisation done once at init time.
501  */
502 static void
503 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
504 		 unsigned int socket_id)
505 {
506 	char pool_name[RTE_MEMPOOL_NAMESIZE];
507 	struct rte_mempool *rte_mp = NULL;
508 	uint32_t mb_size;
509 
510 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
511 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 
513 	TESTPMD_LOG(INFO,
514 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
515 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
516 
517 	if (mp_anon != 0) {
518 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
519 			mb_size, (unsigned) mb_mempool_cache,
520 			sizeof(struct rte_pktmbuf_pool_private),
521 			socket_id, 0);
522 		if (rte_mp == NULL)
523 			goto err;
524 
525 		if (rte_mempool_populate_anon(rte_mp) == 0) {
526 			rte_mempool_free(rte_mp);
527 			rte_mp = NULL;
528 			goto err;
529 		}
530 		rte_pktmbuf_pool_init(rte_mp, NULL);
531 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532 	} else {
533 		/* wrapper to rte_mempool_create() */
534 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
535 				rte_mbuf_best_mempool_ops());
536 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538 	}
539 
540 err:
541 	if (rte_mp == NULL) {
542 		rte_exit(EXIT_FAILURE,
543 			"Creation of mbuf pool for socket %u failed: %s\n",
544 			socket_id, rte_strerror(rte_errno));
545 	} else if (verbose_level > 0) {
546 		rte_mempool_dump(stdout, rte_mp);
547 	}
548 }
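
/*
 * A minimal sketch of how a pool created above is consumed (hypothetical
 * helper; testpmd normally reaches pools through fwd_lcores[]->mbp):
 * look the pool up by socket, then allocate and free one mbuf from it.
 */
static __rte_unused void
mbuf_pool_smoke_test(unsigned int socket_id)
{
	struct rte_mempool *mp = mbuf_pool_find(socket_id);
	struct rte_mbuf *m;

	if (mp == NULL)
		return;
	m = rte_pktmbuf_alloc(mp);	/* take one mbuf from the pool */
	if (m != NULL)
		rte_pktmbuf_free(m);	/* and return it */
}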
549 
550 /*
551  * Check whether the given socket ID is valid in NUMA mode.
552  * Return 0 if valid, -1 otherwise.
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557 	static int warning_once = 0;
558 
559 	if (new_socket_id(socket_id)) {
560 		if (!warning_once && numa_support)
561 			printf("Warning: NUMA should be configured manually by"
562 			       " using --port-numa-config and"
563 			       " --ring-numa-config parameters along with"
564 			       " --numa.\n");
565 		warning_once = 1;
566 		return -1;
567 	}
568 	return 0;
569 }
570 
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid returns the port ID with the smallest value of
574  * max_rx_queues among all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580 	portid_t pi;
581 	struct rte_eth_dev_info dev_info;
582 
583 	RTE_ETH_FOREACH_DEV(pi) {
584 		rte_eth_dev_info_get(pi, &dev_info);
585 		if (dev_info.max_rx_queues < allowed_max_rxq) {
586 			allowed_max_rxq = dev_info.max_rx_queues;
587 			*pid = pi;
588 		}
589 	}
590 	return allowed_max_rxq;
591 }
592 
593 /*
594  * Check whether the requested number of RX queues is valid.
595  * It is valid if it does not exceed the maximum number of
596  * RX queues of any port.
597  * Return 0 if valid, -1 otherwise.
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602 	queueid_t allowed_max_rxq;
603 	portid_t pid = 0;
604 
605 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 	if (rxq > allowed_max_rxq) {
607 		printf("Fail: input rxq (%u) can't be greater "
608 		       "than max_rx_queues (%u) of port %u\n",
609 		       rxq,
610 		       allowed_max_rxq,
611 		       pid);
612 		return -1;
613 	}
614 	return 0;
615 }
616 
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid returns the port ID with the smallest value of
620  * max_tx_queues among all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
626 	portid_t pi;
627 	struct rte_eth_dev_info dev_info;
628 
629 	RTE_ETH_FOREACH_DEV(pi) {
630 		rte_eth_dev_info_get(pi, &dev_info);
631 		if (dev_info.max_tx_queues < allowed_max_txq) {
632 			allowed_max_txq = dev_info.max_tx_queues;
633 			*pid = pi;
634 		}
635 	}
636 	return allowed_max_txq;
637 }
638 
639 /*
640  * Check whether the requested number of TX queues is valid.
641  * It is valid if it does not exceed the maximum number of
642  * TX queues of any port.
643  * Return 0 if valid, -1 otherwise.
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648 	queueid_t allowed_max_txq;
649 	portid_t pid = 0;
650 
651 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 	if (txq > allowed_max_txq) {
653 		printf("Fail: input txq (%u) can't be greater "
654 		       "than max_tx_queues (%u) of port %u\n",
655 		       txq,
656 		       allowed_max_txq,
657 		       pid);
658 		return -1;
659 	}
660 	return 0;
661 }
662 
663 static void
664 init_config(void)
665 {
666 	portid_t pid;
667 	struct rte_port *port;
668 	struct rte_mempool *mbp;
669 	unsigned int nb_mbuf_per_pool;
670 	lcoreid_t  lc_id;
671 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 	struct rte_gro_param gro_param;
673 	uint32_t gso_types;
674 
675 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
676 
677 	if (numa_support) {
678 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
681 	}
682 
683 	/* Configuration of logical cores. */
684 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 				sizeof(struct fwd_lcore *) * nb_lcores,
686 				RTE_CACHE_LINE_SIZE);
687 	if (fwd_lcores == NULL) {
688 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 							"failed\n", nb_lcores);
690 	}
691 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 					       sizeof(struct fwd_lcore),
694 					       RTE_CACHE_LINE_SIZE);
695 		if (fwd_lcores[lc_id] == NULL) {
696 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
697 								"failed\n");
698 		}
699 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
700 	}
701 
702 	RTE_ETH_FOREACH_DEV(pid) {
703 		port = &ports[pid];
704 		/* Apply default TxRx configuration for all ports */
705 		port->dev_conf.txmode = tx_mode;
706 		port->dev_conf.rxmode = rx_mode;
707 		rte_eth_dev_info_get(pid, &port->dev_info);
708 		if (!(port->dev_info.tx_offload_capa &
709 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
710 			port->dev_conf.txmode.offloads &=
711 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
712 		if (numa_support) {
713 			if (port_numa[pid] != NUMA_NO_CONFIG)
714 				port_per_socket[port_numa[pid]]++;
715 			else {
716 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
717 
718 				/* if socket_id is invalid, set to 0 */
719 				if (check_socket_id(socket_id) < 0)
720 					socket_id = 0;
721 				port_per_socket[socket_id]++;
722 			}
723 		}
724 
725 		/* set flag to initialize port/queue */
726 		port->need_reconfig = 1;
727 		port->need_reconfig_queues = 1;
728 	}
729 
730 	/*
731 	 * Create mbuf pools.
732 	 * If NUMA support is disabled, create a single mbuf pool in
733 	 * socket 0 memory by default.
734 	 * Otherwise, create an mbuf pool in the memory of each discovered socket.
735 	 *
736 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
737 	 * nb_txd can be configured at run time.
738 	 */
739 	if (param_total_num_mbufs)
740 		nb_mbuf_per_pool = param_total_num_mbufs;
741 	else {
742 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
743 			(nb_lcores * mb_mempool_cache) +
744 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
745 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
746 	}
747 
748 	if (numa_support) {
749 		uint8_t i;
750 
751 		for (i = 0; i < num_sockets; i++)
752 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
753 					 socket_ids[i]);
754 	} else {
755 		if (socket_num == UMA_NO_CONFIG)
756 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
757 		else
758 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
759 						 socket_num);
760 	}
761 
762 	init_port_config();
763 
764 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
765 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
766 	/*
767 	 * Record which mbuf pool each logical core should use, if needed.
768 	 */
769 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
770 		mbp = mbuf_pool_find(
771 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
772 
773 		if (mbp == NULL)
774 			mbp = mbuf_pool_find(0);
775 		fwd_lcores[lc_id]->mbp = mbp;
776 		/* initialize GSO context */
777 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
778 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
779 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
780 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
781 			ETHER_CRC_LEN;
782 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
783 	}
784 
785 	/* Configuration of packet forwarding streams. */
786 	if (init_fwd_streams() < 0)
787 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
788 
789 	fwd_config_setup();
790 
791 	/* create a gro context for each lcore */
792 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
793 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
794 	gro_param.max_item_per_flow = MAX_PKT_BURST;
795 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
796 		gro_param.socket_id = rte_lcore_to_socket_id(
797 				fwd_lcores_cpuids[lc_id]);
798 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
799 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
800 			rte_exit(EXIT_FAILURE,
801 					"rte_gro_ctx_create() failed\n");
802 		}
803 	}
804 }
805 
806 
807 void
808 reconfig(portid_t new_port_id, unsigned socket_id)
809 {
810 	struct rte_port *port;
811 
812 	/* Reconfiguration of Ethernet ports. */
813 	port = &ports[new_port_id];
814 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
815 
816 	/* set flag to initialize port/queue */
817 	port->need_reconfig = 1;
818 	port->need_reconfig_queues = 1;
819 	port->socket_id = socket_id;
820 
821 	init_port_config();
822 }
823 
824 
825 int
826 init_fwd_streams(void)
827 {
828 	portid_t pid;
829 	struct rte_port *port;
830 	streamid_t sm_id, nb_fwd_streams_new;
831 	queueid_t q;
832 
833 	/* set the socket ID according to whether NUMA is enabled */
834 	RTE_ETH_FOREACH_DEV(pid) {
835 		port = &ports[pid];
836 		if (nb_rxq > port->dev_info.max_rx_queues) {
837 			printf("Fail: nb_rxq(%d) is greater than "
838 				"max_rx_queues(%d)\n", nb_rxq,
839 				port->dev_info.max_rx_queues);
840 			return -1;
841 		}
842 		if (nb_txq > port->dev_info.max_tx_queues) {
843 			printf("Fail: nb_txq(%d) is greater than "
844 				"max_tx_queues(%d)\n", nb_txq,
845 				port->dev_info.max_tx_queues);
846 			return -1;
847 		}
848 		if (numa_support) {
849 			if (port_numa[pid] != NUMA_NO_CONFIG)
850 				port->socket_id = port_numa[pid];
851 			else {
852 				port->socket_id = rte_eth_dev_socket_id(pid);
853 
854 				/* if socket_id is invalid, set to 0 */
855 				if (check_socket_id(port->socket_id) < 0)
856 					port->socket_id = 0;
857 			}
858 		}
859 		else {
860 			if (socket_num == UMA_NO_CONFIG)
861 				port->socket_id = 0;
862 			else
863 				port->socket_id = socket_num;
864 		}
865 	}
866 
867 	q = RTE_MAX(nb_rxq, nb_txq);
868 	if (q == 0) {
869 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
870 		return -1;
871 	}
872 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
873 	if (nb_fwd_streams_new == nb_fwd_streams)
874 		return 0;
875 	/* clear the old */
876 	if (fwd_streams != NULL) {
877 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
878 			if (fwd_streams[sm_id] == NULL)
879 				continue;
880 			rte_free(fwd_streams[sm_id]);
881 			fwd_streams[sm_id] = NULL;
882 		}
883 		rte_free(fwd_streams);
884 		fwd_streams = NULL;
885 	}
886 
887 	/* init new */
888 	nb_fwd_streams = nb_fwd_streams_new;
889 	if (nb_fwd_streams) {
890 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
891 			sizeof(struct fwd_stream *) * nb_fwd_streams,
892 			RTE_CACHE_LINE_SIZE);
893 		if (fwd_streams == NULL)
894 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
895 				 " (struct fwd_stream *)) failed\n",
896 				 nb_fwd_streams);
897 
898 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
899 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
900 				" struct fwd_stream", sizeof(struct fwd_stream),
901 				RTE_CACHE_LINE_SIZE);
902 			if (fwd_streams[sm_id] == NULL)
903 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
904 					 "(struct fwd_stream) failed\n");
905 		}
906 	}
907 
908 	return 0;
909 }
910 
911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
912 static void
913 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
914 {
915 	unsigned int total_burst;
916 	unsigned int nb_burst;
917 	unsigned int burst_stats[3];
918 	uint16_t pktnb_stats[3];
919 	uint16_t nb_pkt;
920 	int burst_percent[3];
921 
922 	/*
923 	 * First compute the total number of packet bursts and the
924 	 * two highest numbers of bursts of the same number of packets.
925 	 */
926 	total_burst = 0;
927 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
928 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
929 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
930 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
931 		if (nb_burst == 0)
932 			continue;
933 		total_burst += nb_burst;
934 		if (nb_burst > burst_stats[0]) {
935 			burst_stats[1] = burst_stats[0];
936 			pktnb_stats[1] = pktnb_stats[0];
937 			burst_stats[0] = nb_burst;
938 			pktnb_stats[0] = nb_pkt;
939 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
940 	}
941 	if (total_burst == 0)
942 		return;
943 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
944 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
945 	       burst_percent[0], (int) pktnb_stats[0]);
946 	if (burst_stats[0] == total_burst) {
947 		printf("]\n");
948 		return;
949 	}
950 	if (burst_stats[0] + burst_stats[1] == total_burst) {
951 		printf(" + %d%% of %d pkts]\n",
952 		       100 - burst_percent[0], pktnb_stats[1]);
953 		return;
954 	}
955 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
956 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
957 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
958 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
959 		return;
960 	}
961 	printf(" + %d%% of %d pkts + %d%% of others]\n",
962 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
963 }
964 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
965 
966 static void
967 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
968 {
969 	struct rte_port *port;
970 	uint8_t i;
971 
972 	static const char *fwd_stats_border = "----------------------";
973 
974 	port = &ports[port_id];
975 	printf("\n  %s Forward statistics for port %-2d %s\n",
976 	       fwd_stats_border, port_id, fwd_stats_border);
977 
978 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
979 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
980 		       "%-"PRIu64"\n",
981 		       stats->ipackets, stats->imissed,
982 		       (uint64_t) (stats->ipackets + stats->imissed));
983 
984 		if (cur_fwd_eng == &csum_fwd_engine)
985 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
986 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
987 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
988 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
989 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
990 		}
991 
992 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
993 		       "%-"PRIu64"\n",
994 		       stats->opackets, port->tx_dropped,
995 		       (uint64_t) (stats->opackets + port->tx_dropped));
996 	}
997 	else {
998 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
999 		       "%14"PRIu64"\n",
1000 		       stats->ipackets, stats->imissed,
1001 		       (uint64_t) (stats->ipackets + stats->imissed));
1002 
1003 		if (cur_fwd_eng == &csum_fwd_engine)
1004 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1005 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1006 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1007 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1008 			printf("  RX-nombufs:             %14"PRIu64"\n",
1009 			       stats->rx_nombuf);
1010 		}
1011 
1012 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1013 		       "%14"PRIu64"\n",
1014 		       stats->opackets, port->tx_dropped,
1015 		       (uint64_t) (stats->opackets + port->tx_dropped));
1016 	}
1017 
1018 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1019 	if (port->rx_stream)
1020 		pkt_burst_stats_display("RX",
1021 			&port->rx_stream->rx_burst_stats);
1022 	if (port->tx_stream)
1023 		pkt_burst_stats_display("TX",
1024 			&port->tx_stream->tx_burst_stats);
1025 #endif
1026 
1027 	if (port->rx_queue_stats_mapping_enabled) {
1028 		printf("\n");
1029 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1030 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1031 			       "     RX-errors:%14"PRIu64
1032 			       "    RX-bytes:%14"PRIu64"\n",
1033 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1034 		}
1035 		printf("\n");
1036 	}
1037 	if (port->tx_queue_stats_mapping_enabled) {
1038 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1039 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1040 			       "                                 TX-bytes:%14"PRIu64"\n",
1041 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1042 		}
1043 	}
1044 
1045 	printf("  %s--------------------------------%s\n",
1046 	       fwd_stats_border, fwd_stats_border);
1047 }
1048 
1049 static void
1050 fwd_stream_stats_display(streamid_t stream_id)
1051 {
1052 	struct fwd_stream *fs;
1053 	static const char *fwd_top_stats_border = "-------";
1054 
1055 	fs = fwd_streams[stream_id];
1056 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1057 	    (fs->fwd_dropped == 0))
1058 		return;
1059 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1060 	       "TX Port=%2d/Queue=%2d %s\n",
1061 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1062 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1063 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1064 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1065 
1066 	/* if checksum mode */
1067 	if (cur_fwd_eng == &csum_fwd_engine) {
1068 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
1069 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1070 	}
1071 
1072 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1073 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1074 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1075 #endif
1076 }
1077 
1078 static void
1079 flush_fwd_rx_queues(void)
1080 {
1081 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1082 	portid_t  rxp;
1083 	portid_t port_id;
1084 	queueid_t rxq;
1085 	uint16_t  nb_rx;
1086 	uint16_t  i;
1087 	uint8_t   j;
1088 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1089 	uint64_t timer_period;
1090 
1091 	/* convert to number of cycles */
1092 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1093 
1094 	for (j = 0; j < 2; j++) {
1095 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1096 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1097 				port_id = fwd_ports_ids[rxp];
1098 				/*
1099 				 * testpmd can get stuck in the do-while loop
1100 				 * below if rte_eth_rx_burst() always returns
1101 				 * packets, so a timer is used to exit the loop
1102 				 * after a 1-second expiry.
1103 				 */
1104 				prev_tsc = rte_rdtsc();
1105 				do {
1106 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1107 						pkts_burst, MAX_PKT_BURST);
1108 					for (i = 0; i < nb_rx; i++)
1109 						rte_pktmbuf_free(pkts_burst[i]);
1110 
1111 					cur_tsc = rte_rdtsc();
1112 					diff_tsc = cur_tsc - prev_tsc;
1113 					timer_tsc += diff_tsc;
1114 				} while ((nb_rx > 0) &&
1115 					(timer_tsc < timer_period));
1116 				timer_tsc = 0;
1117 			}
1118 		}
1119 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1120 	}
1121 }
1122 
1123 static void
1124 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1125 {
1126 	struct fwd_stream **fsm;
1127 	streamid_t nb_fs;
1128 	streamid_t sm_id;
1129 #ifdef RTE_LIBRTE_BITRATE
1130 	uint64_t tics_per_1sec;
1131 	uint64_t tics_datum;
1132 	uint64_t tics_current;
1133 	uint16_t idx_port;
1134 
1135 	tics_datum = rte_rdtsc();
1136 	tics_per_1sec = rte_get_timer_hz();
1137 #endif
1138 	fsm = &fwd_streams[fc->stream_idx];
1139 	nb_fs = fc->stream_nb;
1140 	do {
1141 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1142 			(*pkt_fwd)(fsm[sm_id]);
1143 #ifdef RTE_LIBRTE_BITRATE
1144 		if (bitrate_enabled != 0 &&
1145 				bitrate_lcore_id == rte_lcore_id()) {
1146 			tics_current = rte_rdtsc();
1147 			if (tics_current - tics_datum >= tics_per_1sec) {
1148 				/* Periodic bitrate calculation */
1149 				RTE_ETH_FOREACH_DEV(idx_port)
1150 					rte_stats_bitrate_calc(bitrate_data,
1151 						idx_port);
1152 				tics_datum = tics_current;
1153 			}
1154 		}
1155 #endif
1156 #ifdef RTE_LIBRTE_LATENCY_STATS
1157 		if (latencystats_enabled != 0 &&
1158 				latencystats_lcore_id == rte_lcore_id())
1159 			rte_latencystats_update();
1160 #endif
1161 
1162 	} while (! fc->stopped);
1163 }
1164 
1165 static int
1166 start_pkt_forward_on_core(void *fwd_arg)
1167 {
1168 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1169 			     cur_fwd_config.fwd_eng->packet_fwd);
1170 	return 0;
1171 }
1172 
1173 /*
1174  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1175  * Used to start communication flows in network loopback test configurations.
1176  */
1177 static int
1178 run_one_txonly_burst_on_core(void *fwd_arg)
1179 {
1180 	struct fwd_lcore *fwd_lc;
1181 	struct fwd_lcore tmp_lcore;
1182 
1183 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1184 	tmp_lcore = *fwd_lc;
1185 	tmp_lcore.stopped = 1;
1186 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1187 	return 0;
1188 }
1189 
1190 /*
1191  * Launch packet forwarding:
1192  *     - Setup per-port forwarding context.
1193  *     - launch logical cores with their forwarding configuration.
1194  */
1195 static void
1196 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1197 {
1198 	port_fwd_begin_t port_fwd_begin;
1199 	unsigned int i;
1200 	unsigned int lc_id;
1201 	int diag;
1202 
1203 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1204 	if (port_fwd_begin != NULL) {
1205 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1206 			(*port_fwd_begin)(fwd_ports_ids[i]);
1207 	}
1208 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1209 		lc_id = fwd_lcores_cpuids[i];
1210 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1211 			fwd_lcores[i]->stopped = 0;
1212 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1213 						     fwd_lcores[i], lc_id);
1214 			if (diag != 0)
1215 				printf("launch lcore %u failed - diag=%d\n",
1216 				       lc_id, diag);
1217 		}
1218 	}
1219 }
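
/*
 * launch_packet_forwarding() follows the standard EAL run-to-completion
 * pattern: remote-launch a worker function on each lcore, then join with
 * rte_eal_mp_wait_lcore(). A standalone sketch of that pattern
 * (hypothetical names, independent of the forwarding configuration):
 */
static int
example_lcore_main(__rte_unused void *arg)
{
	/* per-lcore work would go here */
	return 0;
}

static __rte_unused void
example_launch_and_wait(void)
{
	unsigned int lc_id;

	RTE_LCORE_FOREACH_SLAVE(lc_id)
		rte_eal_remote_launch(example_lcore_main, NULL, lc_id);
	rte_eal_mp_wait_lcore(); /* block until every worker lcore returns */
}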
1220 
1221 /*
1222  * Launch packet forwarding configuration.
1223  */
1224 void
1225 start_packet_forwarding(int with_tx_first)
1226 {
1227 	port_fwd_begin_t port_fwd_begin;
1228 	port_fwd_end_t  port_fwd_end;
1229 	struct rte_port *port;
1230 	unsigned int i;
1231 	portid_t   pt_id;
1232 	streamid_t sm_id;
1233 
1234 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1235 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1236 
1237 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1238 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1239 
1240 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1241 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1242 		(!nb_rxq || !nb_txq))
1243 		rte_exit(EXIT_FAILURE,
1244 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1245 			cur_fwd_eng->fwd_mode_name);
1246 
1247 	if (all_ports_started() == 0) {
1248 		printf("Not all ports were started\n");
1249 		return;
1250 	}
1251 	if (test_done == 0) {
1252 		printf("Packet forwarding already started\n");
1253 		return;
1254 	}
1255 
1256 	if (init_fwd_streams() < 0) {
1257 		printf("Fail from init_fwd_streams()\n");
1258 		return;
1259 	}
1260 
1261 	if(dcb_test) {
1262 		for (i = 0; i < nb_fwd_ports; i++) {
1263 			pt_id = fwd_ports_ids[i];
1264 			port = &ports[pt_id];
1265 			if (!port->dcb_flag) {
1266 				printf("In DCB mode, all forwarding ports must "
1267 				       "be configured in this mode.\n");
1268 				return;
1269 			}
1270 		}
1271 		if (nb_fwd_lcores == 1) {
1272 			printf("In DCB mode, the number of forwarding cores "
1273 			       "should be larger than 1.\n");
1274 			return;
1275 		}
1276 	}
1277 	test_done = 0;
1278 
1279 	if(!no_flush_rx)
1280 		flush_fwd_rx_queues();
1281 
1282 	fwd_config_setup();
1283 	pkt_fwd_config_display(&cur_fwd_config);
1284 	rxtx_config_display();
1285 
1286 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1287 		pt_id = fwd_ports_ids[i];
1288 		port = &ports[pt_id];
1289 		rte_eth_stats_get(pt_id, &port->stats);
1290 		port->tx_dropped = 0;
1291 
1292 		map_port_queue_stats_mapping_registers(pt_id, port);
1293 	}
1294 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1295 		fwd_streams[sm_id]->rx_packets = 0;
1296 		fwd_streams[sm_id]->tx_packets = 0;
1297 		fwd_streams[sm_id]->fwd_dropped = 0;
1298 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1299 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1300 
1301 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1302 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1303 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1304 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1305 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1306 #endif
1307 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1308 		fwd_streams[sm_id]->core_cycles = 0;
1309 #endif
1310 	}
1311 	if (with_tx_first) {
1312 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1313 		if (port_fwd_begin != NULL) {
1314 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1315 				(*port_fwd_begin)(fwd_ports_ids[i]);
1316 		}
1317 		while (with_tx_first--) {
1318 			launch_packet_forwarding(
1319 					run_one_txonly_burst_on_core);
1320 			rte_eal_mp_wait_lcore();
1321 		}
1322 		port_fwd_end = tx_only_engine.port_fwd_end;
1323 		if (port_fwd_end != NULL) {
1324 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1325 				(*port_fwd_end)(fwd_ports_ids[i]);
1326 		}
1327 	}
1328 	launch_packet_forwarding(start_pkt_forward_on_core);
1329 }
1330 
1331 void
1332 stop_packet_forwarding(void)
1333 {
1334 	struct rte_eth_stats stats;
1335 	struct rte_port *port;
1336 	port_fwd_end_t  port_fwd_end;
1337 	int i;
1338 	portid_t   pt_id;
1339 	streamid_t sm_id;
1340 	lcoreid_t  lc_id;
1341 	uint64_t total_recv;
1342 	uint64_t total_xmit;
1343 	uint64_t total_rx_dropped;
1344 	uint64_t total_tx_dropped;
1345 	uint64_t total_rx_nombuf;
1346 	uint64_t tx_dropped;
1347 	uint64_t rx_bad_ip_csum;
1348 	uint64_t rx_bad_l4_csum;
1349 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1350 	uint64_t fwd_cycles;
1351 #endif
1352 
1353 	static const char *acc_stats_border = "+++++++++++++++";
1354 
1355 	if (test_done) {
1356 		printf("Packet forwarding not started\n");
1357 		return;
1358 	}
1359 	printf("Telling cores to stop...");
1360 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1361 		fwd_lcores[lc_id]->stopped = 1;
1362 	printf("\nWaiting for lcores to finish...\n");
1363 	rte_eal_mp_wait_lcore();
1364 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1365 	if (port_fwd_end != NULL) {
1366 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1367 			pt_id = fwd_ports_ids[i];
1368 			(*port_fwd_end)(pt_id);
1369 		}
1370 	}
1371 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1372 	fwd_cycles = 0;
1373 #endif
1374 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1375 		if (cur_fwd_config.nb_fwd_streams >
1376 		    cur_fwd_config.nb_fwd_ports) {
1377 			fwd_stream_stats_display(sm_id);
1378 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1379 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1380 		} else {
1381 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1382 				fwd_streams[sm_id];
1383 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1384 				fwd_streams[sm_id];
1385 		}
1386 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1387 		tx_dropped = (uint64_t) (tx_dropped +
1388 					 fwd_streams[sm_id]->fwd_dropped);
1389 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1390 
1391 		rx_bad_ip_csum =
1392 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1393 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1394 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1395 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1396 							rx_bad_ip_csum;
1397 
1398 		rx_bad_l4_csum =
1399 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1400 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1401 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1402 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1403 							rx_bad_l4_csum;
1404 
1405 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1406 		fwd_cycles = (uint64_t) (fwd_cycles +
1407 					 fwd_streams[sm_id]->core_cycles);
1408 #endif
1409 	}
1410 	total_recv = 0;
1411 	total_xmit = 0;
1412 	total_rx_dropped = 0;
1413 	total_tx_dropped = 0;
1414 	total_rx_nombuf  = 0;
1415 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1416 		pt_id = fwd_ports_ids[i];
1417 
1418 		port = &ports[pt_id];
1419 		rte_eth_stats_get(pt_id, &stats);
1420 		stats.ipackets -= port->stats.ipackets;
1421 		port->stats.ipackets = 0;
1422 		stats.opackets -= port->stats.opackets;
1423 		port->stats.opackets = 0;
1424 		stats.ibytes   -= port->stats.ibytes;
1425 		port->stats.ibytes = 0;
1426 		stats.obytes   -= port->stats.obytes;
1427 		port->stats.obytes = 0;
1428 		stats.imissed  -= port->stats.imissed;
1429 		port->stats.imissed = 0;
1430 		stats.oerrors  -= port->stats.oerrors;
1431 		port->stats.oerrors = 0;
1432 		stats.rx_nombuf -= port->stats.rx_nombuf;
1433 		port->stats.rx_nombuf = 0;
1434 
1435 		total_recv += stats.ipackets;
1436 		total_xmit += stats.opackets;
1437 		total_rx_dropped += stats.imissed;
1438 		total_tx_dropped += port->tx_dropped;
1439 		total_rx_nombuf  += stats.rx_nombuf;
1440 
1441 		fwd_port_stats_display(pt_id, &stats);
1442 	}
1443 
1444 	printf("\n  %s Accumulated forward statistics for all ports"
1445 	       "%s\n",
1446 	       acc_stats_border, acc_stats_border);
1447 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1448 	       "%-"PRIu64"\n"
1449 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1450 	       "%-"PRIu64"\n",
1451 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1452 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1453 	if (total_rx_nombuf > 0)
1454 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1455 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1456 	       "%s\n",
1457 	       acc_stats_border, acc_stats_border);
1458 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1459 	if (total_recv > 0)
1460 		printf("\n  CPU cycles/packet=%u (total cycles="
1461 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1462 		       (unsigned int)(fwd_cycles / total_recv),
1463 		       fwd_cycles, total_recv);
1464 #endif
1465 	printf("\nDone.\n");
1466 	test_done = 1;
1467 }
1468 
1469 void
1470 dev_set_link_up(portid_t pid)
1471 {
1472 	if (rte_eth_dev_set_link_up(pid) < 0)
1473 		printf("\nSet link up fail.\n");
1474 }
1475 
1476 void
1477 dev_set_link_down(portid_t pid)
1478 {
1479 	if (rte_eth_dev_set_link_down(pid) < 0)
1480 		printf("\nSet link down fail.\n");
1481 }
1482 
1483 static int
1484 all_ports_started(void)
1485 {
1486 	portid_t pi;
1487 	struct rte_port *port;
1488 
1489 	RTE_ETH_FOREACH_DEV(pi) {
1490 		port = &ports[pi];
1491 		/* Check if there is a port which is not started */
1492 		if ((port->port_status != RTE_PORT_STARTED) &&
1493 			(port->slave_flag == 0))
1494 			return 0;
1495 	}
1496 
1497 	/* All ports are started */
1498 	return 1;
1499 }
1500 
1501 int
1502 port_is_stopped(portid_t port_id)
1503 {
1504 	struct rte_port *port = &ports[port_id];
1505 
1506 	if ((port->port_status != RTE_PORT_STOPPED) &&
1507 	    (port->slave_flag == 0))
1508 		return 0;
1509 	return 1;
1510 }
1511 
1512 int
1513 all_ports_stopped(void)
1514 {
1515 	portid_t pi;
1516 
1517 	RTE_ETH_FOREACH_DEV(pi) {
1518 		if (!port_is_stopped(pi))
1519 			return 0;
1520 	}
1521 
1522 	return 1;
1523 }
1524 
1525 int
1526 port_is_started(portid_t port_id)
1527 {
1528 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1529 		return 0;
1530 
1531 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1532 		return 0;
1533 
1534 	return 1;
1535 }
1536 
1537 static int
1538 port_is_closed(portid_t port_id)
1539 {
1540 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1541 		return 0;
1542 
1543 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1544 		return 0;
1545 
1546 	return 1;
1547 }
1548 
1549 int
1550 start_port(portid_t pid)
1551 {
1552 	int diag, need_check_link_status = -1;
1553 	portid_t pi;
1554 	queueid_t qi;
1555 	struct rte_port *port;
1556 	struct ether_addr mac_addr;
1557 	enum rte_eth_event_type event_type;
1558 
1559 	if (port_id_is_invalid(pid, ENABLED_WARN))
1560 		return 0;
1561 
1562 	if(dcb_config)
1563 		dcb_test = 1;
1564 	RTE_ETH_FOREACH_DEV(pi) {
1565 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1566 			continue;
1567 
1568 		need_check_link_status = 0;
1569 		port = &ports[pi];
1570 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1571 						 RTE_PORT_HANDLING) == 0) {
1572 			printf("Port %d is now not stopped\n", pi);
1573 			continue;
1574 		}
1575 
1576 		if (port->need_reconfig > 0) {
1577 			port->need_reconfig = 0;
1578 
1579 			if (flow_isolate_all) {
1580 				int ret = port_flow_isolate(pi, 1);
1581 				if (ret) {
1582 					printf("Failed to apply isolated"
1583 					       " mode on port %d\n", pi);
1584 					return -1;
1585 				}
1586 			}
1587 
1588 			printf("Configuring Port %d (socket %u)\n", pi,
1589 					port->socket_id);
1590 			/* configure port */
1591 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1592 						&(port->dev_conf));
1593 			if (diag != 0) {
1594 				if (rte_atomic16_cmpset(&(port->port_status),
1595 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1596 					printf("Port %d can not be set back "
1597 							"to stopped\n", pi);
1598 				printf("Fail to configure port %d\n", pi);
1599 				/* try to reconfigure port next time */
1600 				port->need_reconfig = 1;
1601 				return -1;
1602 			}
1603 		}
1604 		if (port->need_reconfig_queues > 0) {
1605 			port->need_reconfig_queues = 0;
1606 			/* setup tx queues */
1607 			for (qi = 0; qi < nb_txq; qi++) {
1608 				port->tx_conf[qi].txq_flags =
1609 					ETH_TXQ_FLAGS_IGNORE;
1610 				/* Apply Tx offloads configuration */
1611 				port->tx_conf[qi].offloads =
1612 					port->dev_conf.txmode.offloads;
1613 				if ((numa_support) &&
1614 					(txring_numa[pi] != NUMA_NO_CONFIG))
1615 					diag = rte_eth_tx_queue_setup(pi, qi,
1616 						port->nb_tx_desc[qi],
1617 						txring_numa[pi],
1618 						&(port->tx_conf[qi]));
1619 				else
1620 					diag = rte_eth_tx_queue_setup(pi, qi,
1621 						port->nb_tx_desc[qi],
1622 						port->socket_id,
1623 						&(port->tx_conf[qi]));
1624 
1625 				if (diag == 0)
1626 					continue;
1627 
1628 				/* Fail to setup tx queue, return */
1629 				if (rte_atomic16_cmpset(&(port->port_status),
1630 							RTE_PORT_HANDLING,
1631 							RTE_PORT_STOPPED) == 0)
1632 					printf("Port %d can not be set back "
1633 							"to stopped\n", pi);
1634 				printf("Fail to configure port %d tx queues\n",
1635 				       pi);
1636 				/* try to reconfigure queues next time */
1637 				port->need_reconfig_queues = 1;
1638 				return -1;
1639 			}
1640 			for (qi = 0; qi < nb_rxq; qi++) {
1641 				/* Apply Rx offloads configuration */
1642 				port->rx_conf[qi].offloads =
1643 					port->dev_conf.rxmode.offloads;
1644 				/* setup rx queues */
1645 				if ((numa_support) &&
1646 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1647 					struct rte_mempool * mp =
1648 						mbuf_pool_find(rxring_numa[pi]);
1649 					if (mp == NULL) {
1650 						printf("Failed to setup RX queue: "
1651 							"no mempool allocated "
1652 							"on socket %d\n",
1653 							rxring_numa[pi]);
1654 						return -1;
1655 					}
1656 
1657 					diag = rte_eth_rx_queue_setup(pi, qi,
1658 					     port->nb_rx_desc[qi],
1659 					     rxring_numa[pi],
1660 					     &(port->rx_conf[qi]),
1661 					     mp);
1662 				} else {
1663 					struct rte_mempool *mp =
1664 						mbuf_pool_find(port->socket_id);
1665 					if (mp == NULL) {
1666 						printf("Failed to setup RX queue: "
1667 							"no mempool allocated "
1668 							"on socket %d\n",
1669 							port->socket_id);
1670 						return -1;
1671 					}
1672 					diag = rte_eth_rx_queue_setup(pi, qi,
1673 					     port->nb_rx_desc[qi],
1674 					     port->socket_id,
1675 					     &(port->rx_conf[qi]),
1676 					     mp);
1677 				}
1678 				if (diag == 0)
1679 					continue;
1680 
1681 				/* Fail to setup rx queue, return */
1682 				if (rte_atomic16_cmpset(&(port->port_status),
1683 							RTE_PORT_HANDLING,
1684 							RTE_PORT_STOPPED) == 0)
1685 					printf("Port %d can not be set back "
1686 							"to stopped\n", pi);
1687 				printf("Fail to configure port %d rx queues\n",
1688 				       pi);
1689 				/* try to reconfigure queues next time */
1690 				port->need_reconfig_queues = 1;
1691 				return -1;
1692 			}
1693 		}
1694 
1695 		/* start port */
1696 		if (rte_eth_dev_start(pi) < 0) {
1697 			printf("Fail to start port %d\n", pi);
1698 
1699 			/* Fail to setup rx queue, return */
1700 			if (rte_atomic16_cmpset(&(port->port_status),
1701 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1702 				printf("Port %d can not be set back to "
1703 							"stopped\n", pi);
1704 			continue;
1705 		}
1706 
1707 		if (rte_atomic16_cmpset(&(port->port_status),
1708 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1709 			printf("Port %d can not be set into started\n", pi);
1710 
1711 		rte_eth_macaddr_get(pi, &mac_addr);
1712 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1713 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1714 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1715 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1716 
1717 		/* at least one port started, need to check link status */
1718 		need_check_link_status = 1;
1719 	}
1720 
1721 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1722 	     event_type < RTE_ETH_EVENT_MAX;
1723 	     event_type++) {
1724 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1725 						event_type,
1726 						eth_event_callback,
1727 						NULL);
1728 		if (diag) {
1729 			printf("Failed to set up event callback for event %d\n",
1730 				event_type);
1731 			return -1;
1732 		}
1733 	}
1734 
1735 	if (need_check_link_status == 1 && !no_link_check)
1736 		check_all_ports_link_status(RTE_PORT_ALL);
1737 	else if (need_check_link_status == 0)
1738 		printf("Please stop the ports first\n");
1739 
1740 	printf("Done\n");
1741 	return 0;
1742 }
1743 
1744 void
1745 stop_port(portid_t pid)
1746 {
1747 	portid_t pi;
1748 	struct rte_port *port;
1749 	int need_check_link_status = 0;
1750 
1751 	if (dcb_test) {
1752 		dcb_test = 0;
1753 		dcb_config = 0;
1754 	}
1755 
1756 	if (port_id_is_invalid(pid, ENABLED_WARN))
1757 		return;
1758 
1759 	printf("Stopping ports...\n");
1760 
1761 	RTE_ETH_FOREACH_DEV(pi) {
1762 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1763 			continue;
1764 
1765 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1766 			printf("Please remove port %d from forwarding configuration.\n", pi);
1767 			continue;
1768 		}
1769 
1770 		if (port_is_bonding_slave(pi)) {
1771 			printf("Please remove port %d from bonded device.\n", pi);
1772 			continue;
1773 		}
1774 
1775 		port = &ports[pi];
1776 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1777 						RTE_PORT_HANDLING) == 0)
1778 			continue;
1779 
1780 		rte_eth_dev_stop(pi);
1781 
1782 		if (rte_atomic16_cmpset(&(port->port_status),
1783 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1784 			printf("Port %d can not be set into stopped\n", pi);
1785 		need_check_link_status = 1;
1786 	}
1787 	if (need_check_link_status && !no_link_check)
1788 		check_all_ports_link_status(RTE_PORT_ALL);
1789 
1790 	printf("Done\n");
1791 }
1792 
1793 void
1794 close_port(portid_t pid)
1795 {
1796 	portid_t pi;
1797 	struct rte_port *port;
1798 
1799 	if (port_id_is_invalid(pid, ENABLED_WARN))
1800 		return;
1801 
1802 	printf("Closing ports...\n");
1803 
1804 	RTE_ETH_FOREACH_DEV(pi) {
1805 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1806 			continue;
1807 
1808 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1809 			printf("Please remove port %d from forwarding configuration.\n", pi);
1810 			continue;
1811 		}
1812 
1813 		if (port_is_bonding_slave(pi)) {
1814 			printf("Please remove port %d from bonded device.\n", pi);
1815 			continue;
1816 		}
1817 
1818 		port = &ports[pi];
1819 		if (rte_atomic16_cmpset(&(port->port_status),
1820 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1821 			printf("Port %d is already closed\n", pi);
1822 			continue;
1823 		}
1824 
1825 		if (rte_atomic16_cmpset(&(port->port_status),
1826 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1827 			printf("Port %d is now not stopped\n", pi);
1828 			continue;
1829 		}
1830 
1831 		if (port->flow_list)
1832 			port_flow_flush(pi);
1833 		rte_eth_dev_close(pi);
1834 
1835 		if (rte_atomic16_cmpset(&(port->port_status),
1836 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1837 			printf("Port %d cannot be set to closed\n", pi);
1838 	}
1839 
1840 	printf("Done\n");
1841 }
1842 
1843 void
1844 reset_port(portid_t pid)
1845 {
1846 	int diag;
1847 	portid_t pi;
1848 	struct rte_port *port;
1849 
1850 	if (port_id_is_invalid(pid, ENABLED_WARN))
1851 		return;
1852 
1853 	printf("Resetting ports...\n");
1854 
1855 	RTE_ETH_FOREACH_DEV(pi) {
1856 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1857 			continue;
1858 
1859 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1860 			printf("Please remove port %d from forwarding "
1861 			       "configuration.\n", pi);
1862 			continue;
1863 		}
1864 
1865 		if (port_is_bonding_slave(pi)) {
1866 			printf("Please remove port %d from bonded device.\n",
1867 			       pi);
1868 			continue;
1869 		}
1870 
1871 		diag = rte_eth_dev_reset(pi);
1872 		if (diag == 0) {
1873 			port = &ports[pi];
1874 			port->need_reconfig = 1;
1875 			port->need_reconfig_queues = 1;
1876 		} else {
1877 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1878 		}
1879 	}
1880 
1881 	printf("Done\n");
1882 }
1883 
1884 static int
1885 eth_dev_event_callback_register(void)
1886 {
1887 	int ret;
1888 
1889 	/* register the device event callback */
1890 	ret = rte_dev_event_callback_register(NULL,
1891 		eth_dev_event_callback, NULL);
1892 	if (ret) {
1893 		printf("Failed to register device event callback\n");
1894 		return -1;
1895 	}
1896 
1897 	return 0;
1898 }
1899 
1901 static int
1902 eth_dev_event_callback_unregister(void)
1903 {
1904 	int ret;
1905 
1906 	/* unregister the device event callback */
1907 	ret = rte_dev_event_callback_unregister(NULL,
1908 		eth_dev_event_callback, NULL);
1909 	if (ret < 0) {
1910 		printf("Failed to unregister device event callback\n");
1911 		return -1;
1912 	}
1913 
1914 	return 0;
1915 }
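
/*
 * Illustrative sketch (not compiled in): the hotplug path pairs the two
 * helpers above with the EAL device event monitor; passing NULL as the
 * device name registers the callback for all devices.
 */
#if 0
static int
enable_hotplug_events(void)
{
	if (rte_dev_event_monitor_start() != 0)
		return -1;
	return eth_dev_event_callback_register();
}
#endif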
1916 
1917 void
1918 attach_port(char *identifier)
1919 {
1920 	portid_t pi = 0;
1921 	unsigned int socket_id;
1922 
1923 	printf("Attaching a new port...\n");
1924 
1925 	if (identifier == NULL) {
1926 		printf("Invalid parameters are specified\n");
1927 		return;
1928 	}
1929 
1930 	if (rte_eth_dev_attach(identifier, &pi))
1931 		return;
1932 
1933 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1934 	/* if socket_id is invalid, set to 0 */
1935 	if (check_socket_id(socket_id) < 0)
1936 		socket_id = 0;
1937 	reconfig(pi, socket_id);
1938 	rte_eth_promiscuous_enable(pi);
1939 
1940 	nb_ports = rte_eth_dev_count_avail();
1941 
1942 	ports[pi].port_status = RTE_PORT_STOPPED;
1943 
1944 	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1945 	printf("Done\n");
1946 }
1947 
1948 void
1949 detach_port(portid_t port_id)
1950 {
1951 	char name[RTE_ETH_NAME_MAX_LEN];
1952 
1953 	printf("Detaching a port...\n");
1954 
1955 	if (!port_is_closed(port_id)) {
1956 		printf("Please close port first\n");
1957 		return;
1958 	}
1959 
1960 	if (ports[port_id].flow_list)
1961 		port_flow_flush(port_id);
1962 
1963 	if (rte_eth_dev_detach(port_id, name)) {
1964 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1965 		return;
1966 	}
1967 
1968 	nb_ports = rte_eth_dev_count_avail();
1969 
1970 	printf("Port '%s' is detached. Total ports is now %d\n",
1971 			name, nb_ports);
1972 	printf("Done\n");
1974 }
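
/*
 * Usage note: attach_port() and detach_port() back the interactive
 * "port attach <devargs>" and "port detach <port_id>" commands, e.g.
 * (identifiers are examples only):
 *
 *   testpmd> port attach 0000:83:00.1
 *   testpmd> port stop 2
 *   testpmd> port close 2
 *   testpmd> port detach 2
 */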
1975 
1976 void
1977 pmd_test_exit(void)
1978 {
1979 	portid_t pt_id;
1980 	int ret;
1981 
1982 	if (test_done == 0)
1983 		stop_packet_forwarding();
1984 
1985 	if (ports != NULL) {
1986 		no_link_check = 1;
1987 		RTE_ETH_FOREACH_DEV(pt_id) {
1988 			printf("\nShutting down port %d...\n", pt_id);
1989 			fflush(stdout);
1990 			stop_port(pt_id);
1991 			close_port(pt_id);
1992 		}
1993 	}
1994 
1995 	if (hot_plug) {
1996 		ret = rte_dev_event_monitor_stop();
1997 		if (ret)
1998 			RTE_LOG(ERR, EAL,
1999 				"failed to stop the device event monitor.\n");
2000 
2001 		ret = eth_dev_event_callback_unregister();
2002 		if (ret)
2003 			RTE_LOG(ERR, EAL,
2004 				"failed to unregister all event callbacks.\n");
2005 	}
2006 
2007 	printf("\nBye...\n");
2008 }
2009 
2010 typedef void (*cmd_func_t)(void);
2011 struct pmd_test_command {
2012 	const char *cmd_name;
2013 	cmd_func_t cmd_func;
2014 };
2015 
2016 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2017 
2018 /* Check the link status of all ports, waiting up to 9s, then print the final status */
2019 static void
2020 check_all_ports_link_status(uint32_t port_mask)
2021 {
2022 #define CHECK_INTERVAL 100 /* 100ms */
2023 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2024 	portid_t portid;
2025 	uint8_t count, all_ports_up, print_flag = 0;
2026 	struct rte_eth_link link;
2027 
2028 	printf("Checking link statuses...\n");
2029 	fflush(stdout);
2030 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2031 		all_ports_up = 1;
2032 		RTE_ETH_FOREACH_DEV(portid) {
2033 			if ((port_mask & (1 << portid)) == 0)
2034 				continue;
2035 			memset(&link, 0, sizeof(link));
2036 			rte_eth_link_get_nowait(portid, &link);
2037 			/* print link status if flag set */
2038 			if (print_flag == 1) {
2039 				if (link.link_status)
2040 					printf(
2041 					"Port %d Link Up. speed %u Mbps - %s\n",
2042 					portid, link.link_speed,
2043 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2044 					("full-duplex") : ("half-duplex"));
2045 				else
2046 					printf("Port %d Link Down\n", portid);
2047 				continue;
2048 			}
2049 			/* clear all_ports_up flag if any link down */
2050 			if (link.link_status == ETH_LINK_DOWN) {
2051 				all_ports_up = 0;
2052 				break;
2053 			}
2054 		}
2055 		/* after finally printing all link status, get out */
2056 		if (print_flag == 1)
2057 			break;
2058 
2059 		if (all_ports_up == 0) {
2060 			fflush(stdout);
2061 			rte_delay_ms(CHECK_INTERVAL);
2062 		}
2063 
2064 		/* set the print_flag if all ports up or timeout */
2065 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2066 			print_flag = 1;
2067 		}
2068 
2069 		if (lsc_interrupt)
2070 			break;
2071 	}
2072 }
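
/*
 * Illustrative sketch (not compiled in): for a single port, the blocking
 * rte_eth_link_get() collapses the polling loop above into one call (the
 * PMD itself may wait up to several seconds). wait_for_link is a
 * hypothetical helper.
 */
#if 0
static int
wait_for_link(portid_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get(port_id, &link); /* may block while polling */
	return link.link_status == ETH_LINK_UP ? 0 : -1;
}
#endif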
2073 
2074 static void
2075 rmv_event_callback(void *arg)
2076 {
2077 	struct rte_eth_dev *dev;
2078 	portid_t port_id = (intptr_t)arg;
2079 
2080 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2081 	dev = &rte_eth_devices[port_id];
2082 
2083 	stop_port(port_id);
2084 	close_port(port_id);
2085 	printf("removing device %s\n", dev->device->name);
2086 	if (rte_eal_dev_detach(dev->device))
2087 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2088 			dev->device->name);
2089 }
2090 
2091 /* This function is used by the interrupt thread */
2092 static int
2093 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2094 		  void *ret_param)
2095 {
2096 	static const char * const event_desc[] = {
2097 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2098 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2099 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2100 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2101 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2102 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2103 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2104 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2105 		[RTE_ETH_EVENT_NEW] = "device probed",
2106 		[RTE_ETH_EVENT_DESTROY] = "device released",
2107 		[RTE_ETH_EVENT_MAX] = NULL,
2108 	};
2109 
2110 	RTE_SET_USED(param);
2111 	RTE_SET_USED(ret_param);
2112 
2113 	if (type >= RTE_ETH_EVENT_MAX) {
2114 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2115 			port_id, __func__, type);
2116 		fflush(stderr);
2117 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2118 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2119 			event_desc[type]);
2120 		fflush(stdout);
2121 	}
2122 
2123 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2124 		return 0;
2125 
2126 	switch (type) {
2127 	case RTE_ETH_EVENT_INTR_RMV:
2128 		if (rte_eal_alarm_set(100000,
2129 				rmv_event_callback, (void *)(intptr_t)port_id))
2130 			fprintf(stderr, "Could not set up deferred device removal\n");
2131 		break;
2132 	default:
2133 		break;
2134 	}
2135 	return 0;
2136 }
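
/*
 * Illustrative sketch (not compiled in): the RMV handling above defers the
 * actual teardown with rte_eal_alarm_set() so it runs outside the ethdev
 * event callback that reported the removal; the alarm callback fires once,
 * after the delay given in microseconds.
 */
#if 0
static void
deferred_removal(void *arg)
{
	portid_t port_id = (portid_t)(intptr_t)arg;

	printf("deferred handling for port %u\n", port_id);
}

static void
schedule_deferred_removal(portid_t port_id)
{
	/* 100000 us = 100 ms, the same delay used above */
	if (rte_eal_alarm_set(100000, deferred_removal,
			(void *)(intptr_t)port_id) != 0)
		fprintf(stderr, "Could not schedule deferred removal\n");
}
#endif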
2137 
2138 /* This function is used by the interrupt thread */
2139 static void
2140 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2141 			     __rte_unused void *arg)
2142 {
2143 	if (type >= RTE_DEV_EVENT_MAX) {
2144 		fprintf(stderr, "%s called upon invalid event %d\n",
2145 			__func__, type);
2146 		fflush(stderr);
		return;
2147 	}
2148 
2149 	switch (type) {
2150 	case RTE_DEV_EVENT_REMOVE:
2151 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2152 			device_name);
2153 		/* TODO: Once failure handling is finished, stop packet
2154 		 * forwarding, then stop, close and detach the port.
2155 		 */
2156 		break;
2157 	case RTE_DEV_EVENT_ADD:
2158 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2159 			device_name);
2160 		/* TODO: Once the kernel driver binding is finished,
2161 		 * attach the port.
2162 		 */
2163 		break;
2164 	default:
2165 		break;
2166 	}
2167 }
2168 
2169 static int
2170 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2171 {
2172 	uint16_t i;
2173 	int diag;
2174 	uint8_t mapping_found = 0;
2175 
2176 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2177 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2178 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2179 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2180 					tx_queue_stats_mappings[i].queue_id,
2181 					tx_queue_stats_mappings[i].stats_counter_id);
2182 			if (diag != 0)
2183 				return diag;
2184 			mapping_found = 1;
2185 		}
2186 	}
2187 	if (mapping_found)
2188 		port->tx_queue_stats_mapping_enabled = 1;
2189 	return 0;
2190 }
2191 
2192 static int
2193 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2194 {
2195 	uint16_t i;
2196 	int diag;
2197 	uint8_t mapping_found = 0;
2198 
2199 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2200 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2201 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2202 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2203 					rx_queue_stats_mappings[i].queue_id,
2204 					rx_queue_stats_mappings[i].stats_counter_id);
2205 			if (diag != 0)
2206 				return diag;
2207 			mapping_found = 1;
2208 		}
2209 	}
2210 	if (mapping_found)
2211 		port->rx_queue_stats_mapping_enabled = 1;
2212 	return 0;
2213 }
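
/*
 * Illustrative sketch (not compiled in): each entry of the mapping tables
 * walked above binds one queue to one of the limited per-port hardware
 * stats counters, e.g. counting port 0 / TX queue 3 into counter 1:
 */
#if 0
static int
map_txq3_to_counter1(void)
{
	/* Returns -ENOTSUP on PMDs without this capability, which
	 * map_port_queue_stats_mapping_registers() below treats as
	 * "mapping not supported".
	 */
	return rte_eth_dev_set_tx_queue_stats_mapping(0, 3, 1);
}
#endif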
2214 
2215 static void
2216 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2217 {
2218 	int diag = 0;
2219 
2220 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2221 	if (diag != 0) {
2222 		if (diag == -ENOTSUP) {
2223 			port->tx_queue_stats_mapping_enabled = 0;
2224 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2225 		} else
2227 			rte_exit(EXIT_FAILURE,
2228 					"set_tx_queue_stats_mapping_registers "
2229 					"failed for port id=%d diag=%d\n",
2230 					pi, diag);
2231 	}
2232 
2233 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2234 	if (diag != 0) {
2235 		if (diag == -ENOTSUP) {
2236 			port->rx_queue_stats_mapping_enabled = 0;
2237 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2238 		} else
2240 			rte_exit(EXIT_FAILURE,
2241 					"set_rx_queue_stats_mapping_registers "
2242 					"failed for port id=%d diag=%d\n",
2243 					pi, diag);
2244 	}
2245 }
2246 
2247 static void
2248 rxtx_port_config(struct rte_port *port)
2249 {
2250 	uint16_t qid;
2251 
2252 	for (qid = 0; qid < nb_rxq; qid++) {
2253 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2254 
2255 		/* Check if any Rx parameters have been passed */
2256 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2257 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2258 
2259 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2260 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2261 
2262 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2263 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2264 
2265 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2266 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2267 
2268 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2269 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2270 
2271 		port->nb_rx_desc[qid] = nb_rxd;
2272 	}
2273 
2274 	for (qid = 0; qid < nb_txq; qid++) {
2275 		port->tx_conf[qid] = port->dev_info.default_txconf;
2276 
2277 		/* Check if any Tx parameters have been passed */
2278 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2279 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2280 
2281 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2282 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2283 
2284 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2285 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2286 
2287 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2288 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2289 
2290 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2291 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2292 
2293 		port->nb_tx_desc[qid] = nb_txd;
2294 	}
2295 }
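
/*
 * Illustrative sketch (not compiled in): the per-queue configuration
 * prepared above is later handed to the ethdev queue setup calls, roughly
 * as below; mp is a hypothetical mbuf mempool pointer.
 */
#if 0
static void
setup_one_rxq(portid_t pid, queueid_t qid, struct rte_port *port,
	      unsigned int socket_id, struct rte_mempool *mp)
{
	int ret = rte_eth_rx_queue_setup(pid, qid, port->nb_rx_desc[qid],
			socket_id, &port->rx_conf[qid], mp);

	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_rx_queue_setup failed: %d\n", ret);
}
#endif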
2296 
2297 void
2298 init_port_config(void)
2299 {
2300 	portid_t pid;
2301 	struct rte_port *port;
2302 	struct rte_eth_dev_info dev_info;
2303 
2304 	RTE_ETH_FOREACH_DEV(pid) {
2305 		port = &ports[pid];
2306 		port->dev_conf.fdir_conf = fdir_conf;
2307 		if (nb_rxq > 1) {
2308 			rte_eth_dev_info_get(pid, &dev_info);
2309 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2310 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2311 				rss_hf & dev_info.flow_type_rss_offloads;
2312 		} else {
2313 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2314 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2315 		}
2316 
2317 		if (port->dcb_flag == 0) {
2318 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2319 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2320 			else
2321 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2322 		}
2323 
2324 		rxtx_port_config(port);
2325 
2326 		rte_eth_macaddr_get(pid, &port->eth_addr);
2327 
2328 		map_port_queue_stats_mapping_registers(pid, port);
2329 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2330 		rte_pmd_ixgbe_bypass_init(pid);
2331 #endif
2332 
2333 		if (lsc_interrupt &&
2334 		    (rte_eth_devices[pid].data->dev_flags &
2335 		     RTE_ETH_DEV_INTR_LSC))
2336 			port->dev_conf.intr_conf.lsc = 1;
2337 		if (rmv_interrupt &&
2338 		    (rte_eth_devices[pid].data->dev_flags &
2339 		     RTE_ETH_DEV_INTR_RMV))
2340 			port->dev_conf.intr_conf.rmv = 1;
2341 
2342 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2343 		/* Detect softnic port */
2344 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2345 			port->softnic_enable = 1;
2346 			memset(&port->softport, 0, sizeof(struct softnic_port));
2347 
2348 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2349 				port->softport.tm_flag = 1;
2350 		}
2351 #endif
2352 	}
2353 }
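
/*
 * Illustrative sketch (not compiled in): the RSS decision above reduces to
 * this per-port pattern; the requested hash types are always masked by
 * what the PMD reports in flow_type_rss_offloads.
 */
#if 0
static void
enable_rss(struct rte_port *port, uint64_t wanted_hf,
	   const struct rte_eth_dev_info *di)
{
	port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; /* PMD default key */
	port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		wanted_hf & di->flow_type_rss_offloads;
	port->dev_conf.rxmode.mq_mode =
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf ?
		ETH_MQ_RX_RSS : ETH_MQ_RX_NONE;
}
#endif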
2354 
2355 void set_port_slave_flag(portid_t slave_pid)
2356 {
2357 	struct rte_port *port;
2358 
2359 	port = &ports[slave_pid];
2360 	port->slave_flag = 1;
2361 }
2362 
2363 void clear_port_slave_flag(portid_t slave_pid)
2364 {
2365 	struct rte_port *port;
2366 
2367 	port = &ports[slave_pid];
2368 	port->slave_flag = 0;
2369 }
2370 
2371 uint8_t port_is_bonding_slave(portid_t slave_pid)
2372 {
2373 	struct rte_port *port;
2374 
2375 	port = &ports[slave_pid];
2376 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2377 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2378 		return 1;
2379 	return 0;
2380 }
2381 
2382 const uint16_t vlan_tags[] = {
2383 		0,  1,  2,  3,  4,  5,  6,  7,
2384 		8,  9, 10, 11,  12, 13, 14, 15,
2385 		16, 17, 18, 19, 20, 21, 22, 23,
2386 		24, 25, 26, 27, 28, 29, 30, 31
2387 };
2388 
2389 static int
2390 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2391 		 enum dcb_mode_enable dcb_mode,
2392 		 enum rte_eth_nb_tcs num_tcs,
2393 		 uint8_t pfc_en)
2394 {
2395 	uint8_t i;
2396 
2397 	/*
2398 	 * Build the correct configuration for DCB+VT, based on the vlan_tags
2399 	 * array given above and the number of traffic classes available for use.
2400 	 */
2401 	if (dcb_mode == DCB_VT_ENABLED) {
2402 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2403 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2404 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2405 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2406 
2407 		/* VMDQ+DCB RX and TX configurations */
2408 		vmdq_rx_conf->enable_default_pool = 0;
2409 		vmdq_rx_conf->default_pool = 0;
2410 		vmdq_rx_conf->nb_queue_pools =
2411 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2412 		vmdq_tx_conf->nb_queue_pools =
2413 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2414 
2415 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2416 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2417 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2418 			vmdq_rx_conf->pool_map[i].pools =
2419 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2420 		}
2421 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2422 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2423 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2424 		}
2425 
2426 		/* set DCB mode of RX and TX of multiple queues */
2427 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2428 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2429 	} else {
2430 		struct rte_eth_dcb_rx_conf *rx_conf =
2431 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2432 		struct rte_eth_dcb_tx_conf *tx_conf =
2433 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2434 
2435 		rx_conf->nb_tcs = num_tcs;
2436 		tx_conf->nb_tcs = num_tcs;
2437 
2438 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2439 			rx_conf->dcb_tc[i] = i % num_tcs;
2440 			tx_conf->dcb_tc[i] = i % num_tcs;
2441 		}
2442 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2443 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2444 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2445 	}
2446 
2447 	if (pfc_en)
2448 		eth_conf->dcb_capability_en =
2449 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2450 	else
2451 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2452 
2453 	return 0;
2454 }
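
/*
 * Worked example for get_eth_dcb_conf() in VT mode: with num_tcs ==
 * ETH_4_TCS there are 32 pools, so pool_map[i] sends VLAN tag vlan_tags[i]
 * to pool (i % 32) via the bitmask 1 << (i % 32) (VLAN 5 lands in pool 5),
 * and each of the 8 user priorities is assigned traffic class (i % 4).
 */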
2455 
2456 int
2457 init_port_dcb_config(portid_t pid,
2458 		     enum dcb_mode_enable dcb_mode,
2459 		     enum rte_eth_nb_tcs num_tcs,
2460 		     uint8_t pfc_en)
2461 {
2462 	struct rte_eth_conf port_conf;
2463 	struct rte_port *rte_port;
2464 	int retval;
2465 	uint16_t i;
2466 
2467 	rte_port = &ports[pid];
2468 
2469 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2470 	/* Enter DCB configuration status */
2471 	dcb_config = 1;
2472 
2473 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2474 	port_conf.txmode = rte_port->dev_conf.txmode;
2475 
2476 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2477 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2478 	if (retval < 0)
2479 		return retval;
2480 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2481 
2482 	/*
2483 	 * Write the configuration into the device.
2484 	 * Set the number of RX & TX queues to 0, so
2485 	 * the RX & TX queues will not be set up.
2486 	 */
2487 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2488 
2489 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2490 
2491 	/* If dev_info.vmdq_pool_base is greater than 0,
2492 	 * the queue IDs of the VMDq pools start after the PF queues.
2493 	 */
2494 	if (dcb_mode == DCB_VT_ENABLED &&
2495 	    rte_port->dev_info.vmdq_pool_base > 0) {
2496 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2497 			" for port %d.\n", pid);
2498 		return -1;
2499 	}
2500 
2501 	/* Assume all ports in testpmd have the same DCB capability
2502 	 * and the same number of rxq and txq in DCB mode.
2503 	 */
2504 	if (dcb_mode == DCB_VT_ENABLED) {
2505 		if (rte_port->dev_info.max_vfs > 0) {
2506 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2507 			nb_txq = rte_port->dev_info.nb_tx_queues;
2508 		} else {
2509 			nb_rxq = rte_port->dev_info.max_rx_queues;
2510 			nb_txq = rte_port->dev_info.max_tx_queues;
2511 		}
2512 	} else {
2513 		/* If VT is disabled, use all PF queues */
2514 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2515 			nb_rxq = rte_port->dev_info.max_rx_queues;
2516 			nb_txq = rte_port->dev_info.max_tx_queues;
2517 		} else {
2518 			nb_rxq = (queueid_t)num_tcs;
2519 			nb_txq = (queueid_t)num_tcs;
2521 		}
2522 	}
2523 	rx_free_thresh = 64;
2524 
2525 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2526 
2527 	rxtx_port_config(rte_port);
2528 	/* VLAN filter */
2529 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2530 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2531 		rx_vft_set(pid, vlan_tags[i], 1);
2532 
2533 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2534 	map_port_queue_stats_mapping_registers(pid, rte_port);
2535 
2536 	rte_port->dcb_flag = 1;
2537 
2538 	return 0;
2539 }
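
/*
 * Illustrative sketch (not compiled in): the interactive command
 * "port config 0 dcb vt off 4 pfc on" reduces to roughly this sequence
 * (the port must be stopped before reconfiguration):
 */
#if 0
static void
dcb_config_example(void)
{
	stop_port(0);
	if (init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) != 0)
		printf("DCB configuration failed for port 0\n");
	else
		start_port(0);
}
#endif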
2540 
2541 static void
2542 init_port(void)
2543 {
2544 	/* Configuration of Ethernet ports. */
2545 	ports = rte_zmalloc("testpmd: ports",
2546 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2547 			    RTE_CACHE_LINE_SIZE);
2548 	if (ports == NULL) {
2549 		rte_exit(EXIT_FAILURE,
2550 				"rte_zmalloc(%d struct rte_port) failed\n",
2551 				RTE_MAX_ETHPORTS);
2552 	}
2553 }
2554 
2555 static void
2556 force_quit(void)
2557 {
2558 	pmd_test_exit();
2559 	prompt_exit();
2560 }
2561 
2562 static void
2563 print_stats(void)
2564 {
2565 	uint8_t i;
2566 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2567 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2568 
2569 	/* Clear screen and move to top left */
2570 	printf("%s%s", clr, top_left);
2571 
2572 	printf("\nPort statistics ====================================");
2573 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2574 		nic_stats_display(fwd_ports_ids[i]);
2575 }
2576 
2577 static void
2578 signal_handler(int signum)
2579 {
2580 	if (signum == SIGINT || signum == SIGTERM) {
2581 		printf("\nSignal %d received, preparing to exit...\n",
2582 				signum);
2583 #ifdef RTE_LIBRTE_PDUMP
2584 		/* uninitialize packet capture framework */
2585 		rte_pdump_uninit();
2586 #endif
2587 #ifdef RTE_LIBRTE_LATENCY_STATS
2588 		rte_latencystats_uninit();
2589 #endif
2590 		force_quit();
2591 		/* Set flag to indicate the force termination. */
2592 		f_quit = 1;
2593 		/* exit with the expected status */
2594 		signal(signum, SIG_DFL);
2595 		kill(getpid(), signum);
2596 	}
2597 }
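
/*
 * Note on the handler above: restoring SIG_DFL and re-raising the signal
 * via kill(getpid(), signum) makes the process terminate by the original
 * signal, so the parent observes the conventional wait status instead of
 * a plain exit code.
 */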
2598 
2599 int
2600 main(int argc, char** argv)
2601 {
2602 	int diag;
2603 	portid_t port_id;
2604 	int ret;
2605 
2606 	signal(SIGINT, signal_handler);
2607 	signal(SIGTERM, signal_handler);
2608 
2609 	diag = rte_eal_init(argc, argv);
2610 	if (diag < 0)
2611 		rte_panic("Cannot init EAL\n");
2612 
2613 	testpmd_logtype = rte_log_register("testpmd");
2614 	if (testpmd_logtype < 0)
2615 		rte_panic("Cannot register log type\n");
2616 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2617 
2618 	/* Bitrate/latency stats disabled by default */
2619 #ifdef RTE_LIBRTE_BITRATE
2620 	bitrate_enabled = 0;
2621 #endif
2622 #ifdef RTE_LIBRTE_LATENCY_STATS
2623 	latencystats_enabled = 0;
2624 #endif
2625 
2626 	/* on FreeBSD, mlockall() is disabled by default */
2627 #ifdef RTE_EXEC_ENV_BSDAPP
2628 	do_mlockall = 0;
2629 #else
2630 	do_mlockall = 1;
2631 #endif
2632 
2633 	argc -= diag;
2634 	argv += diag;
2635 	if (argc > 1)
2636 		launch_args_parse(argc, argv);
2637 
2638 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2639 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2640 			strerror(errno));
2641 	}
2642 
2643 #ifdef RTE_LIBRTE_PDUMP
2644 	/* initialize packet capture framework */
2645 	rte_pdump_init(NULL);
2646 #endif
2647 
2648 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2649 	if (nb_ports == 0)
2650 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2651 
2652 	/* allocate port structures, and init them */
2653 	init_port();
2654 
2655 	set_def_fwd_config();
2656 	if (nb_lcores == 0)
2657 		rte_panic("Empty set of forwarding logical cores - check the "
2658 			  "core mask supplied in the command parameters\n");
2659 
2660 	if (tx_first && interactive)
2661 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2662 				"interactive mode.\n");
2663 
2664 	if (tx_first && lsc_interrupt) {
2665 		printf("Warning: lsc_interrupt needs to be off when "
2666 				"using tx_first. Disabling.\n");
2667 		lsc_interrupt = 0;
2668 	}
2669 
2670 	if (!nb_rxq && !nb_txq)
2671 		printf("Warning: Either rx or tx queues should be non-zero\n");
2672 
2673 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2674 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2675 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2676 		       nb_rxq, nb_txq);
2677 
2678 	init_config();
2679 
2680 	if (hot_plug) {
2681 		/* enable hot plug monitoring */
2682 		ret = rte_dev_event_monitor_start();
2683 		if (ret) {
2684 			rte_errno = EINVAL;
2685 			return -1;
2686 		}
2687 		eth_dev_event_callback_register();
2688 	}
2690 
2691 	if (start_port(RTE_PORT_ALL) != 0)
2692 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2693 
2694 	/* set all ports to promiscuous mode by default */
2695 	RTE_ETH_FOREACH_DEV(port_id)
2696 		rte_eth_promiscuous_enable(port_id);
2697 
2698 	/* Init metrics library */
2699 	rte_metrics_init(rte_socket_id());
2700 
2701 #ifdef RTE_LIBRTE_LATENCY_STATS
2702 	if (latencystats_enabled != 0) {
2703 		int ret = rte_latencystats_init(1, NULL);
2704 		if (ret)
2705 			printf("Warning: latencystats init() "
2706 				"returned error %d\n", ret);
		else
2707 			printf("Latencystats running on lcore %d\n",
2708 				latencystats_lcore_id);
2709 	}
2710 #endif
2711 
2712 	/* Setup bitrate stats */
2713 #ifdef RTE_LIBRTE_BITRATE
2714 	if (bitrate_enabled != 0) {
2715 		bitrate_data = rte_stats_bitrate_create();
2716 		if (bitrate_data == NULL)
2717 			rte_exit(EXIT_FAILURE,
2718 				"Could not allocate bitrate data.\n");
2719 		rte_stats_bitrate_reg(bitrate_data);
2720 	}
2721 #endif
2722 
2723 #ifdef RTE_LIBRTE_CMDLINE
2724 	if (strlen(cmdline_filename) != 0)
2725 		cmdline_read_from_file(cmdline_filename);
2726 
2727 	if (interactive == 1) {
2728 		if (auto_start) {
2729 			printf("Start automatic packet forwarding\n");
2730 			start_packet_forwarding(0);
2731 		}
2732 		prompt();
2733 		pmd_test_exit();
2734 	} else
2735 #endif
2736 	{
2737 		char c;
2738 		int rc;
2739 
2740 		f_quit = 0;
2741 
2742 		printf("No command line given; starting packet forwarding\n");
2743 		start_packet_forwarding(tx_first);
2744 		if (stats_period != 0) {
2745 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2746 			uint64_t timer_period;
2747 
2748 			/* Convert to number of cycles */
2749 			timer_period = stats_period * rte_get_timer_hz();
2750 
2751 			while (f_quit == 0) {
2752 				cur_time = rte_get_timer_cycles();
2753 				diff_time += cur_time - prev_time;
2754 
2755 				if (diff_time >= timer_period) {
2756 					print_stats();
2757 					/* Reset the timer */
2758 					diff_time = 0;
2759 				}
2760 				/* Sleep to avoid unnecessary checks */
2761 				prev_time = cur_time;
2762 				sleep(1);
2763 			}
2764 		}
2765 
2766 		printf("Press enter to exit\n");
2767 		rc = read(0, &c, 1);
2768 		pmd_test_exit();
2769 		if (rc < 0)
2770 			return 1;
2771 	}
2772 
2773 	return 0;
2774 }
2775
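
/*
 * Typical invocations (illustrative; EAL options depend on the system):
 *
 *   testpmd -l 0-3 -n 4 -- -i                   start the interactive prompt
 *   testpmd -l 0-3 -n 4 -- --stats-period 5     forward and print stats every 5s
 */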