xref: /dpdk/app/test-pmd/testpmd.c (revision fb7b8b32cd958f03d3e327b4cd046cf0c815d3b3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63 
64 #include "testpmd.h"
65 
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68 
69 /* Use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
73 char cmdline_filename[PATH_MAX] = {0};
74 
75 /*
76  * NUMA support configuration.
77  * When set, the NUMA support attempts to dispatch the allocation of the
78  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79  * probed ports among the CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
82 uint8_t numa_support = 1; /**< numa enabled by default */
83 
84 /*
85  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86  * not configured.
87  */
88 uint8_t socket_num = UMA_NO_CONFIG;
89 
90 /*
91  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
92  */
93 uint8_t mp_anon = 0;
94 
95 /*
96  * Store the specified sockets on which the memory pools used by the ports
97  * are allocated.
98  */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100 
101 /*
102  * Store the specified sockets on which the RX rings used by the ports
103  * are allocated.
104  */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106 
107 /*
108  * Store the specified sockets on which the TX rings used by the ports
109  * are allocated.
110  */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112 
113 /*
114  * Record the Ethernet address of peer target ports to which packets are
115  * forwarded.
116  * Must be instantiated with the Ethernet addresses of peer traffic generator
117  * ports.
118  */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121 
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;	       /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129 
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
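/*
 * For example (hypothetical numbers): a run launched with 8 enabled lcores
 * and 4 probed ports, narrowed at runtime (e.g. with "set corelist" and
 * "set portlist") to 4 configured lcores and 2 configured ports, forwarding
 * on 2 lcores, satisfies:
 *     nb_fwd_lcores (2) <= nb_cfg_lcores (4) <= nb_lcores (8)
 *     nb_fwd_ports  (2) <= nb_cfg_ports  (2) <= nb_ports  (4)
 */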
139 
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142 
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
145 
146 /*
147  * Forwarding engines.
148  */
149 struct fwd_engine * fwd_engines[] = {
150 	&io_fwd_engine,
151 	&mac_fwd_engine,
152 	&mac_swap_engine,
153 	&flow_gen_engine,
154 	&rx_only_engine,
155 	&tx_only_engine,
156 	&csum_fwd_engine,
157 	&icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 	&softnic_tm_engine,
160 	&softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163 	&ieee1588_fwd_engine,
164 #endif
165 	NULL,
166 };
167 
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178 
179 /*
180  * In a container, a process running with the 'stats-period' option cannot be
181  * terminated. Set a flag to exit the stats-period loop on SIGINT/SIGTERM.
182  */
183 uint8_t f_quit;
184 
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
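/*
 * TX_PKT_SPLIT_OFF keeps the segment layout configured above; the other
 * enum tx_pkt_split policies declared in testpmd.h can be selected at
 * runtime with the "set txsplit (off|on|rand)" command.
 */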
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
200 /* Whether the current configuration is in DCB mode; 0 means it is not. */
201 uint8_t dcb_config = 0;
202 
203 /* Whether DCB is in testing status. */
204 uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
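/*
 * These correspond to the pthresh (prefetch), hthresh (host) and wthresh
 * (write-back) fields of struct rte_eth_thresh. Leaving a value at
 * RTE_PMD_PARAM_UNSET keeps the driver's default instead of overriding
 * the ring threshold register.
 */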
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX streams before forwarding starts.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273 
274 /*
275  * Avoid checking the link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290 
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
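/*
 * Sketch of how the mask is consumed (see eth_event_callback() declared
 * below): an event of a given "type" is printed only when its bit is set,
 * i.e. when (event_print_mask & (UINT32_C(1) << type)) is nonzero.
 * VF_MBOX is the one event type left out of the default mask.
 */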
302 /*
303  * Decide if all memory is locked for performance.
304  */
305 int do_mlockall = 0;
306 
307 /*
308  * NIC bypass mode configuration options.
309  */
310 
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
314 #endif
315 
316 
317 #ifdef RTE_LIBRTE_LATENCY_STATS
318 
319 /*
320  * Set when latency stats are enabled on the command line.
321  */
322 uint8_t latencystats_enabled;
323 
324 /*
325  * Lcore ID to service latency statistics.
326  */
327 lcoreid_t latencystats_lcore_id = -1;
328 
329 #endif
330 
331 /*
332  * Ethernet device configuration.
333  */
334 struct rte_eth_rxmode rx_mode = {
335 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
337 	.ignore_offload_bitfield = 1,
338 };
339 
340 struct rte_eth_txmode tx_mode = {
341 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 };
343 
344 struct rte_fdir_conf fdir_conf = {
345 	.mode = RTE_FDIR_MODE_NONE,
346 	.pballoc = RTE_FDIR_PBALLOC_64K,
347 	.status = RTE_FDIR_REPORT_STATUS,
348 	.mask = {
349 		.vlan_tci_mask = 0x0,
350 		.ipv4_mask     = {
351 			.src_ip = 0xFFFFFFFF,
352 			.dst_ip = 0xFFFFFFFF,
353 		},
354 		.ipv6_mask     = {
355 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357 		},
358 		.src_port_mask = 0xFFFF,
359 		.dst_port_mask = 0xFFFF,
360 		.mac_addr_byte_mask = 0xFF,
361 		.tunnel_type_mask = 1,
362 		.tunnel_id_mask = 0xFFFFFFFF,
363 	},
364 	.drop_queue = 127,
365 };
366 
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368 
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371 
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374 
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377 
378 /*
379  * Display zero values by default for xstats
380  */
381 uint8_t xstats_hide_zero;
382 
383 unsigned int num_sockets = 0;
384 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385 
386 #ifdef RTE_LIBRTE_BITRATE
387 /* Bitrate statistics */
388 struct rte_stats_bitrates *bitrate_data;
389 lcoreid_t bitrate_lcore_id;
390 uint8_t bitrate_enabled;
391 #endif
392 
393 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
394 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395 
396 /* Forward function declarations */
397 static void map_port_queue_stats_mapping_registers(portid_t pi,
398 						   struct rte_port *port);
399 static void check_all_ports_link_status(uint32_t port_mask);
400 static int eth_event_callback(portid_t port_id,
401 			      enum rte_eth_event_type type,
402 			      void *param, void *ret_param);
403 static void eth_dev_event_callback(char *device_name,
404 				enum rte_dev_event_type type,
405 				void *param);
406 static int eth_dev_event_callback_register(void);
407 static int eth_dev_event_callback_unregister(void);
408 
409 
410 /*
411  * Check if all the ports are started.
412  * If yes, return positive value. If not, return zero.
413  */
414 static int all_ports_started(void);
415 
416 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
417 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
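/*
 * With the standard rte_ether.h definitions (ETHER_MAX_LEN = 1518,
 * ETHER_CRC_LEN = 4), this default works out to 1514 bytes: a maximum
 * Ethernet frame minus its CRC.
 */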
418 
419 /*
420  * Helper function to check if a socket id is newly discovered.
421  * If the socket id is new, return a positive value; otherwise return zero.
422  */
423 int
424 new_socket_id(unsigned int socket_id)
425 {
426 	unsigned int i;
427 
428 	for (i = 0; i < num_sockets; i++) {
429 		if (socket_ids[i] == socket_id)
430 			return 0;
431 	}
432 	return 1;
433 }
434 
435 /*
436  * Setup default configuration.
437  */
438 static void
439 set_default_fwd_lcores_config(void)
440 {
441 	unsigned int i;
442 	unsigned int nb_lc;
443 	unsigned int sock_num;
444 
445 	nb_lc = 0;
446 	for (i = 0; i < RTE_MAX_LCORE; i++) {
447 		sock_num = rte_lcore_to_socket_id(i);
448 		if (new_socket_id(sock_num)) {
449 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
450 				rte_exit(EXIT_FAILURE,
451 					 "Total sockets greater than %u\n",
452 					 RTE_MAX_NUMA_NODES);
453 			}
454 			socket_ids[num_sockets++] = sock_num;
455 		}
456 		if (!rte_lcore_is_enabled(i))
457 			continue;
458 		if (i == rte_get_master_lcore())
459 			continue;
460 		fwd_lcores_cpuids[nb_lc++] = i;
461 	}
462 	nb_lcores = (lcoreid_t) nb_lc;
463 	nb_cfg_lcores = nb_lcores;
464 	nb_fwd_lcores = 1;
465 }
466 
467 static void
468 set_def_peer_eth_addrs(void)
469 {
470 	portid_t i;
471 
472 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
473 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
474 		peer_eth_addrs[i].addr_bytes[5] = i;
475 	}
476 }
477 
478 static void
479 set_default_fwd_ports_config(void)
480 {
481 	portid_t pt_id;
482 	int i = 0;
483 
484 	RTE_ETH_FOREACH_DEV(pt_id)
485 		fwd_ports_ids[i++] = pt_id;
486 
487 	nb_cfg_ports = nb_ports;
488 	nb_fwd_ports = nb_ports;
489 }
490 
491 void
492 set_def_fwd_config(void)
493 {
494 	set_default_fwd_lcores_config();
495 	set_def_peer_eth_addrs();
496 	set_default_fwd_ports_config();
497 }
498 
499 /*
500  * Configuration initialisation done once at init time.
501  */
502 static void
503 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
504 		 unsigned int socket_id)
505 {
506 	char pool_name[RTE_MEMPOOL_NAMESIZE];
507 	struct rte_mempool *rte_mp = NULL;
508 	uint32_t mb_size;
509 
510 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
511 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 
513 	TESTPMD_LOG(INFO,
514 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
515 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
516 
517 	if (mp_anon != 0) {
518 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
519 			mb_size, (unsigned) mb_mempool_cache,
520 			sizeof(struct rte_pktmbuf_pool_private),
521 			socket_id, 0);
522 		if (rte_mp == NULL)
523 			goto err;
524 
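		/* rte_mempool_populate_anon() returns the number of objects it
		 * populated, so a return of 0 means the anonymous mapping
		 * failed. */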
525 		if (rte_mempool_populate_anon(rte_mp) == 0) {
526 			rte_mempool_free(rte_mp);
527 			rte_mp = NULL;
528 			goto err;
529 		}
530 		rte_pktmbuf_pool_init(rte_mp, NULL);
531 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532 	} else {
533 		/* wrapper to rte_mempool_create() */
534 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
535 				rte_mbuf_best_mempool_ops());
536 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538 	}
539 
540 err:
541 	if (rte_mp == NULL) {
542 		rte_exit(EXIT_FAILURE,
543 			"Creation of mbuf pool for socket %u failed: %s\n",
544 			socket_id, rte_strerror(rte_errno));
545 	} else if (verbose_level > 0) {
546 		rte_mempool_dump(stdout, rte_mp);
547 	}
548 }
549 
550 /*
551  * Check whether the given socket id is valid in NUMA mode;
552  * if valid, return 0, else return -1.
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557 	static int warning_once = 0;
558 
559 	if (new_socket_id(socket_id)) {
560 		if (!warning_once && numa_support)
561 			printf("Warning: NUMA should be configured manually by"
562 			       " using --port-numa-config and"
563 			       " --ring-numa-config parameters along with"
564 			       " --numa.\n");
565 		warning_once = 1;
566 		return -1;
567 	}
568 	return 0;
569 }
570 
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid returns the port id which has the minimal value of
574  * max_rx_queues among all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580 	portid_t pi;
581 	struct rte_eth_dev_info dev_info;
582 
583 	RTE_ETH_FOREACH_DEV(pi) {
584 		rte_eth_dev_info_get(pi, &dev_info);
585 		if (dev_info.max_rx_queues < allowed_max_rxq) {
586 			allowed_max_rxq = dev_info.max_rx_queues;
587 			*pid = pi;
588 		}
589 	}
590 	return allowed_max_rxq;
591 }
592 
593 /*
594  * Check whether the input rxq is valid.
595  * If the input rxq is not greater than the maximum number
596  * of RX queues of every port, it is valid.
597  * If valid, return 0; else return -1.
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602 	queueid_t allowed_max_rxq;
603 	portid_t pid = 0;
604 
605 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 	if (rxq > allowed_max_rxq) {
607 		printf("Fail: input rxq (%u) can't be greater "
608 		       "than max_rx_queues (%u) of port %u\n",
609 		       rxq,
610 		       allowed_max_rxq,
611 		       pid);
612 		return -1;
613 	}
614 	return 0;
615 }
616 
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid returns the port id which has the minimal value of
620  * max_tx_queues among all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
626 	portid_t pi;
627 	struct rte_eth_dev_info dev_info;
628 
629 	RTE_ETH_FOREACH_DEV(pi) {
630 		rte_eth_dev_info_get(pi, &dev_info);
631 		if (dev_info.max_tx_queues < allowed_max_txq) {
632 			allowed_max_txq = dev_info.max_tx_queues;
633 			*pid = pi;
634 		}
635 	}
636 	return allowed_max_txq;
637 }
638 
639 /*
640  * Check whether the input txq is valid.
641  * If the input txq is not greater than the maximum number
642  * of TX queues of every port, it is valid.
643  * If valid, return 0; else return -1.
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648 	queueid_t allowed_max_txq;
649 	portid_t pid = 0;
650 
651 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 	if (txq > allowed_max_txq) {
653 		printf("Fail: input txq (%u) can't be greater "
654 		       "than max_tx_queues (%u) of port %u\n",
655 		       txq,
656 		       allowed_max_txq,
657 		       pid);
658 		return -1;
659 	}
660 	return 0;
661 }
662 
663 static void
664 init_config(void)
665 {
666 	portid_t pid;
667 	struct rte_port *port;
668 	struct rte_mempool *mbp;
669 	unsigned int nb_mbuf_per_pool;
670 	lcoreid_t  lc_id;
671 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 	struct rte_gro_param gro_param;
673 	uint32_t gso_types;
674 
675 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
676 
677 	if (numa_support) {
678 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
681 	}
682 
683 	/* Configuration of logical cores. */
684 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 				sizeof(struct fwd_lcore *) * nb_lcores,
686 				RTE_CACHE_LINE_SIZE);
687 	if (fwd_lcores == NULL) {
688 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 							"failed\n", nb_lcores);
690 	}
691 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 					       sizeof(struct fwd_lcore),
694 					       RTE_CACHE_LINE_SIZE);
695 		if (fwd_lcores[lc_id] == NULL) {
696 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
697 								"failed\n");
698 		}
699 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
700 	}
701 
702 	RTE_ETH_FOREACH_DEV(pid) {
703 		port = &ports[pid];
704 		/* Apply default TxRx configuration for all ports */
705 		port->dev_conf.txmode = tx_mode;
706 		port->dev_conf.rxmode = rx_mode;
707 		rte_eth_dev_info_get(pid, &port->dev_info);
708 		if (!(port->dev_info.tx_offload_capa &
709 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
710 			port->dev_conf.txmode.offloads &=
711 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
712 		if (numa_support) {
713 			if (port_numa[pid] != NUMA_NO_CONFIG)
714 				port_per_socket[port_numa[pid]]++;
715 			else {
716 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
717 
718 				/* if socket_id is invalid, set to 0 */
719 				if (check_socket_id(socket_id) < 0)
720 					socket_id = 0;
721 				port_per_socket[socket_id]++;
722 			}
723 		}
724 
725 		/* set flag to initialize port/queue */
726 		port->need_reconfig = 1;
727 		port->need_reconfig_queues = 1;
728 	}
729 
730 	/*
731 	 * Create pools of mbuf.
732 	 * If NUMA support is disabled, create a single pool of mbuf in
733 	 * socket 0 memory by default.
734 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
735 	 *
736 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
737 	 * nb_txd can be re-configured at run time.
738 	 */
739 	if (param_total_num_mbufs)
740 		nb_mbuf_per_pool = param_total_num_mbufs;
741 	else {
742 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
743 			(nb_lcores * mb_mempool_cache) +
744 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
745 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
746 	}
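	/*
	 * Worked example with illustrative defaults (RTE_TEST_RX_DESC_MAX and
	 * RTE_TEST_TX_DESC_MAX of 2048 each, MAX_PKT_BURST of 512), 4 lcores
	 * and a 250-entry mempool cache:
	 *     2048 + (4 * 250) + 2048 + 512 = 5608 mbufs,
	 * scaled by RTE_MAX_ETHPORTS so every possible port is covered.
	 */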
747 
748 	if (numa_support) {
749 		uint8_t i;
750 
751 		for (i = 0; i < num_sockets; i++)
752 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
753 					 socket_ids[i]);
754 	} else {
755 		if (socket_num == UMA_NO_CONFIG)
756 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
757 		else
758 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
759 						 socket_num);
760 	}
761 
762 	init_port_config();
763 
764 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
765 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
766 	/*
767 	 * Record which mbuf pool each logical core should use, if needed.
768 	 */
769 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
770 		mbp = mbuf_pool_find(
771 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
772 
773 		if (mbp == NULL)
774 			mbp = mbuf_pool_find(0);
775 		fwd_lcores[lc_id]->mbp = mbp;
776 		/* initialize GSO context */
777 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
778 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
779 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
780 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
781 			ETHER_CRC_LEN;
782 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
783 	}
784 
785 	/* Configuration of packet forwarding streams. */
786 	if (init_fwd_streams() < 0)
787 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
788 
789 	fwd_config_setup();
790 
791 	/* create a gro context for each lcore */
792 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
793 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
794 	gro_param.max_item_per_flow = MAX_PKT_BURST;
795 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
796 		gro_param.socket_id = rte_lcore_to_socket_id(
797 				fwd_lcores_cpuids[lc_id]);
798 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
799 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
800 			rte_exit(EXIT_FAILURE,
801 					"rte_gro_ctx_create() failed\n");
802 		}
803 	}
804 }
805 
806 
807 void
808 reconfig(portid_t new_port_id, unsigned socket_id)
809 {
810 	struct rte_port *port;
811 
812 	/* Reconfiguration of Ethernet ports. */
813 	port = &ports[new_port_id];
814 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
815 
816 	/* set flag to initialize port/queue */
817 	port->need_reconfig = 1;
818 	port->need_reconfig_queues = 1;
819 	port->socket_id = socket_id;
820 
821 	init_port_config();
822 }
823 
824 
825 int
826 init_fwd_streams(void)
827 {
828 	portid_t pid;
829 	struct rte_port *port;
830 	streamid_t sm_id, nb_fwd_streams_new;
831 	queueid_t q;
832 
833 	/* Set the socket id according to whether NUMA is enabled. */
834 	RTE_ETH_FOREACH_DEV(pid) {
835 		port = &ports[pid];
836 		if (nb_rxq > port->dev_info.max_rx_queues) {
837 			printf("Fail: nb_rxq(%d) is greater than "
838 				"max_rx_queues(%d)\n", nb_rxq,
839 				port->dev_info.max_rx_queues);
840 			return -1;
841 		}
842 		if (nb_txq > port->dev_info.max_tx_queues) {
843 			printf("Fail: nb_txq(%d) is greater than "
844 				"max_tx_queues(%d)\n", nb_txq,
845 				port->dev_info.max_tx_queues);
846 			return -1;
847 		}
848 		if (numa_support) {
849 			if (port_numa[pid] != NUMA_NO_CONFIG)
850 				port->socket_id = port_numa[pid];
851 			else {
852 				port->socket_id = rte_eth_dev_socket_id(pid);
853 
854 				/* if socket_id is invalid, set to 0 */
855 				if (check_socket_id(port->socket_id) < 0)
856 					port->socket_id = 0;
857 			}
858 		}
859 		else {
860 			if (socket_num == UMA_NO_CONFIG)
861 				port->socket_id = 0;
862 			else
863 				port->socket_id = socket_num;
864 		}
865 	}
866 
867 	q = RTE_MAX(nb_rxq, nb_txq);
868 	if (q == 0) {
869 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
870 		return -1;
871 	}
872 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
873 	if (nb_fwd_streams_new == nb_fwd_streams)
874 		return 0;
875 	/* clear the old */
876 	if (fwd_streams != NULL) {
877 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
878 			if (fwd_streams[sm_id] == NULL)
879 				continue;
880 			rte_free(fwd_streams[sm_id]);
881 			fwd_streams[sm_id] = NULL;
882 		}
883 		rte_free(fwd_streams);
884 		fwd_streams = NULL;
885 	}
886 
887 	/* init new */
888 	nb_fwd_streams = nb_fwd_streams_new;
889 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
890 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
891 	if (fwd_streams == NULL)
892 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
893 						"failed\n", nb_fwd_streams);
894 
895 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
896 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
897 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
898 		if (fwd_streams[sm_id] == NULL)
899 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
900 								" failed\n");
901 	}
902 
903 	return 0;
904 }
905 
906 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
907 static void
908 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
909 {
910 	unsigned int total_burst;
911 	unsigned int nb_burst;
912 	unsigned int burst_stats[3];
913 	uint16_t pktnb_stats[3];
914 	uint16_t nb_pkt;
915 	int burst_percent[3];
916 
917 	/*
918 	 * First compute the total number of packet bursts and the
919 	 * two highest numbers of bursts of the same number of packets.
920 	 */
921 	total_burst = 0;
922 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
923 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
924 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
925 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
926 		if (nb_burst == 0)
927 			continue;
928 		total_burst += nb_burst;
929 		if (nb_burst > burst_stats[0]) {
930 			burst_stats[1] = burst_stats[0];
931 			pktnb_stats[1] = pktnb_stats[0];
932 			burst_stats[0] = nb_burst;
933 			pktnb_stats[0] = nb_pkt;
934 		} else if (nb_burst > burst_stats[1]) {
			/* Also track the second-highest burst count, as the
			 * comment above promises. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
935 	}
936 	if (total_burst == 0)
937 		return;
938 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
939 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
940 	       burst_percent[0], (int) pktnb_stats[0]);
941 	if (burst_stats[0] == total_burst) {
942 		printf("]\n");
943 		return;
944 	}
945 	if (burst_stats[0] + burst_stats[1] == total_burst) {
946 		printf(" + %d%% of %d pkts]\n",
947 		       100 - burst_percent[0], pktnb_stats[1]);
948 		return;
949 	}
950 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
951 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
952 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
953 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
954 		return;
955 	}
956 	printf(" + %d%% of %d pkts + %d%% of others]\n",
957 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
958 }
959 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
960 
961 static void
962 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
963 {
964 	struct rte_port *port;
965 	uint8_t i;
966 
967 	static const char *fwd_stats_border = "----------------------";
968 
969 	port = &ports[port_id];
970 	printf("\n  %s Forward statistics for port %-2d %s\n",
971 	       fwd_stats_border, port_id, fwd_stats_border);
972 
973 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
974 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
975 		       "%-"PRIu64"\n",
976 		       stats->ipackets, stats->imissed,
977 		       (uint64_t) (stats->ipackets + stats->imissed));
978 
979 		if (cur_fwd_eng == &csum_fwd_engine)
980 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
981 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
982 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
983 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
984 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
985 		}
986 
987 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
988 		       "%-"PRIu64"\n",
989 		       stats->opackets, port->tx_dropped,
990 		       (uint64_t) (stats->opackets + port->tx_dropped));
991 	}
992 	else {
993 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
994 		       "%14"PRIu64"\n",
995 		       stats->ipackets, stats->imissed,
996 		       (uint64_t) (stats->ipackets + stats->imissed));
997 
998 		if (cur_fwd_eng == &csum_fwd_engine)
999 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1000 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1001 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1002 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1003 			printf("  RX-nombufs:             %14"PRIu64"\n",
1004 			       stats->rx_nombuf);
1005 		}
1006 
1007 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1008 		       "%14"PRIu64"\n",
1009 		       stats->opackets, port->tx_dropped,
1010 		       (uint64_t) (stats->opackets + port->tx_dropped));
1011 	}
1012 
1013 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1014 	if (port->rx_stream)
1015 		pkt_burst_stats_display("RX",
1016 			&port->rx_stream->rx_burst_stats);
1017 	if (port->tx_stream)
1018 		pkt_burst_stats_display("TX",
1019 			&port->tx_stream->tx_burst_stats);
1020 #endif
1021 
1022 	if (port->rx_queue_stats_mapping_enabled) {
1023 		printf("\n");
1024 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1025 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1026 			       "     RX-errors:%14"PRIu64
1027 			       "    RX-bytes:%14"PRIu64"\n",
1028 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1029 		}
1030 		printf("\n");
1031 	}
1032 	if (port->tx_queue_stats_mapping_enabled) {
1033 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1034 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1035 			       "                                 TX-bytes:%14"PRIu64"\n",
1036 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1037 		}
1038 	}
1039 
1040 	printf("  %s--------------------------------%s\n",
1041 	       fwd_stats_border, fwd_stats_border);
1042 }
1043 
1044 static void
1045 fwd_stream_stats_display(streamid_t stream_id)
1046 {
1047 	struct fwd_stream *fs;
1048 	static const char *fwd_top_stats_border = "-------";
1049 
1050 	fs = fwd_streams[stream_id];
1051 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1052 	    (fs->fwd_dropped == 0))
1053 		return;
1054 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1055 	       "TX Port=%2d/Queue=%2d %s\n",
1056 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1057 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1058 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1059 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1060 
1061 	/* if checksum mode */
1062 	if (cur_fwd_eng == &csum_fwd_engine) {
1063 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1064 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1065 	}
1066 
1067 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1068 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1069 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1070 #endif
1071 }
1072 
1073 static void
1074 flush_fwd_rx_queues(void)
1075 {
1076 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1077 	portid_t  rxp;
1078 	portid_t port_id;
1079 	queueid_t rxq;
1080 	uint16_t  nb_rx;
1081 	uint16_t  i;
1082 	uint8_t   j;
1083 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1084 	uint64_t timer_period;
1085 
1086 	/* convert to number of cycles */
1087 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1088 
1089 	for (j = 0; j < 2; j++) {
1090 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1091 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1092 				port_id = fwd_ports_ids[rxp];
1093 				/**
1094 				* testpmd can get stuck in the do-while loop below
1095 				* if rte_eth_rx_burst() always returns a nonzero
1096 				* number of packets. A timer is added to exit this
1097 				* loop after a 1-second expiry.
1098 				*/
1099 				prev_tsc = rte_rdtsc();
1100 				do {
1101 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1102 						pkts_burst, MAX_PKT_BURST);
1103 					for (i = 0; i < nb_rx; i++)
1104 						rte_pktmbuf_free(pkts_burst[i]);
1105 
1106 					cur_tsc = rte_rdtsc();
1107 					diff_tsc = cur_tsc - prev_tsc;
1108 					timer_tsc += diff_tsc;
1109 				} while ((nb_rx > 0) &&
1110 					(timer_tsc < timer_period));
1111 				timer_tsc = 0;
1112 			}
1113 		}
1114 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1115 	}
1116 }
1117 
1118 static void
1119 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1120 {
1121 	struct fwd_stream **fsm;
1122 	streamid_t nb_fs;
1123 	streamid_t sm_id;
1124 #ifdef RTE_LIBRTE_BITRATE
1125 	uint64_t tics_per_1sec;
1126 	uint64_t tics_datum;
1127 	uint64_t tics_current;
1128 	uint16_t idx_port;
1129 
1130 	tics_datum = rte_rdtsc();
1131 	tics_per_1sec = rte_get_timer_hz();
1132 #endif
1133 	fsm = &fwd_streams[fc->stream_idx];
1134 	nb_fs = fc->stream_nb;
1135 	do {
1136 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1137 			(*pkt_fwd)(fsm[sm_id]);
1138 #ifdef RTE_LIBRTE_BITRATE
1139 		if (bitrate_enabled != 0 &&
1140 				bitrate_lcore_id == rte_lcore_id()) {
1141 			tics_current = rte_rdtsc();
1142 			if (tics_current - tics_datum >= tics_per_1sec) {
1143 				/* Periodic bitrate calculation */
1144 				RTE_ETH_FOREACH_DEV(idx_port)
1145 					rte_stats_bitrate_calc(bitrate_data,
1146 						idx_port);
1147 				tics_datum = tics_current;
1148 			}
1149 		}
1150 #endif
1151 #ifdef RTE_LIBRTE_LATENCY_STATS
1152 		if (latencystats_enabled != 0 &&
1153 				latencystats_lcore_id == rte_lcore_id())
1154 			rte_latencystats_update();
1155 #endif
1156 
1157 	} while (! fc->stopped);
1158 }
1159 
1160 static int
1161 start_pkt_forward_on_core(void *fwd_arg)
1162 {
1163 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1164 			     cur_fwd_config.fwd_eng->packet_fwd);
1165 	return 0;
1166 }
1167 
1168 /*
1169  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1170  * Used to start communication flows in network loopback test configurations.
1171  */
1172 static int
1173 run_one_txonly_burst_on_core(void *fwd_arg)
1174 {
1175 	struct fwd_lcore *fwd_lc;
1176 	struct fwd_lcore tmp_lcore;
1177 
1178 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1179 	tmp_lcore = *fwd_lc;
1180 	tmp_lcore.stopped = 1;
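	/* With stopped already set, run_pkt_fwd_on_lcore() executes its
	 * do-while body exactly once: a single burst per stream. */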
1181 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1182 	return 0;
1183 }
1184 
1185 /*
1186  * Launch packet forwarding:
1187  *     - Setup per-port forwarding context.
1188  *     - launch logical cores with their forwarding configuration.
1189  */
1190 static void
1191 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1192 {
1193 	port_fwd_begin_t port_fwd_begin;
1194 	unsigned int i;
1195 	unsigned int lc_id;
1196 	int diag;
1197 
1198 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1199 	if (port_fwd_begin != NULL) {
1200 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1201 			(*port_fwd_begin)(fwd_ports_ids[i]);
1202 	}
1203 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1204 		lc_id = fwd_lcores_cpuids[i];
1205 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1206 			fwd_lcores[i]->stopped = 0;
1207 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1208 						     fwd_lcores[i], lc_id);
1209 			if (diag != 0)
1210 				printf("launch lcore %u failed - diag=%d\n",
1211 				       lc_id, diag);
1212 		}
1213 	}
1214 }
1215 
1216 /*
1217  * Launch packet forwarding configuration.
1218  */
1219 void
1220 start_packet_forwarding(int with_tx_first)
1221 {
1222 	port_fwd_begin_t port_fwd_begin;
1223 	port_fwd_end_t  port_fwd_end;
1224 	struct rte_port *port;
1225 	unsigned int i;
1226 	portid_t   pt_id;
1227 	streamid_t sm_id;
1228 
1229 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1230 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1231 
1232 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1233 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1234 
1235 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1236 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1237 		(!nb_rxq || !nb_txq))
1238 		rte_exit(EXIT_FAILURE,
1239 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1240 			cur_fwd_eng->fwd_mode_name);
1241 
1242 	if (all_ports_started() == 0) {
1243 		printf("Not all ports were started\n");
1244 		return;
1245 	}
1246 	if (test_done == 0) {
1247 		printf("Packet forwarding already started\n");
1248 		return;
1249 	}
1250 
1251 	if (init_fwd_streams() < 0) {
1252 		printf("Fail from init_fwd_streams()\n");
1253 		return;
1254 	}
1255 
1256 	if (dcb_test) {
1257 		for (i = 0; i < nb_fwd_ports; i++) {
1258 			pt_id = fwd_ports_ids[i];
1259 			port = &ports[pt_id];
1260 			if (!port->dcb_flag) {
1261 				printf("In DCB mode, all forwarding ports must "
1262                                        "be configured in this mode.\n");
1263 				return;
1264 			}
1265 		}
1266 		if (nb_fwd_lcores == 1) {
1267 			printf("In DCB mode, the number of forwarding cores "
1268                                "should be larger than 1.\n");
1269 			return;
1270 		}
1271 	}
1272 	test_done = 0;
1273 
1274 	if (!no_flush_rx)
1275 		flush_fwd_rx_queues();
1276 
1277 	fwd_config_setup();
1278 	pkt_fwd_config_display(&cur_fwd_config);
1279 	rxtx_config_display();
1280 
1281 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1282 		pt_id = fwd_ports_ids[i];
1283 		port = &ports[pt_id];
1284 		rte_eth_stats_get(pt_id, &port->stats);
1285 		port->tx_dropped = 0;
1286 
1287 		map_port_queue_stats_mapping_registers(pt_id, port);
1288 	}
1289 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1290 		fwd_streams[sm_id]->rx_packets = 0;
1291 		fwd_streams[sm_id]->tx_packets = 0;
1292 		fwd_streams[sm_id]->fwd_dropped = 0;
1293 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1294 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1295 
1296 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1297 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1298 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1299 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1300 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1301 #endif
1302 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1303 		fwd_streams[sm_id]->core_cycles = 0;
1304 #endif
1305 	}
1306 	if (with_tx_first) {
1307 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1308 		if (port_fwd_begin != NULL) {
1309 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1310 				(*port_fwd_begin)(fwd_ports_ids[i]);
1311 		}
1312 		while (with_tx_first--) {
1313 			launch_packet_forwarding(
1314 					run_one_txonly_burst_on_core);
1315 			rte_eal_mp_wait_lcore();
1316 		}
1317 		port_fwd_end = tx_only_engine.port_fwd_end;
1318 		if (port_fwd_end != NULL) {
1319 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1320 				(*port_fwd_end)(fwd_ports_ids[i]);
1321 		}
1322 	}
1323 	launch_packet_forwarding(start_pkt_forward_on_core);
1324 }
1325 
1326 void
1327 stop_packet_forwarding(void)
1328 {
1329 	struct rte_eth_stats stats;
1330 	struct rte_port *port;
1331 	port_fwd_end_t  port_fwd_end;
1332 	int i;
1333 	portid_t   pt_id;
1334 	streamid_t sm_id;
1335 	lcoreid_t  lc_id;
1336 	uint64_t total_recv;
1337 	uint64_t total_xmit;
1338 	uint64_t total_rx_dropped;
1339 	uint64_t total_tx_dropped;
1340 	uint64_t total_rx_nombuf;
1341 	uint64_t tx_dropped;
1342 	uint64_t rx_bad_ip_csum;
1343 	uint64_t rx_bad_l4_csum;
1344 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1345 	uint64_t fwd_cycles;
1346 #endif
1347 
1348 	static const char *acc_stats_border = "+++++++++++++++";
1349 
1350 	if (test_done) {
1351 		printf("Packet forwarding not started\n");
1352 		return;
1353 	}
1354 	printf("Telling cores to stop...");
1355 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1356 		fwd_lcores[lc_id]->stopped = 1;
1357 	printf("\nWaiting for lcores to finish...\n");
1358 	rte_eal_mp_wait_lcore();
1359 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1360 	if (port_fwd_end != NULL) {
1361 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1362 			pt_id = fwd_ports_ids[i];
1363 			(*port_fwd_end)(pt_id);
1364 		}
1365 	}
1366 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1367 	fwd_cycles = 0;
1368 #endif
1369 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1370 		if (cur_fwd_config.nb_fwd_streams >
1371 		    cur_fwd_config.nb_fwd_ports) {
1372 			fwd_stream_stats_display(sm_id);
1373 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1374 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1375 		} else {
1376 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1377 				fwd_streams[sm_id];
1378 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1379 				fwd_streams[sm_id];
1380 		}
1381 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1382 		tx_dropped = (uint64_t) (tx_dropped +
1383 					 fwd_streams[sm_id]->fwd_dropped);
1384 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1385 
1386 		rx_bad_ip_csum =
1387 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1388 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1389 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1390 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1391 							rx_bad_ip_csum;
1392 
1393 		rx_bad_l4_csum =
1394 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1395 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1396 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1397 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1398 							rx_bad_l4_csum;
1399 
1400 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1401 		fwd_cycles = (uint64_t) (fwd_cycles +
1402 					 fwd_streams[sm_id]->core_cycles);
1403 #endif
1404 	}
1405 	total_recv = 0;
1406 	total_xmit = 0;
1407 	total_rx_dropped = 0;
1408 	total_tx_dropped = 0;
1409 	total_rx_nombuf  = 0;
1410 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1411 		pt_id = fwd_ports_ids[i];
1412 
1413 		port = &ports[pt_id];
1414 		rte_eth_stats_get(pt_id, &stats);
1415 		stats.ipackets -= port->stats.ipackets;
1416 		port->stats.ipackets = 0;
1417 		stats.opackets -= port->stats.opackets;
1418 		port->stats.opackets = 0;
1419 		stats.ibytes   -= port->stats.ibytes;
1420 		port->stats.ibytes = 0;
1421 		stats.obytes   -= port->stats.obytes;
1422 		port->stats.obytes = 0;
1423 		stats.imissed  -= port->stats.imissed;
1424 		port->stats.imissed = 0;
1425 		stats.oerrors  -= port->stats.oerrors;
1426 		port->stats.oerrors = 0;
1427 		stats.rx_nombuf -= port->stats.rx_nombuf;
1428 		port->stats.rx_nombuf = 0;
1429 
1430 		total_recv += stats.ipackets;
1431 		total_xmit += stats.opackets;
1432 		total_rx_dropped += stats.imissed;
1433 		total_tx_dropped += port->tx_dropped;
1434 		total_rx_nombuf  += stats.rx_nombuf;
1435 
1436 		fwd_port_stats_display(pt_id, &stats);
1437 	}
1438 
1439 	printf("\n  %s Accumulated forward statistics for all ports"
1440 	       "%s\n",
1441 	       acc_stats_border, acc_stats_border);
1442 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1443 	       "%-"PRIu64"\n"
1444 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1445 	       "%-"PRIu64"\n",
1446 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1447 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1448 	if (total_rx_nombuf > 0)
1449 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1450 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1451 	       "%s\n",
1452 	       acc_stats_border, acc_stats_border);
1453 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1454 	if (total_recv > 0)
1455 		printf("\n  CPU cycles/packet=%u (total cycles="
1456 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1457 		       (unsigned int)(fwd_cycles / total_recv),
1458 		       fwd_cycles, total_recv);
1459 #endif
1460 	printf("\nDone.\n");
1461 	test_done = 1;
1462 }
1463 
1464 void
1465 dev_set_link_up(portid_t pid)
1466 {
1467 	if (rte_eth_dev_set_link_up(pid) < 0)
1468 		printf("\nSet link up fail.\n");
1469 }
1470 
1471 void
1472 dev_set_link_down(portid_t pid)
1473 {
1474 	if (rte_eth_dev_set_link_down(pid) < 0)
1475 		printf("\nSet link down fail.\n");
1476 }
1477 
1478 static int
1479 all_ports_started(void)
1480 {
1481 	portid_t pi;
1482 	struct rte_port *port;
1483 
1484 	RTE_ETH_FOREACH_DEV(pi) {
1485 		port = &ports[pi];
1486 		/* Check if there is a port which is not started */
1487 		if ((port->port_status != RTE_PORT_STARTED) &&
1488 			(port->slave_flag == 0))
1489 			return 0;
1490 	}
1491 
1492 	/* All ports are started */
1493 	return 1;
1494 }
1495 
1496 int
1497 port_is_stopped(portid_t port_id)
1498 {
1499 	struct rte_port *port = &ports[port_id];
1500 
1501 	if ((port->port_status != RTE_PORT_STOPPED) &&
1502 	    (port->slave_flag == 0))
1503 		return 0;
1504 	return 1;
1505 }
1506 
1507 int
1508 all_ports_stopped(void)
1509 {
1510 	portid_t pi;
1511 
1512 	RTE_ETH_FOREACH_DEV(pi) {
1513 		if (!port_is_stopped(pi))
1514 			return 0;
1515 	}
1516 
1517 	return 1;
1518 }
1519 
1520 int
1521 port_is_started(portid_t port_id)
1522 {
1523 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1524 		return 0;
1525 
1526 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1527 		return 0;
1528 
1529 	return 1;
1530 }
1531 
1532 static int
1533 port_is_closed(portid_t port_id)
1534 {
1535 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1536 		return 0;
1537 
1538 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1539 		return 0;
1540 
1541 	return 1;
1542 }
1543 
1544 int
1545 start_port(portid_t pid)
1546 {
1547 	int diag, need_check_link_status = -1;
1548 	portid_t pi;
1549 	queueid_t qi;
1550 	struct rte_port *port;
1551 	struct ether_addr mac_addr;
1552 	enum rte_eth_event_type event_type;
1553 
1554 	if (port_id_is_invalid(pid, ENABLED_WARN))
1555 		return 0;
1556 
1557 	if (dcb_config)
1558 		dcb_test = 1;
1559 	RTE_ETH_FOREACH_DEV(pi) {
1560 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1561 			continue;
1562 
1563 		need_check_link_status = 0;
1564 		port = &ports[pi];
1565 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1566 						 RTE_PORT_HANDLING) == 0) {
1567 			printf("Port %d is not stopped\n", pi);
1568 			continue;
1569 		}
1570 
1571 		if (port->need_reconfig > 0) {
1572 			port->need_reconfig = 0;
1573 
1574 			if (flow_isolate_all) {
1575 				int ret = port_flow_isolate(pi, 1);
1576 				if (ret) {
1577 					printf("Failed to apply isolated"
1578 					       " mode on port %d\n", pi);
1579 					return -1;
1580 				}
1581 			}
1582 
1583 			printf("Configuring Port %d (socket %u)\n", pi,
1584 					port->socket_id);
1585 			/* configure port */
1586 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1587 						&(port->dev_conf));
1588 			if (diag != 0) {
1589 				if (rte_atomic16_cmpset(&(port->port_status),
1590 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1591 					printf("Port %d can not be set back "
1592 							"to stopped\n", pi);
1593 				printf("Fail to configure port %d\n", pi);
1594 				/* try to reconfigure port next time */
1595 				port->need_reconfig = 1;
1596 				return -1;
1597 			}
1598 		}
1599 		if (port->need_reconfig_queues > 0) {
1600 			port->need_reconfig_queues = 0;
1601 			/* setup tx queues */
1602 			for (qi = 0; qi < nb_txq; qi++) {
1603 				port->tx_conf[qi].txq_flags =
1604 					ETH_TXQ_FLAGS_IGNORE;
1605 				/* Apply Tx offloads configuration */
1606 				port->tx_conf[qi].offloads =
1607 					port->dev_conf.txmode.offloads;
1608 				if ((numa_support) &&
1609 					(txring_numa[pi] != NUMA_NO_CONFIG))
1610 					diag = rte_eth_tx_queue_setup(pi, qi,
1611 						port->nb_tx_desc[qi],
1612 						txring_numa[pi],
1613 						&(port->tx_conf[qi]));
1614 				else
1615 					diag = rte_eth_tx_queue_setup(pi, qi,
1616 						port->nb_tx_desc[qi],
1617 						port->socket_id,
1618 						&(port->tx_conf[qi]));
1619 
1620 				if (diag == 0)
1621 					continue;
1622 
1623 				/* Fail to setup tx queue, return */
1624 				if (rte_atomic16_cmpset(&(port->port_status),
1625 							RTE_PORT_HANDLING,
1626 							RTE_PORT_STOPPED) == 0)
1627 					printf("Port %d can not be set back "
1628 							"to stopped\n", pi);
1629 				printf("Fail to configure port %d tx queues\n",
1630 				       pi);
1631 				/* try to reconfigure queues next time */
1632 				port->need_reconfig_queues = 1;
1633 				return -1;
1634 			}
1635 			for (qi = 0; qi < nb_rxq; qi++) {
1636 				/* Apply Rx offloads configuration */
1637 				port->rx_conf[qi].offloads =
1638 					port->dev_conf.rxmode.offloads;
1639 				/* setup rx queues */
1640 				if ((numa_support) &&
1641 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1642 					struct rte_mempool *mp =
1643 						mbuf_pool_find(rxring_numa[pi]);
1644 					if (mp == NULL) {
1645 						printf("Failed to setup RX queue: "
1646 							"No mempool allocation"
1647 							" on the socket %d\n",
1648 							rxring_numa[pi]);
1649 						return -1;
1650 					}
1651 
1652 					diag = rte_eth_rx_queue_setup(pi, qi,
1653 					     port->nb_rx_desc[qi],
1654 					     rxring_numa[pi],
1655 					     &(port->rx_conf[qi]),
1656 					     mp);
1657 				} else {
1658 					struct rte_mempool *mp =
1659 						mbuf_pool_find(port->socket_id);
1660 					if (mp == NULL) {
1661 						printf("Failed to setup RX queue: "
1662 							"No mempool allocation"
1663 							" on the socket %d\n",
1664 							port->socket_id);
1665 						return -1;
1666 					}
1667 					diag = rte_eth_rx_queue_setup(pi, qi,
1668 					     port->nb_rx_desc[qi],
1669 					     port->socket_id,
1670 					     &(port->rx_conf[qi]),
1671 					     mp);
1672 				}
1673 				if (diag == 0)
1674 					continue;
1675 
1676 				/* Fail to setup rx queue, return */
1677 				if (rte_atomic16_cmpset(&(port->port_status),
1678 							RTE_PORT_HANDLING,
1679 							RTE_PORT_STOPPED) == 0)
1680 					printf("Port %d can not be set back "
1681 							"to stopped\n", pi);
1682 				printf("Fail to configure port %d rx queues\n",
1683 				       pi);
1684 				/* try to reconfigure queues next time */
1685 				port->need_reconfig_queues = 1;
1686 				return -1;
1687 			}
1688 		}
1689 
1690 		/* start port */
1691 		if (rte_eth_dev_start(pi) < 0) {
1692 			printf("Fail to start port %d\n", pi);
1693 
1694 			/* Fail to setup rx queue, return */
1695 			if (rte_atomic16_cmpset(&(port->port_status),
1696 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1697 				printf("Port %d can not be set back to "
1698 							"stopped\n", pi);
1699 			continue;
1700 		}
1701 
1702 		if (rte_atomic16_cmpset(&(port->port_status),
1703 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1704 			printf("Port %d can not be set into started\n", pi);
1705 
1706 		rte_eth_macaddr_get(pi, &mac_addr);
1707 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1708 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1709 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1710 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1711 
1712 		/* at least one port started, need to check link status */
1713 		need_check_link_status = 1;
1714 	}
1715 
1716 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1717 	     event_type < RTE_ETH_EVENT_MAX;
1718 	     event_type++) {
1719 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1720 						event_type,
1721 						eth_event_callback,
1722 						NULL);
1723 		if (diag) {
1724 			printf("Failed to setup event callback for event %d\n",
1725 				event_type);
1726 			return -1;
1727 		}
1728 	}
1729 
1730 	if (need_check_link_status == 1 && !no_link_check)
1731 		check_all_ports_link_status(RTE_PORT_ALL);
1732 	else if (need_check_link_status == 0)
1733 		printf("Please stop the ports first\n");
1734 
1735 	printf("Done\n");
1736 	return 0;
1737 }
1738 
1739 void
1740 stop_port(portid_t pid)
1741 {
1742 	portid_t pi;
1743 	struct rte_port *port;
1744 	int need_check_link_status = 0;
1745 
1746 	if (dcb_test) {
1747 		dcb_test = 0;
1748 		dcb_config = 0;
1749 	}
1750 
1751 	if (port_id_is_invalid(pid, ENABLED_WARN))
1752 		return;
1753 
1754 	printf("Stopping ports...\n");
1755 
1756 	RTE_ETH_FOREACH_DEV(pi) {
1757 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1758 			continue;
1759 
1760 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1761 			printf("Please remove port %d from forwarding configuration.\n", pi);
1762 			continue;
1763 		}
1764 
1765 		if (port_is_bonding_slave(pi)) {
1766 			printf("Please remove port %d from bonded device.\n", pi);
1767 			continue;
1768 		}
1769 
1770 		port = &ports[pi];
1771 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1772 						RTE_PORT_HANDLING) == 0)
1773 			continue;
1774 
1775 		rte_eth_dev_stop(pi);
1776 
1777 		if (rte_atomic16_cmpset(&(port->port_status),
1778 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1779 			printf("Port %d can not be set into stopped\n", pi);
1780 		need_check_link_status = 1;
1781 	}
1782 	if (need_check_link_status && !no_link_check)
1783 		check_all_ports_link_status(RTE_PORT_ALL);
1784 
1785 	printf("Done\n");
1786 }
1787 
1788 void
1789 close_port(portid_t pid)
1790 {
1791 	portid_t pi;
1792 	struct rte_port *port;
1793 
1794 	if (port_id_is_invalid(pid, ENABLED_WARN))
1795 		return;
1796 
1797 	printf("Closing ports...\n");
1798 
1799 	RTE_ETH_FOREACH_DEV(pi) {
1800 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1801 			continue;
1802 
1803 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1804 			printf("Please remove port %d from forwarding configuration.\n", pi);
1805 			continue;
1806 		}
1807 
1808 		if (port_is_bonding_slave(pi)) {
1809 			printf("Please remove port %d from bonded device.\n", pi);
1810 			continue;
1811 		}
1812 
1813 		port = &ports[pi];
1814 		if (rte_atomic16_cmpset(&(port->port_status),
1815 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1816 			printf("Port %d is already closed\n", pi);
1817 			continue;
1818 		}
1819 
1820 		if (rte_atomic16_cmpset(&(port->port_status),
1821 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1822 			printf("Port %d is not stopped\n", pi);
1823 			continue;
1824 		}
1825 
1826 		if (port->flow_list)
1827 			port_flow_flush(pi);
1828 		rte_eth_dev_close(pi);
1829 
1830 		if (rte_atomic16_cmpset(&(port->port_status),
1831 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1832 			printf("Port %d cannot be set to closed\n", pi);
1833 	}
1834 
1835 	printf("Done\n");
1836 }
1837 
1838 void
1839 reset_port(portid_t pid)
1840 {
1841 	int diag;
1842 	portid_t pi;
1843 	struct rte_port *port;
1844 
1845 	if (port_id_is_invalid(pid, ENABLED_WARN))
1846 		return;
1847 
1848 	printf("Resetting ports...\n");
1849 
1850 	RTE_ETH_FOREACH_DEV(pi) {
1851 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1852 			continue;
1853 
1854 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1855 			printf("Please remove port %d from forwarding "
1856 			       "configuration.\n", pi);
1857 			continue;
1858 		}
1859 
1860 		if (port_is_bonding_slave(pi)) {
1861 			printf("Please remove port %d from bonded device.\n",
1862 			       pi);
1863 			continue;
1864 		}
1865 
1866 		diag = rte_eth_dev_reset(pi);
1867 		if (diag == 0) {
1868 			port = &ports[pi];
1869 			port->need_reconfig = 1;
1870 			port->need_reconfig_queues = 1;
1871 		} else {
1872 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1873 		}
1874 	}
1875 
1876 	printf("Done\n");
1877 }
1878 
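/*
 * Register eth_dev_event_callback() for hot plug device events on all
 * devices (a NULL device name matches every device).
 */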
1879 static int
1880 eth_dev_event_callback_register(void)
1881 {
1882 	int ret;
1883 
1884 	/* register the device event callback */
1885 	ret = rte_dev_event_callback_register(NULL,
1886 		eth_dev_event_callback, NULL);
1887 	if (ret) {
1888 		printf("Failed to register device event callback\n");
1889 		return -1;
1890 	}
1891 
1892 	return 0;
1893 }
1894 
1895 
1896 static int
1897 eth_dev_event_callback_unregister(void)
1898 {
1899 	int ret;
1900 
1901 	/* unregister the device event callback */
1902 	ret = rte_dev_event_callback_unregister(NULL,
1903 		eth_dev_event_callback, NULL);
1904 	if (ret < 0) {
1905 		printf("Failed to unregister device event callback\n");
1906 		return -1;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
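/*
 * Attach a new port given a device identifier, i.e. a PCI address such as
 * 0000:01:00.0 (illustrative) or a virtual device name. The new port is
 * reconfigured, set promiscuous, and left in the STOPPED state.
 */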
1912 void
1913 attach_port(char *identifier)
1914 {
1915 	portid_t pi = 0;
1916 	unsigned int socket_id;
1917 
1918 	printf("Attaching a new port...\n");
1919 
1920 	if (identifier == NULL) {
1921 		printf("Invalid parameter specified\n");
1922 		return;
1923 	}
1924 
1925 	if (rte_eth_dev_attach(identifier, &pi))
1926 		return;
1927 
1928 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1929 	/* if socket_id is invalid, set to 0 */
1930 	if (check_socket_id(socket_id) < 0)
1931 		socket_id = 0;
1932 	reconfig(pi, socket_id);
1933 	rte_eth_promiscuous_enable(pi);
1934 
1935 	nb_ports = rte_eth_dev_count_avail();
1936 
1937 	ports[pi].port_status = RTE_PORT_STOPPED;
1938 
1939 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1940 	printf("Done\n");
1941 }
1942 
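/*
 * Detach a port from the application. The port must be closed first; any
 * remaining flow rules are flushed before rte_eth_dev_detach() is called.
 */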
1943 void
1944 detach_port(portid_t port_id)
1945 {
1946 	char name[RTE_ETH_NAME_MAX_LEN];
1947 
1948 	printf("Detaching a port...\n");
1949 
1950 	if (!port_is_closed(port_id)) {
1951 		printf("Please close port first\n");
1952 		return;
1953 	}
1954 
1955 	if (ports[port_id].flow_list)
1956 		port_flow_flush(port_id);
1957 
1958 	if (rte_eth_dev_detach(port_id, name)) {
1959 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1960 		return;
1961 	}
1962 
1963 	nb_ports = rte_eth_dev_count_avail();
1964 
1965 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1966 			name, nb_ports);
1967 	printf("Done\n");
1968 	return;
1969 }
1970 
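/*
 * Application teardown: stop any forwarding still in progress, stop and
 * close all ports with link checks disabled, and unwind the hot plug
 * event monitor if it was started.
 */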
1971 void
1972 pmd_test_exit(void)
1973 {
1974 	portid_t pt_id;
1975 	int ret;
1976 
1977 	if (test_done == 0)
1978 		stop_packet_forwarding();
1979 
1980 	if (ports != NULL) {
1981 		no_link_check = 1;
1982 		RTE_ETH_FOREACH_DEV(pt_id) {
1983 			printf("\nShutting down port %d...\n", pt_id);
1984 			fflush(stdout);
1985 			stop_port(pt_id);
1986 			close_port(pt_id);
1987 		}
1988 	}
1989 
1990 	if (hot_plug) {
1991 		ret = rte_dev_event_monitor_stop();
1992 		if (ret)
1993 			RTE_LOG(ERR, EAL,
1994 				"failed to stop device event monitor.\n");
1995 
1996 		ret = eth_dev_event_callback_unregister();
1997 		if (ret)
1998 			RTE_LOG(ERR, EAL,
1999 				"failed to unregister all event callbacks.\n");
2000 	}
2001 
2002 	printf("\nBye...\n");
2003 }
2004 
2005 typedef void (*cmd_func_t)(void);
2006 struct pmd_test_command {
2007 	const char *cmd_name;
2008 	cmd_func_t cmd_func;
2009 };
2010 
2011 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2012 
2013 /* Check the link status of all ports for up to 9s, and print their final status */
2014 static void
2015 check_all_ports_link_status(uint32_t port_mask)
2016 {
2017 #define CHECK_INTERVAL 100 /* 100ms */
2018 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2019 	portid_t portid;
2020 	uint8_t count, all_ports_up, print_flag = 0;
2021 	struct rte_eth_link link;
2022 
2023 	printf("Checking link statuses...\n");
2024 	fflush(stdout);
2025 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2026 		all_ports_up = 1;
2027 		RTE_ETH_FOREACH_DEV(portid) {
2028 			if ((port_mask & (1 << portid)) == 0)
2029 				continue;
2030 			memset(&link, 0, sizeof(link));
2031 			rte_eth_link_get_nowait(portid, &link);
2032 			/* print link status if flag set */
2033 			if (print_flag == 1) {
2034 				if (link.link_status)
2035 					printf(
2036 					"Port %d Link Up. speed %u Mbps - %s\n",
2037 					portid, link.link_speed,
2038 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2039 					("full-duplex") : ("half-duplex"));
2040 				else
2041 					printf("Port %d Link Down\n", portid);
2042 				continue;
2043 			}
2044 			/* clear all_ports_up flag if any link down */
2045 			if (link.link_status == ETH_LINK_DOWN) {
2046 				all_ports_up = 0;
2047 				break;
2048 			}
2049 		}
2050 		/* after finally printing all link status, get out */
2051 		if (print_flag == 1)
2052 			break;
2053 
2054 		if (all_ports_up == 0) {
2055 			fflush(stdout);
2056 			rte_delay_ms(CHECK_INTERVAL);
2057 		}
2058 
2059 		/* set the print_flag if all ports up or timeout */
2060 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2061 			print_flag = 1;
2062 		}
2063 
2064 		if (lsc_interrupt)
2065 			break;
2066 	}
2067 }
2068 
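/*
 * Deferred handler for a device removal interrupt: stop and close the
 * port, then detach the underlying device. Scheduled from
 * eth_event_callback() via rte_eal_alarm_set().
 */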
2069 static void
2070 rmv_event_callback(void *arg)
2071 {
2072 	struct rte_eth_dev *dev;
2073 	portid_t port_id = (intptr_t)arg;
2074 
2075 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2076 	dev = &rte_eth_devices[port_id];
2077 
2078 	stop_port(port_id);
2079 	close_port(port_id);
2080 	printf("removing device %s\n", dev->device->name);
2081 	if (rte_eal_dev_detach(dev->device))
2082 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2083 			dev->device->name);
2084 }
2085 
2086 /* This function is used by the interrupt thread */
2087 static int
2088 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2089 		  void *ret_param)
2090 {
2091 	static const char * const event_desc[] = {
2092 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2093 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2094 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2095 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2096 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2097 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2098 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2099 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2100 		[RTE_ETH_EVENT_NEW] = "device probed",
2101 		[RTE_ETH_EVENT_DESTROY] = "device released",
2102 		[RTE_ETH_EVENT_MAX] = NULL,
2103 	};
2104 
2105 	RTE_SET_USED(param);
2106 	RTE_SET_USED(ret_param);
2107 
2108 	if (type >= RTE_ETH_EVENT_MAX) {
2109 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2110 			port_id, __func__, type);
2111 		fflush(stderr);
2112 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2113 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2114 			event_desc[type]);
2115 		fflush(stdout);
2116 	}
2117 
2118 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2119 		return 0;
2120 
2121 	switch (type) {
2122 	case RTE_ETH_EVENT_INTR_RMV:
2123 		if (rte_eal_alarm_set(100000,
2124 				rmv_event_callback, (void *)(intptr_t)port_id))
2125 			fprintf(stderr, "Could not set up deferred device removal\n");
2126 		break;
2127 	default:
2128 		break;
2129 	}
2130 	return 0;
2131 }
2132 
2133 /* This function is used by the interrupt thread */
2134 static void
2135 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2136 			     __rte_unused void *arg)
2137 {
2138 	if (type >= RTE_DEV_EVENT_MAX) {
2139 		fprintf(stderr, "%s called upon invalid event %d\n",
2140 			__func__, type);
2141 		fflush(stderr);
2142 	}
2143 
2144 	switch (type) {
2145 	case RTE_DEV_EVENT_REMOVE:
2146 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2147 			device_name);
2148 		/* TODO: Once failure handling is finished, stop packet
2149 		 * forwarding, then stop, close and detach the port.
2150 		 */
2151 		break;
2152 	case RTE_DEV_EVENT_ADD:
2153 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2154 			device_name);
2155 		/* TODO: Once the kernel driver binding is finished,
2156 		 * attach the port.
2157 		 */
2158 		break;
2159 	default:
2160 		break;
2161 	}
2162 }
2163 
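/*
 * Apply the user-supplied TX queue to stats-counter mappings to one port
 * (the RX counterpart follows below). Any error from the PMD, e.g.
 * -ENOTSUP, is passed back to the caller.
 */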
2164 static int
2165 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2166 {
2167 	uint16_t i;
2168 	int diag;
2169 	uint8_t mapping_found = 0;
2170 
2171 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2172 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2173 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2174 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2175 					tx_queue_stats_mappings[i].queue_id,
2176 					tx_queue_stats_mappings[i].stats_counter_id);
2177 			if (diag != 0)
2178 				return diag;
2179 			mapping_found = 1;
2180 		}
2181 	}
2182 	if (mapping_found)
2183 		port->tx_queue_stats_mapping_enabled = 1;
2184 	return 0;
2185 }
2186 
2187 static int
2188 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2189 {
2190 	uint16_t i;
2191 	int diag;
2192 	uint8_t mapping_found = 0;
2193 
2194 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2195 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2196 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2197 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2198 					rx_queue_stats_mappings[i].queue_id,
2199 					rx_queue_stats_mappings[i].stats_counter_id);
2200 			if (diag != 0)
2201 				return diag;
2202 			mapping_found = 1;
2203 		}
2204 	}
2205 	if (mapping_found)
2206 		port->rx_queue_stats_mapping_enabled = 1;
2207 	return 0;
2208 }
2209 
2210 static void
2211 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2212 {
2213 	int diag = 0;
2214 
2215 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2216 	if (diag != 0) {
2217 		if (diag == -ENOTSUP) {
2218 			port->tx_queue_stats_mapping_enabled = 0;
2219 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2220 		}
2221 		else
2222 			rte_exit(EXIT_FAILURE,
2223 					"set_tx_queue_stats_mapping_registers "
2224 					"failed for port id=%d diag=%d\n",
2225 					pi, diag);
2226 	}
2227 
2228 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2229 	if (diag != 0) {
2230 		if (diag == -ENOTSUP) {
2231 			port->rx_queue_stats_mapping_enabled = 0;
2232 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2233 		}
2234 		else
2235 			rte_exit(EXIT_FAILURE,
2236 					"set_rx_queue_stats_mapping_registers "
2237 					"failed for port id=%d diag=%d\n",
2238 					pi, diag);
2239 	}
2240 }
2241 
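/*
 * Seed each queue's RX/TX configuration from the PMD defaults in
 * dev_info, then apply any thresholds given on the command line; values
 * left at RTE_PMD_PARAM_UNSET keep the PMD default.
 */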
2242 static void
2243 rxtx_port_config(struct rte_port *port)
2244 {
2245 	uint16_t qid;
2246 
2247 	for (qid = 0; qid < nb_rxq; qid++) {
2248 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2249 
2250 		/* Check if any Rx parameters have been passed */
2251 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2252 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2253 
2254 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2255 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2256 
2257 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2258 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2259 
2260 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2261 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2262 
2263 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2264 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2265 
2266 		port->nb_rx_desc[qid] = nb_rxd;
2267 	}
2268 
2269 	for (qid = 0; qid < nb_txq; qid++) {
2270 		port->tx_conf[qid] = port->dev_info.default_txconf;
2271 
2272 		/* Check if any Tx parameters have been passed */
2273 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2274 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2275 
2276 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2277 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2278 
2279 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2280 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2281 
2282 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2283 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2284 
2285 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2286 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2287 
2288 		port->nb_tx_desc[qid] = nb_txd;
2289 	}
2290 }
2291 
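/*
 * Build the default configuration of every probed port: flow director
 * settings, RSS when several RX queues are in use, per-queue parameters,
 * and LSC/RMV interrupt flags when the device supports them.
 */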
2292 void
2293 init_port_config(void)
2294 {
2295 	portid_t pid;
2296 	struct rte_port *port;
2297 	struct rte_eth_dev_info dev_info;
2298 
2299 	RTE_ETH_FOREACH_DEV(pid) {
2300 		port = &ports[pid];
2301 		port->dev_conf.fdir_conf = fdir_conf;
2302 		if (nb_rxq > 1) {
2303 			rte_eth_dev_info_get(pid, &dev_info);
2304 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2305 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2306 				rss_hf & dev_info.flow_type_rss_offloads;
2307 		} else {
2308 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2309 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2310 		}
2311 
2312 		if (port->dcb_flag == 0) {
2313 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2314 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2315 			else
2316 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2317 		}
2318 
2319 		rxtx_port_config(port);
2320 
2321 		rte_eth_macaddr_get(pid, &port->eth_addr);
2322 
2323 		map_port_queue_stats_mapping_registers(pid, port);
2324 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2325 		rte_pmd_ixgbe_bypass_init(pid);
2326 #endif
2327 
2328 		if (lsc_interrupt &&
2329 		    (rte_eth_devices[pid].data->dev_flags &
2330 		     RTE_ETH_DEV_INTR_LSC))
2331 			port->dev_conf.intr_conf.lsc = 1;
2332 		if (rmv_interrupt &&
2333 		    (rte_eth_devices[pid].data->dev_flags &
2334 		     RTE_ETH_DEV_INTR_RMV))
2335 			port->dev_conf.intr_conf.rmv = 1;
2336 
2337 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2338 		/* Detect softnic port */
2339 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2340 			port->softnic_enable = 1;
2341 			memset(&port->softport, 0, sizeof(struct softnic_port));
2342 
2343 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2344 				port->softport.tm_flag = 1;
2345 		}
2346 #endif
2347 	}
2348 }
2349 
2350 void set_port_slave_flag(portid_t slave_pid)
2351 {
2352 	struct rte_port *port;
2353 
2354 	port = &ports[slave_pid];
2355 	port->slave_flag = 1;
2356 }
2357 
2358 void clear_port_slave_flag(portid_t slave_pid)
2359 {
2360 	struct rte_port *port;
2361 
2362 	port = &ports[slave_pid];
2363 	port->slave_flag = 0;
2364 }
2365 
2366 uint8_t port_is_bonding_slave(portid_t slave_pid)
2367 {
2368 	struct rte_port *port;
2369 
2370 	port = &ports[slave_pid];
2371 	return port->slave_flag;
2372 }
2373 
2374 const uint16_t vlan_tags[] = {
2375 		0,  1,  2,  3,  4,  5,  6,  7,
2376 		8,  9, 10, 11,  12, 13, 14, 15,
2377 		16, 17, 18, 19, 20, 21, 22, 23,
2378 		24, 25, 26, 27, 28, 29, 30, 31
2379 };
2380 
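/*
 * Translate the requested DCB mode into an rte_eth_conf: either VMDQ+DCB,
 * with pools mapped from the VLAN tags above, or plain DCB with RSS;
 * priority flow control support is advertised when pfc_en is set.
 */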
2381 static int
2382 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2383 		 enum dcb_mode_enable dcb_mode,
2384 		 enum rte_eth_nb_tcs num_tcs,
2385 		 uint8_t pfc_en)
2386 {
2387 	uint8_t i;
2388 
2389 	/*
2390 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
2391 	 * given above, and the number of traffic classes available for use.
2392 	 */
2393 	if (dcb_mode == DCB_VT_ENABLED) {
2394 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2395 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2396 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2397 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2398 
2399 		/* VMDQ+DCB RX and TX configurations */
2400 		vmdq_rx_conf->enable_default_pool = 0;
2401 		vmdq_rx_conf->default_pool = 0;
2402 		vmdq_rx_conf->nb_queue_pools =
2403 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2404 		vmdq_tx_conf->nb_queue_pools =
2405 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2406 
2407 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2408 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2409 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2410 			vmdq_rx_conf->pool_map[i].pools =
2411 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2412 		}
2413 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2414 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2415 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2416 		}
2417 
2418 		/* set DCB mode of RX and TX of multiple queues */
2419 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2420 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2421 	} else {
2422 		struct rte_eth_dcb_rx_conf *rx_conf =
2423 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2424 		struct rte_eth_dcb_tx_conf *tx_conf =
2425 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2426 
2427 		rx_conf->nb_tcs = num_tcs;
2428 		tx_conf->nb_tcs = num_tcs;
2429 
2430 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2431 			rx_conf->dcb_tc[i] = i % num_tcs;
2432 			tx_conf->dcb_tc[i] = i % num_tcs;
2433 		}
2434 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2435 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2436 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2437 	}
2438 
2439 	if (pfc_en)
2440 		eth_conf->dcb_capability_en =
2441 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2442 	else
2443 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2444 
2445 	return 0;
2446 }
2447 
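/*
 * Reconfigure one port for DCB testing: build the DCB (or VMDQ+DCB)
 * configuration, enable VLAN filtering on the test VLAN tags, and resize
 * nb_rxq/nb_txq to match the queue layout the device exposes for DCB.
 */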
2448 int
2449 init_port_dcb_config(portid_t pid,
2450 		     enum dcb_mode_enable dcb_mode,
2451 		     enum rte_eth_nb_tcs num_tcs,
2452 		     uint8_t pfc_en)
2453 {
2454 	struct rte_eth_conf port_conf;
2455 	struct rte_port *rte_port;
2456 	int retval;
2457 	uint16_t i;
2458 
2459 	rte_port = &ports[pid];
2460 
2461 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2462 	/* Enter DCB configuration status */
2463 	dcb_config = 1;
2464 
2465 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2466 	port_conf.txmode = rte_port->dev_conf.txmode;
2467 
2468 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
2469 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2470 	if (retval < 0)
2471 		return retval;
2472 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2473 
2474 	/**
2475 	 * Write the configuration into the device.
2476 	 * Set the numbers of RX & TX queues to 0, so
2477 	 * the RX & TX queues will not be setup.
2478 	 */
2479 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2480 
2481 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2482 
2483 	/* If dev_info.vmdq_pool_base is greater than 0,
2484 	 * the queue id of vmdq pools is started after pf queues.
2485 	 */
2486 	if (dcb_mode == DCB_VT_ENABLED &&
2487 	    rte_port->dev_info.vmdq_pool_base > 0) {
2488 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2489 			" for port %d.\n", pid);
2490 		return -1;
2491 	}
2492 
2493 	/* Assume the ports in testpmd have the same DCB capability
2494 	 * and the same number of rxq and txq in DCB mode
2495 	 */
2496 	if (dcb_mode == DCB_VT_ENABLED) {
2497 		if (rte_port->dev_info.max_vfs > 0) {
2498 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2499 			nb_txq = rte_port->dev_info.nb_tx_queues;
2500 		} else {
2501 			nb_rxq = rte_port->dev_info.max_rx_queues;
2502 			nb_txq = rte_port->dev_info.max_tx_queues;
2503 		}
2504 	} else {
2505 		/* if VT is disabled, use all PF queues */
2506 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2507 			nb_rxq = rte_port->dev_info.max_rx_queues;
2508 			nb_txq = rte_port->dev_info.max_tx_queues;
2509 		} else {
2510 			nb_rxq = (queueid_t)num_tcs;
2511 			nb_txq = (queueid_t)num_tcs;
2512 
2513 		}
2514 	}
2515 	rx_free_thresh = 64;
2516 
2517 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2518 
2519 	rxtx_port_config(rte_port);
2520 	/* VLAN filter */
2521 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2522 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2523 		rx_vft_set(pid, vlan_tags[i], 1);
2524 
2525 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2526 	map_port_queue_stats_mapping_registers(pid, rte_port);
2527 
2528 	rte_port->dcb_flag = 1;
2529 
2530 	return 0;
2531 }
2532 
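/*
 * Allocate the global ports[] array: one zeroed rte_port entry per
 * possible port ID, cache-line aligned.
 */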
2533 static void
2534 init_port(void)
2535 {
2536 	/* Configuration of Ethernet ports. */
2537 	ports = rte_zmalloc("testpmd: ports",
2538 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2539 			    RTE_CACHE_LINE_SIZE);
2540 	if (ports == NULL) {
2541 		rte_exit(EXIT_FAILURE,
2542 				"rte_zmalloc(%d struct rte_port) failed\n",
2543 				RTE_MAX_ETHPORTS);
2544 	}
2545 }
2546 
2547 static void
2548 force_quit(void)
2549 {
2550 	pmd_test_exit();
2551 	prompt_exit();
2552 }
2553 
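/*
 * Clear the terminal with ANSI escape sequences and redraw the statistics
 * of every forwarding port; driven by the stats_period loop in main().
 */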
2554 static void
2555 print_stats(void)
2556 {
2557 	uint8_t i;
2558 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2559 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2560 
2561 	/* Clear screen and move to top left */
2562 	printf("%s%s", clr, top_left);
2563 
2564 	printf("\nPort statistics ====================================");
2565 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2566 		nic_stats_display(fwd_ports_ids[i]);
2567 }
2568 
2569 static void
2570 signal_handler(int signum)
2571 {
2572 	if (signum == SIGINT || signum == SIGTERM) {
2573 		printf("\nSignal %d received, preparing to exit...\n",
2574 				signum);
2575 #ifdef RTE_LIBRTE_PDUMP
2576 		/* uninitialize packet capture framework */
2577 		rte_pdump_uninit();
2578 #endif
2579 #ifdef RTE_LIBRTE_LATENCY_STATS
2580 		rte_latencystats_uninit();
2581 #endif
2582 		force_quit();
2583 		/* Set flag to indicate forced termination. */
2584 		f_quit = 1;
2585 		/* exit with the expected status */
2586 		signal(signum, SIG_DFL);
2587 		kill(getpid(), signum);
2588 	}
2589 }
2590 
2591 int
2592 main(int argc, char** argv)
2593 {
2594 	int diag;
2595 	portid_t port_id;
2596 	int ret;
2597 
2598 	signal(SIGINT, signal_handler);
2599 	signal(SIGTERM, signal_handler);
2600 
2601 	diag = rte_eal_init(argc, argv);
2602 	if (diag < 0)
2603 		rte_panic("Cannot init EAL\n");
2604 
2605 	testpmd_logtype = rte_log_register("testpmd");
2606 	if (testpmd_logtype < 0)
2607 		rte_panic("Cannot register log type");
2608 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2609 
2610 	/* Bitrate/latency stats disabled by default */
2611 #ifdef RTE_LIBRTE_BITRATE
2612 	bitrate_enabled = 0;
2613 #endif
2614 #ifdef RTE_LIBRTE_LATENCY_STATS
2615 	latencystats_enabled = 0;
2616 #endif
2617 
2618 	/* on FreeBSD, mlockall() is disabled by default */
2619 #ifdef RTE_EXEC_ENV_BSDAPP
2620 	do_mlockall = 0;
2621 #else
2622 	do_mlockall = 1;
2623 #endif
2624 
2625 	argc -= diag;
2626 	argv += diag;
2627 	if (argc > 1)
2628 		launch_args_parse(argc, argv);
2629 
2630 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2631 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2632 			strerror(errno));
2633 	}
2634 
2635 #ifdef RTE_LIBRTE_PDUMP
2636 	/* initialize packet capture framework */
2637 	rte_pdump_init(NULL);
2638 #endif
2639 
2640 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2641 	if (nb_ports == 0)
2642 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2643 
2644 	/* allocate port structures, and init them */
2645 	init_port();
2646 
2647 	set_def_fwd_config();
2648 	if (nb_lcores == 0)
2649 		rte_panic("Empty set of forwarding logical cores - check the "
2650 			  "core mask supplied in the command parameters\n");
2651 
2652 	if (tx_first && interactive)
2653 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2654 				"interactive mode.\n");
2655 
2656 	if (tx_first && lsc_interrupt) {
2657 		printf("Warning: lsc_interrupt needs to be off when "
2658 				"using tx_first. Disabling.\n");
2659 		lsc_interrupt = 0;
2660 	}
2661 
2662 	if (!nb_rxq && !nb_txq)
2663 		printf("Warning: Either rx or tx queues should be non-zero\n");
2664 
2665 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2666 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2667 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2668 		       nb_rxq, nb_txq);
2669 
2670 	init_config();
2671 
2672 	if (hot_plug) {
2673 		/* enable hot plug monitoring */
2674 		ret = rte_dev_event_monitor_start();
2675 		if (ret) {
2676 			rte_errno = EINVAL;
2677 			return -1;
2678 		}
2679 		eth_dev_event_callback_register();
2680 
2681 	}
2682 
2683 	if (start_port(RTE_PORT_ALL) != 0)
2684 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2685 
2686 	/* set all ports to promiscuous mode by default */
2687 	RTE_ETH_FOREACH_DEV(port_id)
2688 		rte_eth_promiscuous_enable(port_id);
2689 
2690 	/* Init metrics library */
2691 	rte_metrics_init(rte_socket_id());
2692 
2693 #ifdef RTE_LIBRTE_LATENCY_STATS
2694 	if (latencystats_enabled != 0) {
2695 		int ret = rte_latencystats_init(1, NULL);
2696 		if (ret)
2697 			printf("Warning: latencystats init()"
2698 				" returned error %d\n", ret);
2699 		printf("Latencystats running on lcore %d\n",
2700 			latencystats_lcore_id);
2701 	}
2702 #endif
2703 
2704 	/* Setup bitrate stats */
2705 #ifdef RTE_LIBRTE_BITRATE
2706 	if (bitrate_enabled != 0) {
2707 		bitrate_data = rte_stats_bitrate_create();
2708 		if (bitrate_data == NULL)
2709 			rte_exit(EXIT_FAILURE,
2710 				"Could not allocate bitrate data.\n");
2711 		rte_stats_bitrate_reg(bitrate_data);
2712 	}
2713 #endif
2714 
2715 #ifdef RTE_LIBRTE_CMDLINE
2716 	if (strlen(cmdline_filename) != 0)
2717 		cmdline_read_from_file(cmdline_filename);
2718 
2719 	if (interactive == 1) {
2720 		if (auto_start) {
2721 			printf("Start automatic packet forwarding\n");
2722 			start_packet_forwarding(0);
2723 		}
2724 		prompt();
2725 		pmd_test_exit();
2726 	} else
2727 #endif
2728 	{
2729 		char c;
2730 		int rc;
2731 
2732 		f_quit = 0;
2733 
2734 		printf("No commandline core given, starting packet forwarding\n");
2735 		start_packet_forwarding(tx_first);
2736 		if (stats_period != 0) {
2737 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2738 			uint64_t timer_period;
2739 
2740 			/* Convert to number of cycles */
2741 			timer_period = stats_period * rte_get_timer_hz();
2742 
2743 			while (f_quit == 0) {
2744 				cur_time = rte_get_timer_cycles();
2745 				diff_time += cur_time - prev_time;
2746 
2747 				if (diff_time >= timer_period) {
2748 					print_stats();
2749 					/* Reset the timer */
2750 					diff_time = 0;
2751 				}
2752 				/* Sleep to avoid unnecessary checks */
2753 				prev_time = cur_time;
2754 				sleep(1);
2755 			}
2756 		}
2757 
2758 		printf("Press enter to exit\n");
2759 		rc = read(0, &c, 1);
2760 		pmd_test_exit();
2761 		if (rc < 0)
2762 			return 1;
2763 	}
2764 
2765 	return 0;
2766 }
2767