xref: /dpdk/app/test-pmd/testpmd.c (revision 08aa6271c86a561b66c6dd91f9a54fa2f12bc859)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings and of the DMA memory buffers (mbufs) of the
 * probed ports among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
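
/*
 * Illustrative invocation (hypothetical core/port numbers; see the
 * testpmd user guide for the exact option grammar -- the options are
 * the ones named in the warning printed by check_socket_id() below):
 *
 *   testpmd -l 0-3 -n 4 -- --numa \
 *           --port-numa-config="(0,0),(1,1)" \
 *           --ring-numa-config="(0,3,0),(1,3,1)"
 *
 * would place the mbufs and rings of port 0 on socket 0 and those of
 * port 1 on socket 1.
 */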

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which might not be physically contiguous)
 * for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the specified sockets on which the memory pools used by the
 * ports are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet addresses of the peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
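
/*
 * Example with hypothetical numbers: with 8 probed lcores
 * (nb_lcores = 8), a user may select 4 of them (nb_cfg_lcores = 4) and
 * actually forward on 2 (nb_fwd_lcores = 2), which satisfies
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores; the same ordering applies
 * to the port counters.
 */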

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option
 * cannot be terminated directly. Set this flag to exit the stats-period
 * loop after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
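
/*
 * Illustrative use (interactive testpmd command; values hypothetical):
 * "set txpkts 64,128" is expected to yield tx_pkt_nb_segs = 2,
 * tx_pkt_seg_lengths = {64, 128} and tx_pkt_length = 192, i.e. the
 * TXONLY engine then sends two-segment packets.
 */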

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
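
/*
 * Minimal sketch of how such a mask is meant to be consumed (the event
 * callback registered in start_port() is expected to perform an
 * equivalent check before printing):
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *       printf("Port %" PRIu16 ": event %d\n", port_id, type);
 */
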
/*
 * Decide whether all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check if a socket has not been discovered yet.
 * If the socket id is new, return a positive value. If it is already
 * known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the given rxq is valid.
 * It is valid if it is not greater than the maximum number of RX queues
 * of every port.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}
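
/*
 * Worked example (hypothetical capabilities): with two ports whose
 * max_rx_queues are 16 and 8, get_allowed_max_nb_rxq() returns 8 and
 * sets *pid to the second port, so check_nb_rxq(10) fails while
 * check_nb_rxq(8) succeeds. check_nb_txq() below mirrors this for TX.
 */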

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the given txq is valid.
 * It is valid if it is not greater than the maximum number of TX queues
 * of every port.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
					DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of each discovered
	 * socket.
	 *
	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
	 * and nb_txd can be re-configured at run time.
	 */
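	/*
	 * Worked example with assumed defaults from testpmd.h (values
	 * illustrative, not authoritative): RTE_TEST_RX_DESC_MAX =
	 * RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512, 4 lcores and
	 * a 250-entry mempool cache give
	 *   nb_mbuf_per_pool = 2048 + (4 * 250) + 2048 + 512 = 5608
	 * before being multiplied by RTE_MAX_ETHPORTS below.
	 */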
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
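
/*
 * Example (hypothetical counts): with nb_ports = 2, nb_rxq = 4 and
 * nb_txq = 2, RTE_MAX(nb_rxq, nb_txq) = 4, so 2 * 4 = 8 forwarding
 * streams are allocated above.
 */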

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. A timer is
				 * therefore added to exit this loop after
				 * the 1-second timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}
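
/*
 * Example of the compaction above (hypothetical port ids): if
 * fwd_ports_ids is {0, 1, 2} and port 1 has become invalid, the loop
 * shifts the list down to {0, 2}; a valid new_pid is then appended at
 * the tail.
 */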

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All (non-slave) ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				port->tx_conf[qi].txq_flags =
					ETH_TXQ_FLAGS_IGNORE;
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up the tx queue; return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up the rx queue; return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; roll the status back */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need to check link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1826 
1827 void
1828 close_port(portid_t pid)
1829 {
1830 	portid_t pi;
1831 	struct rte_port *port;
1832 
1833 	if (port_id_is_invalid(pid, ENABLED_WARN))
1834 		return;
1835 
1836 	printf("Closing ports...\n");
1837 
1838 	RTE_ETH_FOREACH_DEV(pi) {
1839 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1840 			continue;
1841 
1842 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1843 			printf("Please remove port %d from forwarding configuration.\n", pi);
1844 			continue;
1845 		}
1846 
1847 		if (port_is_bonding_slave(pi)) {
1848 			printf("Please remove port %d from bonded device.\n", pi);
1849 			continue;
1850 		}
1851 
1852 		port = &ports[pi];
1853 		if (rte_atomic16_cmpset(&(port->port_status),
1854 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1855 			printf("Port %d is already closed\n", pi);
1856 			continue;
1857 		}
1858 
1859 		if (rte_atomic16_cmpset(&(port->port_status),
1860 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
1862 			continue;
1863 		}
1864 
1865 		if (port->flow_list)
1866 			port_flow_flush(pi);
1867 		rte_eth_dev_close(pi);
1868 
1869 		if (rte_atomic16_cmpset(&(port->port_status),
1870 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1871 			printf("Port %d cannot be set to closed\n", pi);
1872 	}
1873 
1874 	printf("Done\n");
1875 }
1876 
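/*
 * Reset the given port, or every port when pid is RTE_PORT_ALL, with
 * rte_eth_dev_reset(), and flag it for full reconfiguration on the next
 * start.
 */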
1877 void
1878 reset_port(portid_t pid)
1879 {
1880 	int diag;
1881 	portid_t pi;
1882 	struct rte_port *port;
1883 
1884 	if (port_id_is_invalid(pid, ENABLED_WARN))
1885 		return;
1886 
1887 	printf("Resetting ports...\n");
1888 
1889 	RTE_ETH_FOREACH_DEV(pi) {
1890 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1891 			continue;
1892 
1893 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1894 			printf("Please remove port %d from forwarding "
1895 			       "configuration.\n", pi);
1896 			continue;
1897 		}
1898 
1899 		if (port_is_bonding_slave(pi)) {
1900 			printf("Please remove port %d from bonded device.\n",
1901 			       pi);
1902 			continue;
1903 		}
1904 
1905 		diag = rte_eth_dev_reset(pi);
1906 		if (diag == 0) {
1907 			port = &ports[pi];
1908 			port->need_reconfig = 1;
1909 			port->need_reconfig_queues = 1;
1910 		} else {
1911 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1912 		}
1913 	}
1914 
1915 	printf("Done\n");
1916 }
1917 
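/*
 * Register eth_dev_event_callback for hot-plug device events; the NULL
 * device name subscribes the callback to events from all devices.
 */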
1918 static int
1919 eth_dev_event_callback_register(void)
1920 {
1921 	int ret;
1922 
1923 	/* register the device event callback */
1924 	ret = rte_dev_event_callback_register(NULL,
1925 		eth_dev_event_callback, NULL);
1926 	if (ret) {
1927 		printf("Failed to register device event callback\n");
1928 		return -1;
1929 	}
1930 
1931 	return 0;
1932 }
1933 
1935 static int
1936 eth_dev_event_callback_unregister(void)
1937 {
1938 	int ret;
1939 
1940 	/* unregister the device event callback */
1941 	ret = rte_dev_event_callback_unregister(NULL,
1942 		eth_dev_event_callback, NULL);
1943 	if (ret < 0) {
1944 		printf("Failed to unregister device event callback\n");
1945 		return -1;
1946 	}
1947 
1948 	return 0;
1949 }
1950 
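/*
 * Hot-plug a new port from a device identifier string, e.g. a PCI address
 * such as "0000:02:00.0" or a vdev argument such as "net_tap0,iface=tap0"
 * (both identifiers are illustrative). The new port is configured, set to
 * promiscuous mode and left in the stopped state.
 */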
1951 void
1952 attach_port(char *identifier)
1953 {
1954 	portid_t pi = 0;
1955 	unsigned int socket_id;
1956 
1957 	printf("Attaching a new port...\n");
1958 
1959 	if (identifier == NULL) {
1960 		printf("Invalid parameters are specified\n");
1961 		return;
1962 	}
1963 
1964 	if (rte_eth_dev_attach(identifier, &pi))
1965 		return;
1966 
1967 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1968 	/* if socket_id is invalid, set to 0 */
1969 	if (check_socket_id(socket_id) < 0)
1970 		socket_id = 0;
1971 	reconfig(pi, socket_id);
1972 	rte_eth_promiscuous_enable(pi);
1973 
1974 	nb_ports = rte_eth_dev_count_avail();
1975 
1976 	ports[pi].port_status = RTE_PORT_STOPPED;
1977 
1978 	update_fwd_ports(pi);
1979 
	printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
1981 	printf("Done\n");
1982 }
1983 
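/*
 * Detach a port from the application and free the underlying device. The
 * port must have been closed beforehand; any remaining flow rules are
 * flushed before the detach.
 */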
1984 void
1985 detach_port(portid_t port_id)
1986 {
1987 	char name[RTE_ETH_NAME_MAX_LEN];
1988 
1989 	printf("Detaching a port...\n");
1990 
1991 	if (!port_is_closed(port_id)) {
1992 		printf("Please close port first\n");
1993 		return;
1994 	}
1995 
1996 	if (ports[port_id].flow_list)
1997 		port_flow_flush(port_id);
1998 
1999 	if (rte_eth_dev_detach(port_id, name)) {
2000 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2001 		return;
2002 	}
2003 
2004 	nb_ports = rte_eth_dev_count_avail();
2005 
2006 	update_fwd_ports(RTE_MAX_ETHPORTS);
2007 
	printf("Port %u is detached. Total ports: %d\n",
			port_id, nb_ports);
2010 	printf("Done\n");
2012 }
2013 
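/*
 * Orderly shutdown path: stop packet forwarding if it is still running,
 * then stop and close every port, and finally stop the hot-plug event
 * monitor when it was enabled.
 */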
2014 void
2015 pmd_test_exit(void)
2016 {
2017 	portid_t pt_id;
2018 	int ret;
2019 
2020 	if (test_done == 0)
2021 		stop_packet_forwarding();
2022 
2023 	if (ports != NULL) {
2024 		no_link_check = 1;
2025 		RTE_ETH_FOREACH_DEV(pt_id) {
2026 			printf("\nShutting down port %d...\n", pt_id);
2027 			fflush(stdout);
2028 			stop_port(pt_id);
2029 			close_port(pt_id);
2030 		}
2031 	}
2032 
2033 	if (hot_plug) {
2034 		ret = rte_dev_event_monitor_stop();
2035 		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to stop the device event monitor.\n");
2038 
2039 		ret = eth_dev_event_callback_unregister();
2040 		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to unregister all event callbacks.\n");
2043 	}
2044 
2045 	printf("\nBye...\n");
2046 }
2047 
2048 typedef void (*cmd_func_t)(void);
2049 struct pmd_test_command {
2050 	const char *cmd_name;
2051 	cmd_func_t cmd_func;
2052 };
2053 
2054 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2055 
/*
 * Check the link status of all ports in up to 9 seconds,
 * and print the final status of each port.
 */
2057 static void
2058 check_all_ports_link_status(uint32_t port_mask)
2059 {
2060 #define CHECK_INTERVAL 100 /* 100ms */
2061 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2062 	portid_t portid;
2063 	uint8_t count, all_ports_up, print_flag = 0;
2064 	struct rte_eth_link link;
2065 
2066 	printf("Checking link statuses...\n");
2067 	fflush(stdout);
2068 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2069 		all_ports_up = 1;
2070 		RTE_ETH_FOREACH_DEV(portid) {
2071 			if ((port_mask & (1 << portid)) == 0)
2072 				continue;
2073 			memset(&link, 0, sizeof(link));
2074 			rte_eth_link_get_nowait(portid, &link);
2075 			/* print link status if flag set */
2076 			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up. speed %u Mbps - %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
2083 				else
2084 					printf("Port %d Link Down\n", portid);
2085 				continue;
2086 			}
2087 			/* clear all_ports_up flag if any link down */
2088 			if (link.link_status == ETH_LINK_DOWN) {
2089 				all_ports_up = 0;
2090 				break;
2091 			}
2092 		}
		/* After the final print of all link statuses, get out. */
2094 		if (print_flag == 1)
2095 			break;
2096 
2097 		if (all_ports_up == 0) {
2098 			fflush(stdout);
2099 			rte_delay_ms(CHECK_INTERVAL);
2100 		}
2101 
2102 		/* set the print_flag if all ports up or timeout */
2103 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2104 			print_flag = 1;
2105 		}
2106 
2107 		if (lsc_interrupt)
2108 			break;
2109 	}
2110 }
2111 
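/*
 * Deferred handler for a device removal interrupt, scheduled through an
 * EAL alarm by eth_event_callback(): pause forwarding if the port was in
 * use, stop, close and detach the port, then resume forwarding.
 */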
2112 static void
2113 rmv_event_callback(void *arg)
2114 {
2115 	int need_to_start = 0;
2116 	int org_no_link_check = no_link_check;
2117 	portid_t port_id = (intptr_t)arg;
2118 
2119 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2120 
2121 	if (!test_done && port_is_forwarding(port_id)) {
2122 		need_to_start = 1;
2123 		stop_packet_forwarding();
2124 	}
2125 	no_link_check = 1;
2126 	stop_port(port_id);
2127 	no_link_check = org_no_link_check;
2128 	close_port(port_id);
2129 	detach_port(port_id);
2130 	if (need_to_start)
2131 		start_packet_forwarding(0);
2132 }
2133 
2134 /* This function is used by the interrupt thread */
2135 static int
2136 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2137 		  void *ret_param)
2138 {
2139 	static const char * const event_desc[] = {
2140 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2141 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2142 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2143 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2144 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2145 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2146 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2147 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2148 		[RTE_ETH_EVENT_NEW] = "device probed",
2149 		[RTE_ETH_EVENT_DESTROY] = "device released",
2150 		[RTE_ETH_EVENT_MAX] = NULL,
2151 	};
2152 
2153 	RTE_SET_USED(param);
2154 	RTE_SET_USED(ret_param);
2155 
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
2159 		fflush(stderr);
2160 	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			event_desc[type]);
2163 		fflush(stdout);
2164 	}
2165 
2166 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2167 		return 0;
2168 
2169 	switch (type) {
2170 	case RTE_ETH_EVENT_INTR_RMV:
2171 		if (rte_eal_alarm_set(100000,
2172 				rmv_event_callback, (void *)(intptr_t)port_id))
2173 			fprintf(stderr, "Could not set up deferred device removal\n");
2174 		break;
2175 	default:
2176 		break;
2177 	}
2178 	return 0;
2179 }
2180 
2181 /* This function is used by the interrupt thread */
2182 static void
2183 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2184 			     __rte_unused void *arg)
2185 {
2186 	if (type >= RTE_DEV_EVENT_MAX) {
2187 		fprintf(stderr, "%s called upon invalid event %d\n",
2188 			__func__, type);
2189 		fflush(stderr);
2190 	}
2191 
2192 	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
			device_name);
		/* TODO: Once failure handling is finished, stop packet
		 * forwarding, then stop, close and detach the port.
		 */
2199 		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
			device_name);
		/* TODO: Once the kernel driver binding is finished,
		 * attach the port.
		 */
2206 		break;
2207 	default:
2208 		break;
2209 	}
2210 }
2211 
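/*
 * Program the TX queue to statistics-counter mappings (typically supplied
 * with the --tx-queue-stats-mapping option) that match this port.
 */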
2212 static int
2213 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2214 {
2215 	uint16_t i;
2216 	int diag;
2217 	uint8_t mapping_found = 0;
2218 
2219 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2220 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2222 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2223 					tx_queue_stats_mappings[i].queue_id,
2224 					tx_queue_stats_mappings[i].stats_counter_id);
2225 			if (diag != 0)
2226 				return diag;
2227 			mapping_found = 1;
2228 		}
2229 	}
2230 	if (mapping_found)
2231 		port->tx_queue_stats_mapping_enabled = 1;
2232 	return 0;
2233 }
2234 
2235 static int
2236 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2237 {
2238 	uint16_t i;
2239 	int diag;
2240 	uint8_t mapping_found = 0;
2241 
2242 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2243 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2245 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2246 					rx_queue_stats_mappings[i].queue_id,
2247 					rx_queue_stats_mappings[i].stats_counter_id);
2248 			if (diag != 0)
2249 				return diag;
2250 			mapping_found = 1;
2251 		}
2252 	}
2253 	if (mapping_found)
2254 		port->rx_queue_stats_mapping_enabled = 1;
2255 	return 0;
2256 }
2257 
2258 static void
2259 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2260 {
2261 	int diag = 0;
2262 
2263 	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported for port id=%d\n",
				pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
	}
2275 
2276 	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported for port id=%d\n",
				pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
	}
2288 }
2289 
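/*
 * Seed the per-queue RX/TX configuration of a port from the PMD defaults
 * reported in dev_info, then apply any thresholds set on the command line
 * (RTE_PMD_PARAM_UNSET marks values that were not supplied).
 */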
2290 static void
2291 rxtx_port_config(struct rte_port *port)
2292 {
2293 	uint16_t qid;
2294 
2295 	for (qid = 0; qid < nb_rxq; qid++) {
2296 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2297 
2298 		/* Check if any Rx parameters have been passed */
2299 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2300 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2301 
2302 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2303 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2304 
2305 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2306 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2307 
2308 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2309 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2310 
2311 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2312 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2313 
2314 		port->nb_rx_desc[qid] = nb_rxd;
2315 	}
2316 
2317 	for (qid = 0; qid < nb_txq; qid++) {
2318 		port->tx_conf[qid] = port->dev_info.default_txconf;
2319 
2320 		/* Check if any Tx parameters have been passed */
2321 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2322 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2323 
2324 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2325 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2326 
2327 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2328 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2329 
2330 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2331 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2332 
2333 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2334 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2335 
2336 		port->nb_tx_desc[qid] = nb_txd;
2337 	}
2338 }
2339 
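/*
 * Build the default configuration of every probed port: flow director,
 * RSS (enabled when more than one RX queue is requested), per-queue
 * RX/TX settings, MAC address, statistics mappings and, when supported,
 * LSC/RMV interrupts.
 */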
2340 void
2341 init_port_config(void)
2342 {
2343 	portid_t pid;
2344 	struct rte_port *port;
2345 	struct rte_eth_dev_info dev_info;
2346 
2347 	RTE_ETH_FOREACH_DEV(pid) {
2348 		port = &ports[pid];
2349 		port->dev_conf.fdir_conf = fdir_conf;
2350 		if (nb_rxq > 1) {
2351 			rte_eth_dev_info_get(pid, &dev_info);
2352 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2353 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2354 				rss_hf & dev_info.flow_type_rss_offloads;
2355 		} else {
2356 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2357 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2358 		}
2359 
2360 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2362 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2363 			else
2364 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2365 		}
2366 
2367 		rxtx_port_config(port);
2368 
2369 		rte_eth_macaddr_get(pid, &port->eth_addr);
2370 
2371 		map_port_queue_stats_mapping_registers(pid, port);
2372 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2373 		rte_pmd_ixgbe_bypass_init(pid);
2374 #endif
2375 
2376 		if (lsc_interrupt &&
2377 		    (rte_eth_devices[pid].data->dev_flags &
2378 		     RTE_ETH_DEV_INTR_LSC))
2379 			port->dev_conf.intr_conf.lsc = 1;
2380 		if (rmv_interrupt &&
2381 		    (rte_eth_devices[pid].data->dev_flags &
2382 		     RTE_ETH_DEV_INTR_RMV))
2383 			port->dev_conf.intr_conf.rmv = 1;
2384 
2385 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2386 		/* Detect softnic port */
2387 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2388 			port->softnic_enable = 1;
2389 			memset(&port->softport, 0, sizeof(struct softnic_port));
2390 
2391 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2392 				port->softport.tm_flag = 1;
2393 		}
2394 #endif
2395 	}
2396 }
2397 
2398 void set_port_slave_flag(portid_t slave_pid)
2399 {
2400 	struct rte_port *port;
2401 
2402 	port = &ports[slave_pid];
2403 	port->slave_flag = 1;
2404 }
2405 
2406 void clear_port_slave_flag(portid_t slave_pid)
2407 {
2408 	struct rte_port *port;
2409 
2410 	port = &ports[slave_pid];
2411 	port->slave_flag = 0;
2412 }
2413 
2414 uint8_t port_is_bonding_slave(portid_t slave_pid)
2415 {
2416 	struct rte_port *port;
2417 
2418 	port = &ports[slave_pid];
2419 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2420 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2421 		return 1;
2422 	return 0;
2423 }
2424 
2425 const uint16_t vlan_tags[] = {
2426 		0,  1,  2,  3,  4,  5,  6,  7,
2427 		8,  9, 10, 11,  12, 13, 14, 15,
2428 		16, 17, 18, 19, 20, 21, 22, 23,
2429 		24, 25, 26, 27, 28, 29, 30, 31
2430 };
2431 
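/*
 * Fill eth_conf with a DCB configuration: VMDQ+DCB pools when dcb_mode is
 * DCB_VT_ENABLED, otherwise plain DCB with RSS on the RX side.
 */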
static int
2433 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2434 		 enum dcb_mode_enable dcb_mode,
2435 		 enum rte_eth_nb_tcs num_tcs,
2436 		 uint8_t pfc_en)
2437 {
2438 	uint8_t i;
2439 
	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
2444 	if (dcb_mode == DCB_VT_ENABLED) {
2445 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2446 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2447 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2448 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2449 
2450 		/* VMDQ+DCB RX and TX configurations */
2451 		vmdq_rx_conf->enable_default_pool = 0;
2452 		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2457 
2458 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2459 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2460 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2461 			vmdq_rx_conf->pool_map[i].pools =
2462 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2463 		}
2464 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2465 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2466 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2467 		}
2468 
2469 		/* set DCB mode of RX and TX of multiple queues */
2470 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2471 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2472 	} else {
2473 		struct rte_eth_dcb_rx_conf *rx_conf =
2474 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2475 		struct rte_eth_dcb_tx_conf *tx_conf =
2476 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2477 
2478 		rx_conf->nb_tcs = num_tcs;
2479 		tx_conf->nb_tcs = num_tcs;
2480 
2481 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2482 			rx_conf->dcb_tc[i] = i % num_tcs;
2483 			tx_conf->dcb_tc[i] = i % num_tcs;
2484 		}
2485 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2486 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2487 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2488 	}
2489 
2490 	if (pfc_en)
2491 		eth_conf->dcb_capability_en =
2492 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2493 	else
2494 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2495 
2496 	return 0;
2497 }
2498 
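/*
 * Reconfigure a port for DCB testing: build the DCB configuration,
 * reconfigure the device with it, adjust the global nb_rxq/nb_txq queue
 * counts and install a VLAN filter for every entry of vlan_tags[].
 */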
2499 int
2500 init_port_dcb_config(portid_t pid,
2501 		     enum dcb_mode_enable dcb_mode,
2502 		     enum rte_eth_nb_tcs num_tcs,
2503 		     uint8_t pfc_en)
2504 {
2505 	struct rte_eth_conf port_conf;
2506 	struct rte_port *rte_port;
2507 	int retval;
2508 	uint16_t i;
2509 
2510 	rte_port = &ports[pid];
2511 
2512 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2513 	/* Enter DCB configuration status */
2514 	dcb_config = 1;
2515 
2516 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2517 	port_conf.txmode = rte_port->dev_conf.txmode;
2518 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2520 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2521 	if (retval < 0)
2522 		return retval;
2523 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2524 
	/* Re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;
2527 
2528 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2529 
2530 	/* If dev_info.vmdq_pool_base is greater than 0,
2531 	 * the queue id of vmdq pools is started after pf queues.
2532 	 */
2533 	if (dcb_mode == DCB_VT_ENABLED &&
2534 	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2537 		return -1;
2538 	}
2539 
	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
2543 	if (dcb_mode == DCB_VT_ENABLED) {
2544 		if (rte_port->dev_info.max_vfs > 0) {
2545 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2546 			nb_txq = rte_port->dev_info.nb_tx_queues;
2547 		} else {
2548 			nb_rxq = rte_port->dev_info.max_rx_queues;
2549 			nb_txq = rte_port->dev_info.max_tx_queues;
2550 		}
2551 	} else {
		/* If VT is disabled, use all PF queues. */
2553 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2554 			nb_rxq = rte_port->dev_info.max_rx_queues;
2555 			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
2561 	}
2562 	rx_free_thresh = 64;
2563 
2564 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2565 
2566 	rxtx_port_config(rte_port);
2567 	/* VLAN filter */
2568 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2569 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2570 		rx_vft_set(pid, vlan_tags[i], 1);
2571 
2572 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2573 	map_port_queue_stats_mapping_registers(pid, rte_port);
2574 
2575 	rte_port->dcb_flag = 1;
2576 
2577 	return 0;
2578 }
2579 
2580 static void
2581 init_port(void)
2582 {
2583 	/* Configuration of Ethernet ports. */
2584 	ports = rte_zmalloc("testpmd: ports",
2585 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2586 			    RTE_CACHE_LINE_SIZE);
2587 	if (ports == NULL) {
2588 		rte_exit(EXIT_FAILURE,
2589 				"rte_zmalloc(%d struct rte_port) failed\n",
2590 				RTE_MAX_ETHPORTS);
2591 	}
2592 }
2593 
2594 static void
2595 force_quit(void)
2596 {
2597 	pmd_test_exit();
2598 	prompt_exit();
2599 }
2600 
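/*
 * Clear the terminal with ANSI escape sequences and display the NIC
 * statistics of every forwarding port.
 */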
2601 static void
2602 print_stats(void)
2603 {
2604 	uint8_t i;
2605 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2606 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2607 
2608 	/* Clear screen and move to top left */
2609 	printf("%s%s", clr, top_left);
2610 
2611 	printf("\nPort statistics ====================================");
2612 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2613 		nic_stats_display(fwd_ports_ids[i]);
2614 }
2615 
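/*
 * SIGINT/SIGTERM handler: tear the application down, then re-raise the
 * signal with the default disposition so the exit status reflects the
 * signal.
 */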
2616 static void
2617 signal_handler(int signum)
2618 {
2619 	if (signum == SIGINT || signum == SIGTERM) {
2620 		printf("\nSignal %d received, preparing to exit...\n",
2621 				signum);
2622 #ifdef RTE_LIBRTE_PDUMP
2623 		/* uninitialize packet capture framework */
2624 		rte_pdump_uninit();
2625 #endif
2626 #ifdef RTE_LIBRTE_LATENCY_STATS
2627 		rte_latencystats_uninit();
2628 #endif
2629 		force_quit();
2630 		/* Set flag to indicate the force termination. */
2631 		f_quit = 1;
2632 		/* exit with the expected status */
2633 		signal(signum, SIG_DFL);
2634 		kill(getpid(), signum);
2635 	}
2636 }
2637 
2638 int
2639 main(int argc, char** argv)
2640 {
2641 	int diag;
2642 	portid_t port_id;
2643 	int ret;
2644 
2645 	signal(SIGINT, signal_handler);
2646 	signal(SIGTERM, signal_handler);
2647 
2648 	diag = rte_eal_init(argc, argv);
2649 	if (diag < 0)
2650 		rte_panic("Cannot init EAL\n");
2651 
2652 	testpmd_logtype = rte_log_register("testpmd");
2653 	if (testpmd_logtype < 0)
2654 		rte_panic("Cannot register log type");
2655 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2656 
2657 #ifdef RTE_LIBRTE_PDUMP
2658 	/* initialize packet capture framework */
2659 	rte_pdump_init(NULL);
2660 #endif
2661 
2662 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2663 	if (nb_ports == 0)
2664 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2665 
2666 	/* allocate port structures, and init them */
2667 	init_port();
2668 
2669 	set_def_fwd_config();
2670 	if (nb_lcores == 0)
2671 		rte_panic("Empty set of forwarding logical cores - check the "
2672 			  "core mask supplied in the command parameters\n");
2673 
2674 	/* Bitrate/latency stats disabled by default */
2675 #ifdef RTE_LIBRTE_BITRATE
2676 	bitrate_enabled = 0;
2677 #endif
2678 #ifdef RTE_LIBRTE_LATENCY_STATS
2679 	latencystats_enabled = 0;
2680 #endif
2681 
2682 	/* on FreeBSD, mlockall() is disabled by default */
2683 #ifdef RTE_EXEC_ENV_BSDAPP
2684 	do_mlockall = 0;
2685 #else
2686 	do_mlockall = 1;
2687 #endif
2688 
2689 	argc -= diag;
2690 	argv += diag;
2691 	if (argc > 1)
2692 		launch_args_parse(argc, argv);
2693 
2694 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2695 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2696 			strerror(errno));
2697 	}
2698 
2699 	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");
2702 
2703 	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
2706 		lsc_interrupt = 0;
2707 	}
2708 
2709 	if (!nb_rxq && !nb_txq)
2710 		printf("Warning: Either rx or tx queues should be non-zero\n");
2711 
2712 	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);
2715 		       nb_rxq, nb_txq);
2716 
2717 	init_config();
2718 
2719 	if (hot_plug) {
2720 		/* enable hot plug monitoring */
2721 		ret = rte_dev_event_monitor_start();
2722 		if (ret) {
2723 			rte_errno = EINVAL;
2724 			return -1;
2725 		}
		ret = eth_dev_event_callback_register();
		if (ret)
			return -1;
	}
2729 
2730 	if (start_port(RTE_PORT_ALL) != 0)
2731 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2732 
2733 	/* set all ports to promiscuous mode by default */
2734 	RTE_ETH_FOREACH_DEV(port_id)
2735 		rte_eth_promiscuous_enable(port_id);
2736 
2737 	/* Init metrics library */
2738 	rte_metrics_init(rte_socket_id());
2739 
2740 #ifdef RTE_LIBRTE_LATENCY_STATS
2741 	if (latencystats_enabled != 0) {
2742 		int ret = rte_latencystats_init(1, NULL);
2743 		if (ret)
			printf("Warning: latencystats init() returned error %d\n",
				ret);
2746 		printf("Latencystats running on lcore %d\n",
2747 			latencystats_lcore_id);
2748 	}
2749 #endif
2750 
2751 	/* Setup bitrate stats */
2752 #ifdef RTE_LIBRTE_BITRATE
2753 	if (bitrate_enabled != 0) {
2754 		bitrate_data = rte_stats_bitrate_create();
2755 		if (bitrate_data == NULL)
2756 			rte_exit(EXIT_FAILURE,
2757 				"Could not allocate bitrate data.\n");
2758 		rte_stats_bitrate_reg(bitrate_data);
2759 	}
2760 #endif
2761 
2762 #ifdef RTE_LIBRTE_CMDLINE
2763 	if (strlen(cmdline_filename) != 0)
2764 		cmdline_read_from_file(cmdline_filename);
2765 
2766 	if (interactive == 1) {
2767 		if (auto_start) {
2768 			printf("Start automatic packet forwarding\n");
2769 			start_packet_forwarding(0);
2770 		}
2771 		prompt();
2772 		pmd_test_exit();
2773 	} else
2774 #endif
2775 	{
2776 		char c;
2777 		int rc;
2778 
2779 		f_quit = 0;
2780 
2781 		printf("No commandline core given, start packet forwarding\n");
2782 		start_packet_forwarding(tx_first);
2783 		if (stats_period != 0) {
2784 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2785 			uint64_t timer_period;
2786 
2787 			/* Convert to number of cycles */
2788 			timer_period = stats_period * rte_get_timer_hz();
2789 
2790 			while (f_quit == 0) {
2791 				cur_time = rte_get_timer_cycles();
2792 				diff_time += cur_time - prev_time;
2793 
2794 				if (diff_time >= timer_period) {
2795 					print_stats();
2796 					/* Reset the timer */
2797 					diff_time = 0;
2798 				}
2799 				/* Sleep to avoid unnecessary checks */
2800 				prev_time = cur_time;
2801 				sleep(1);
2802 			}
2803 		}
2804 
2805 		printf("Press enter to exit\n");
2806 		rc = read(0, &c, 1);
2807 		pmd_test_exit();
2808 		if (rc < 0)
2809 			return 1;
2810 	}
2811 
2812 	return 0;
2813 }
2814