xref: /dpdk/app/test-pmd/testpmd.c (revision 2f82d143fb318042f47a50694baa4507b51b7381)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63 
64 #include "testpmd.h"
65 
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68 
69 /* Use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
73 char cmdline_filename[PATH_MAX] = {0};
74 
75 /*
76  * NUMA support configuration.
77  * When set, the NUMA support attempts to dispatch the allocation of the
78  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79  * probed ports among the CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
82 uint8_t numa_support = 1; /**< numa enabled by default */
83 
84 /*
85  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
86  * not configured.
87  */
88 uint8_t socket_num = UMA_NO_CONFIG;
89 
90 /*
91  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
92  */
93 uint8_t mp_anon = 0;
94 
95 /*
96  * Store the specified sockets on which the memory pools used by the
97  * ports are allocated.
98  */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100 
101 /*
102  * Store the specified sockets on which the RX rings used by the
103  * ports are allocated.
104  */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106 
107 /*
108  * Store the specified sockets on which the TX rings used by the
109  * ports are allocated.
110  */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112 
113 /*
114  * Record the Ethernet address of peer target ports to which packets are
115  * forwarded.
116  * Must be instantiated with the Ethernet addresses of peer traffic generator
117  * ports.
118  */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121 
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;	       /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129 
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
139 
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142 
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
145 
146 /*
147  * Forwarding engines.
148  */
149 struct fwd_engine * fwd_engines[] = {
150 	&io_fwd_engine,
151 	&mac_fwd_engine,
152 	&mac_swap_engine,
153 	&flow_gen_engine,
154 	&rx_only_engine,
155 	&tx_only_engine,
156 	&csum_fwd_engine,
157 	&icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 	&softnic_tm_engine,
160 	&softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163 	&ieee1588_fwd_engine,
164 #endif
165 	NULL,
166 };
167 
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178 
179 /*
180  * In a container, the process running with the 'stats-period' option cannot
181  * be terminated normally. Set a flag to exit the stats-period loop on SIGINT/SIGTERM.
182  */
183 uint8_t f_quit;
184 
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
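/*
 * Example of a multi-segment TXONLY configuration (illustrative values):
 * "set txpkts 64,128" yields tx_pkt_seg_lengths = {64, 128},
 * tx_pkt_nb_segs = 2 and a total tx_pkt_length of 192 bytes.
 */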
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
200 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
201 uint8_t dcb_config = 0;
202 
203 /* Whether DCB is in testing status */
204 uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX streams before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273 
274 /*
275  * Avoid checking the link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290 
291 /*
292  * Display or mask Ethernet events.
293  * Defaults to all events except VF_MBOX.
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
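/*
 * Each RTE_ETH_EVENT_* enumerator above is used as a bit position in
 * event_print_mask. A minimal sketch of how an event handler could test
 * it (hypothetical helper, not part of this file):
 *
 *	static inline int
 *	event_is_printed(enum rte_eth_event_type type)
 *	{
 *		return (event_print_mask & (UINT32_C(1) << type)) != 0;
 *	}
 */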
302 
303 /*
304  * NIC bypass mode configuration options.
305  */
306 
307 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
308 /* The NIC bypass watchdog timeout. */
309 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
310 #endif
311 
312 
313 #ifdef RTE_LIBRTE_LATENCY_STATS
314 
315 /*
316  * Set when latency stats are enabled on the command line.
317  */
318 uint8_t latencystats_enabled;
319 
320 /*
321  * Lcore ID to service latency statistics.
322  */
323 lcoreid_t latencystats_lcore_id = -1;
324 
325 #endif
326 
327 /*
328  * Ethernet device configuration.
329  */
330 struct rte_eth_rxmode rx_mode = {
331 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
332 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
333 	.ignore_offload_bitfield = 1,
334 };
335 
336 struct rte_eth_txmode tx_mode = {
337 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
338 };
339 
340 struct rte_fdir_conf fdir_conf = {
341 	.mode = RTE_FDIR_MODE_NONE,
342 	.pballoc = RTE_FDIR_PBALLOC_64K,
343 	.status = RTE_FDIR_REPORT_STATUS,
344 	.mask = {
345 		.vlan_tci_mask = 0x0,
346 		.ipv4_mask     = {
347 			.src_ip = 0xFFFFFFFF,
348 			.dst_ip = 0xFFFFFFFF,
349 		},
350 		.ipv6_mask     = {
351 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
352 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 		},
354 		.src_port_mask = 0xFFFF,
355 		.dst_port_mask = 0xFFFF,
356 		.mac_addr_byte_mask = 0xFF,
357 		.tunnel_type_mask = 1,
358 		.tunnel_id_mask = 0xFFFFFFFF,
359 	},
360 	.drop_queue = 127,
361 };
362 
363 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
364 
365 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
366 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
367 
368 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
369 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
370 
371 uint16_t nb_tx_queue_stats_mappings = 0;
372 uint16_t nb_rx_queue_stats_mappings = 0;
373 
374 /*
375  * Display zero values by default for xstats
376  */
377 uint8_t xstats_hide_zero;
378 
379 unsigned int num_sockets = 0;
380 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
381 
382 #ifdef RTE_LIBRTE_BITRATE
383 /* Bitrate statistics */
384 struct rte_stats_bitrates *bitrate_data;
385 lcoreid_t bitrate_lcore_id;
386 uint8_t bitrate_enabled;
387 #endif
388 
389 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
390 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
391 
392 /* Forward function declarations */
393 static void map_port_queue_stats_mapping_registers(portid_t pi,
394 						   struct rte_port *port);
395 static void check_all_ports_link_status(uint32_t port_mask);
396 static int eth_event_callback(portid_t port_id,
397 			      enum rte_eth_event_type type,
398 			      void *param, void *ret_param);
399 static void eth_dev_event_callback(char *device_name,
400 				enum rte_dev_event_type type,
401 				void *param);
402 static int eth_dev_event_callback_register(void);
403 static int eth_dev_event_callback_unregister(void);
404 
405 
406 /*
407  * Check if all the ports are started.
408  * If yes, return positive value. If not, return zero.
409  */
410 static int all_ports_started(void);
411 
412 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
413 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
414 
415 /*
416  * Helper function to check whether a socket has already been discovered.
417  * Returns a positive value if the socket is new, zero if already known.
418  */
419 int
420 new_socket_id(unsigned int socket_id)
421 {
422 	unsigned int i;
423 
424 	for (i = 0; i < num_sockets; i++) {
425 		if (socket_ids[i] == socket_id)
426 			return 0;
427 	}
428 	return 1;
429 }
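/*
 * Typical use, mirroring set_default_fwd_lcores_config() below: record
 * a socket the first time it is seen (sketch, assuming socket_ids[]
 * still has room):
 *
 *	unsigned int sock = rte_lcore_to_socket_id(lcore_id);
 *
 *	if (new_socket_id(sock))
 *		socket_ids[num_sockets++] = sock;
 */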
430 
431 /*
432  * Setup default configuration.
433  */
434 static void
435 set_default_fwd_lcores_config(void)
436 {
437 	unsigned int i;
438 	unsigned int nb_lc;
439 	unsigned int sock_num;
440 
441 	nb_lc = 0;
442 	for (i = 0; i < RTE_MAX_LCORE; i++) {
443 		sock_num = rte_lcore_to_socket_id(i);
444 		if (new_socket_id(sock_num)) {
445 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
446 				rte_exit(EXIT_FAILURE,
447 					 "Total sockets greater than %u\n",
448 					 RTE_MAX_NUMA_NODES);
449 			}
450 			socket_ids[num_sockets++] = sock_num;
451 		}
452 		if (!rte_lcore_is_enabled(i))
453 			continue;
454 		if (i == rte_get_master_lcore())
455 			continue;
456 		fwd_lcores_cpuids[nb_lc++] = i;
457 	}
458 	nb_lcores = (lcoreid_t) nb_lc;
459 	nb_cfg_lcores = nb_lcores;
460 	nb_fwd_lcores = 1;
461 }
462 
463 static void
464 set_def_peer_eth_addrs(void)
465 {
466 	portid_t i;
467 
468 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
469 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
470 		peer_eth_addrs[i].addr_bytes[5] = i;
471 	}
472 }
473 
474 static void
475 set_default_fwd_ports_config(void)
476 {
477 	portid_t pt_id;
478 	int i = 0;
479 
480 	RTE_ETH_FOREACH_DEV(pt_id)
481 		fwd_ports_ids[i++] = pt_id;
482 
483 	nb_cfg_ports = nb_ports;
484 	nb_fwd_ports = nb_ports;
485 }
486 
487 void
488 set_def_fwd_config(void)
489 {
490 	set_default_fwd_lcores_config();
491 	set_def_peer_eth_addrs();
492 	set_default_fwd_ports_config();
493 }
494 
495 /*
496  * Configuration initialisation done once at init time.
497  */
498 static void
499 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
500 		 unsigned int socket_id)
501 {
502 	char pool_name[RTE_MEMPOOL_NAMESIZE];
503 	struct rte_mempool *rte_mp = NULL;
504 	uint32_t mb_size;
505 
506 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
507 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
508 
509 	TESTPMD_LOG(INFO,
510 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
511 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
512 
513 	if (mp_anon != 0) {
514 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
515 			mb_size, (unsigned) mb_mempool_cache,
516 			sizeof(struct rte_pktmbuf_pool_private),
517 			socket_id, 0);
518 		if (rte_mp == NULL)
519 			goto err;
520 
521 		if (rte_mempool_populate_anon(rte_mp) == 0) {
522 			rte_mempool_free(rte_mp);
523 			rte_mp = NULL;
524 			goto err;
525 		}
526 		rte_pktmbuf_pool_init(rte_mp, NULL);
527 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
528 	} else {
529 		/* wrapper to rte_mempool_create() */
530 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
531 				rte_mbuf_best_mempool_ops());
532 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
533 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
534 	}
535 
536 err:
537 	if (rte_mp == NULL) {
538 		rte_exit(EXIT_FAILURE,
539 			"Creation of mbuf pool for socket %u failed: %s\n",
540 			socket_id, rte_strerror(rte_errno));
541 	} else if (verbose_level > 0) {
542 		rte_mempool_dump(stdout, rte_mp);
543 	}
544 }
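/*
 * Example invocation (illustrative values only): create a pool of 8192
 * mbufs with the default data size on socket 0.
 *
 *	mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 8192, 0);
 */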
545 
546 /*
547  * Check whether the given socket ID is valid in NUMA mode.
548  * If valid, return 0; otherwise return -1.
549  */
550 static int
551 check_socket_id(const unsigned int socket_id)
552 {
553 	static int warning_once = 0;
554 
555 	if (new_socket_id(socket_id)) {
556 		if (!warning_once && numa_support)
557 			printf("Warning: NUMA should be configured manually by"
558 			       " using --port-numa-config and"
559 			       " --ring-numa-config parameters along with"
560 			       " --numa.\n");
561 		warning_once = 1;
562 		return -1;
563 	}
564 	return 0;
565 }
566 
567 /*
568  * Get the allowed maximum number of RX queues.
569  * *pid returns the port ID that has the minimal value of
570  * max_rx_queues among all ports.
571  */
572 queueid_t
573 get_allowed_max_nb_rxq(portid_t *pid)
574 {
575 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
576 	portid_t pi;
577 	struct rte_eth_dev_info dev_info;
578 
579 	RTE_ETH_FOREACH_DEV(pi) {
580 		rte_eth_dev_info_get(pi, &dev_info);
581 		if (dev_info.max_rx_queues < allowed_max_rxq) {
582 			allowed_max_rxq = dev_info.max_rx_queues;
583 			*pid = pi;
584 		}
585 	}
586 	return allowed_max_rxq;
587 }
588 
589 /*
590  * Check whether the input rxq is valid.
591  * The input rxq is valid if it is not greater than the maximum number
592  * of RX queues of any port.
593  * If valid, return 0; otherwise return -1.
594  */
595 int
596 check_nb_rxq(queueid_t rxq)
597 {
598 	queueid_t allowed_max_rxq;
599 	portid_t pid = 0;
600 
601 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
602 	if (rxq > allowed_max_rxq) {
603 		printf("Fail: input rxq (%u) can't be greater "
604 		       "than max_rx_queues (%u) of port %u\n",
605 		       rxq,
606 		       allowed_max_rxq,
607 		       pid);
608 		return -1;
609 	}
610 	return 0;
611 }
612 
613 /*
614  * Get the allowed maximum number of TX queues.
615  * *pid returns the port ID that has the minimal value of
616  * max_tx_queues among all ports.
617  */
618 queueid_t
619 get_allowed_max_nb_txq(portid_t *pid)
620 {
621 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
622 	portid_t pi;
623 	struct rte_eth_dev_info dev_info;
624 
625 	RTE_ETH_FOREACH_DEV(pi) {
626 		rte_eth_dev_info_get(pi, &dev_info);
627 		if (dev_info.max_tx_queues < allowed_max_txq) {
628 			allowed_max_txq = dev_info.max_tx_queues;
629 			*pid = pi;
630 		}
631 	}
632 	return allowed_max_txq;
633 }
634 
635 /*
636  * Check whether the input txq is valid.
637  * The input txq is valid if it is not greater than the maximum number
638  * of TX queues of any port.
639  * If valid, return 0; otherwise return -1.
640  */
641 int
642 check_nb_txq(queueid_t txq)
643 {
644 	queueid_t allowed_max_txq;
645 	portid_t pid = 0;
646 
647 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
648 	if (txq > allowed_max_txq) {
649 		printf("Fail: input txq (%u) can't be greater "
650 		       "than max_tx_queues (%u) of port %u\n",
651 		       txq,
652 		       allowed_max_txq,
653 		       pid);
654 		return -1;
655 	}
656 	return 0;
657 }
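/*
 * A sketch of the intended use of these checks, e.g. when applying a
 * user-supplied queue count (hypothetical variable nb):
 *
 *	if (check_nb_rxq(nb) == 0 && check_nb_txq(nb) == 0) {
 *		nb_rxq = nb;
 *		nb_txq = nb;
 *	}
 */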
658 
659 static void
660 init_config(void)
661 {
662 	portid_t pid;
663 	struct rte_port *port;
664 	struct rte_mempool *mbp;
665 	unsigned int nb_mbuf_per_pool;
666 	lcoreid_t  lc_id;
667 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
668 	struct rte_gro_param gro_param;
669 	uint32_t gso_types;
670 
671 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
672 
673 	if (numa_support) {
674 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
675 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
676 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
677 	}
678 
679 	/* Configuration of logical cores. */
680 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
681 				sizeof(struct fwd_lcore *) * nb_lcores,
682 				RTE_CACHE_LINE_SIZE);
683 	if (fwd_lcores == NULL) {
684 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
685 							"failed\n", nb_lcores);
686 	}
687 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
688 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
689 					       sizeof(struct fwd_lcore),
690 					       RTE_CACHE_LINE_SIZE);
691 		if (fwd_lcores[lc_id] == NULL) {
692 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
693 								"failed\n");
694 		}
695 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
696 	}
697 
698 	RTE_ETH_FOREACH_DEV(pid) {
699 		port = &ports[pid];
700 		/* Apply default TxRx configuration for all ports */
701 		port->dev_conf.txmode = tx_mode;
702 		port->dev_conf.rxmode = rx_mode;
703 		rte_eth_dev_info_get(pid, &port->dev_info);
704 		if (!(port->dev_info.tx_offload_capa &
705 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
706 			port->dev_conf.txmode.offloads &=
707 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
708 		if (numa_support) {
709 			if (port_numa[pid] != NUMA_NO_CONFIG)
710 				port_per_socket[port_numa[pid]]++;
711 			else {
712 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
713 
714 				/* if socket_id is invalid, set to 0 */
715 				if (check_socket_id(socket_id) < 0)
716 					socket_id = 0;
717 				port_per_socket[socket_id]++;
718 			}
719 		}
720 
721 		/* set flag to initialize port/queue */
722 		port->need_reconfig = 1;
723 		port->need_reconfig_queues = 1;
724 	}
725 
726 	/*
727 	 * Create mbuf pools.
728 	 * If NUMA support is disabled, create a single mbuf pool in
729 	 * socket 0 memory by default.
730 	 * Otherwise, create an mbuf pool in the memory of each detected socket.
731 	 *
732 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
733 	 * nb_txd can be reconfigured at run time.
734 	 */
735 	if (param_total_num_mbufs)
736 		nb_mbuf_per_pool = param_total_num_mbufs;
737 	else {
738 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
739 			(nb_lcores * mb_mempool_cache) +
740 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
741 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
742 	}
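	/*
	 * Worked example of the default sizing above (illustrative,
	 * assuming RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores):
	 * nb_mbuf_per_pool = 2048 + (4 * 250) + 2048 + 512 = 5608,
	 * then multiplied by RTE_MAX_ETHPORTS.
	 */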
743 
744 	if (numa_support) {
745 		uint8_t i;
746 
747 		for (i = 0; i < num_sockets; i++)
748 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
749 					 socket_ids[i]);
750 	} else {
751 		if (socket_num == UMA_NO_CONFIG)
752 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
753 		else
754 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
755 						 socket_num);
756 	}
757 
758 	init_port_config();
759 
760 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
761 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
762 	/*
763 	 * Record which mbuf pool each logical core uses, if needed.
764 	 */
765 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
766 		mbp = mbuf_pool_find(
767 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
768 
769 		if (mbp == NULL)
770 			mbp = mbuf_pool_find(0);
771 		fwd_lcores[lc_id]->mbp = mbp;
772 		/* initialize GSO context */
773 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
774 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
775 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
776 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
777 			ETHER_CRC_LEN;
778 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
779 	}
780 
781 	/* Configuration of packet forwarding streams. */
782 	if (init_fwd_streams() < 0)
783 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
784 
785 	fwd_config_setup();
786 
787 	/* create a gro context for each lcore */
788 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
789 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
790 	gro_param.max_item_per_flow = MAX_PKT_BURST;
791 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
792 		gro_param.socket_id = rte_lcore_to_socket_id(
793 				fwd_lcores_cpuids[lc_id]);
794 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
795 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
796 			rte_exit(EXIT_FAILURE,
797 					"rte_gro_ctx_create() failed\n");
798 		}
799 	}
800 }
801 
802 
803 void
804 reconfig(portid_t new_port_id, unsigned socket_id)
805 {
806 	struct rte_port *port;
807 
808 	/* Reconfiguration of Ethernet ports. */
809 	port = &ports[new_port_id];
810 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
811 
812 	/* set flag to initialize port/queue */
813 	port->need_reconfig = 1;
814 	port->need_reconfig_queues = 1;
815 	port->socket_id = socket_id;
816 
817 	init_port_config();
818 }
819 
820 
821 int
822 init_fwd_streams(void)
823 {
824 	portid_t pid;
825 	struct rte_port *port;
826 	streamid_t sm_id, nb_fwd_streams_new;
827 	queueid_t q;
828 
829 	/* Set the socket ID according to whether NUMA is enabled */
830 	RTE_ETH_FOREACH_DEV(pid) {
831 		port = &ports[pid];
832 		if (nb_rxq > port->dev_info.max_rx_queues) {
833 			printf("Fail: nb_rxq(%d) is greater than "
834 				"max_rx_queues(%d)\n", nb_rxq,
835 				port->dev_info.max_rx_queues);
836 			return -1;
837 		}
838 		if (nb_txq > port->dev_info.max_tx_queues) {
839 			printf("Fail: nb_txq(%d) is greater than "
840 				"max_tx_queues(%d)\n", nb_txq,
841 				port->dev_info.max_tx_queues);
842 			return -1;
843 		}
844 		if (numa_support) {
845 			if (port_numa[pid] != NUMA_NO_CONFIG)
846 				port->socket_id = port_numa[pid];
847 			else {
848 				port->socket_id = rte_eth_dev_socket_id(pid);
849 
850 				/* if socket_id is invalid, set to 0 */
851 				if (check_socket_id(port->socket_id) < 0)
852 					port->socket_id = 0;
853 			}
854 		}
855 		else {
856 			if (socket_num == UMA_NO_CONFIG)
857 				port->socket_id = 0;
858 			else
859 				port->socket_id = socket_num;
860 		}
861 	}
862 
863 	q = RTE_MAX(nb_rxq, nb_txq);
864 	if (q == 0) {
865 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
866 		return -1;
867 	}
868 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
869 	if (nb_fwd_streams_new == nb_fwd_streams)
870 		return 0;
871 	/* clear the old */
872 	if (fwd_streams != NULL) {
873 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
874 			if (fwd_streams[sm_id] == NULL)
875 				continue;
876 			rte_free(fwd_streams[sm_id]);
877 			fwd_streams[sm_id] = NULL;
878 		}
879 		rte_free(fwd_streams);
880 		fwd_streams = NULL;
881 	}
882 
883 	/* init new */
884 	nb_fwd_streams = nb_fwd_streams_new;
885 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
886 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
887 	if (fwd_streams == NULL)
888 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
889 						"failed\n", nb_fwd_streams);
890 
891 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
892 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
893 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
894 		if (fwd_streams[sm_id] == NULL)
895 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
896 								" failed\n");
897 	}
898 
899 	return 0;
900 }
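/*
 * Stream count example: with 2 ports, nb_rxq = 4 and nb_txq = 2,
 * q = RTE_MAX(4, 2) = 4 and nb_fwd_streams = 2 * 4 = 8 stream slots
 * are (re)allocated.
 */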
901 
902 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
903 static void
904 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
905 {
906 	unsigned int total_burst;
907 	unsigned int nb_burst;
908 	unsigned int burst_stats[3];
909 	uint16_t pktnb_stats[3];
910 	uint16_t nb_pkt;
911 	int burst_percent[3];
912 
913 	/*
914 	 * First compute the total number of packet bursts and the
915 	 * two highest numbers of bursts of the same number of packets.
916 	 */
917 	total_burst = 0;
918 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
919 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
920 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
921 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
922 		if (nb_burst == 0)
923 			continue;
924 		total_burst += nb_burst;
925 		if (nb_burst > burst_stats[0]) {
926 			burst_stats[1] = burst_stats[0];
927 			pktnb_stats[1] = pktnb_stats[0];
928 			burst_stats[0] = nb_burst;
929 			pktnb_stats[0] = nb_pkt;
930 		}
931 	}
932 	if (total_burst == 0)
933 		return;
934 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
935 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
936 	       burst_percent[0], (int) pktnb_stats[0]);
937 	if (burst_stats[0] == total_burst) {
938 		printf("]\n");
939 		return;
940 	}
941 	if (burst_stats[0] + burst_stats[1] == total_burst) {
942 		printf(" + %d%% of %d pkts]\n",
943 		       100 - burst_percent[0], pktnb_stats[1]);
944 		return;
945 	}
946 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
947 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
948 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
949 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
950 		return;
951 	}
952 	printf(" + %d%% of %d pkts + %d%% of others]\n",
953 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
954 }
955 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
956 
957 static void
958 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
959 {
960 	struct rte_port *port;
961 	uint8_t i;
962 
963 	static const char *fwd_stats_border = "----------------------";
964 
965 	port = &ports[port_id];
966 	printf("\n  %s Forward statistics for port %-2d %s\n",
967 	       fwd_stats_border, port_id, fwd_stats_border);
968 
969 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
970 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
971 		       "%-"PRIu64"\n",
972 		       stats->ipackets, stats->imissed,
973 		       (uint64_t) (stats->ipackets + stats->imissed));
974 
975 		if (cur_fwd_eng == &csum_fwd_engine)
976 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
977 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
978 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
979 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
980 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
981 		}
982 
983 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
984 		       "%-"PRIu64"\n",
985 		       stats->opackets, port->tx_dropped,
986 		       (uint64_t) (stats->opackets + port->tx_dropped));
987 	}
988 	else {
989 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
990 		       "%14"PRIu64"\n",
991 		       stats->ipackets, stats->imissed,
992 		       (uint64_t) (stats->ipackets + stats->imissed));
993 
994 		if (cur_fwd_eng == &csum_fwd_engine)
995 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
996 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
997 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
998 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
999 			printf("  RX-nombufs:             %14"PRIu64"\n",
1000 			       stats->rx_nombuf);
1001 		}
1002 
1003 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1004 		       "%14"PRIu64"\n",
1005 		       stats->opackets, port->tx_dropped,
1006 		       (uint64_t) (stats->opackets + port->tx_dropped));
1007 	}
1008 
1009 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1010 	if (port->rx_stream)
1011 		pkt_burst_stats_display("RX",
1012 			&port->rx_stream->rx_burst_stats);
1013 	if (port->tx_stream)
1014 		pkt_burst_stats_display("TX",
1015 			&port->tx_stream->tx_burst_stats);
1016 #endif
1017 
1018 	if (port->rx_queue_stats_mapping_enabled) {
1019 		printf("\n");
1020 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1021 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1022 			       "     RX-errors:%14"PRIu64
1023 			       "    RX-bytes:%14"PRIu64"\n",
1024 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1025 		}
1026 		printf("\n");
1027 	}
1028 	if (port->tx_queue_stats_mapping_enabled) {
1029 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1030 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1031 			       "                                 TX-bytes:%14"PRIu64"\n",
1032 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1033 		}
1034 	}
1035 
1036 	printf("  %s--------------------------------%s\n",
1037 	       fwd_stats_border, fwd_stats_border);
1038 }
1039 
1040 static void
1041 fwd_stream_stats_display(streamid_t stream_id)
1042 {
1043 	struct fwd_stream *fs;
1044 	static const char *fwd_top_stats_border = "-------";
1045 
1046 	fs = fwd_streams[stream_id];
1047 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1048 	    (fs->fwd_dropped == 0))
1049 		return;
1050 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1051 	       "TX Port=%2d/Queue=%2d %s\n",
1052 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1053 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1054 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1055 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1056 
1057 	/* if checksum mode */
1058 	if (cur_fwd_eng == &csum_fwd_engine) {
1059 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1060 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1061 	}
1062 
1063 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1064 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1065 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1066 #endif
1067 }
1068 
1069 static void
1070 flush_fwd_rx_queues(void)
1071 {
1072 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1073 	portid_t  rxp;
1074 	portid_t port_id;
1075 	queueid_t rxq;
1076 	uint16_t  nb_rx;
1077 	uint16_t  i;
1078 	uint8_t   j;
1079 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1080 	uint64_t timer_period;
1081 
1082 	/* convert to number of cycles */
1083 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1084 
1085 	for (j = 0; j < 2; j++) {
1086 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1087 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1088 				port_id = fwd_ports_ids[rxp];
1089 				/**
1090 				* testpmd can get stuck in the do-while loop below
1091 				* if rte_eth_rx_burst() always returns nonzero
1092 				* packets, so a timer is added to exit the loop
1093 				* after a one-second expiry.
1094 				*/
1095 				prev_tsc = rte_rdtsc();
1096 				do {
1097 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1098 						pkts_burst, MAX_PKT_BURST);
1099 					for (i = 0; i < nb_rx; i++)
1100 						rte_pktmbuf_free(pkts_burst[i]);
1101 
1102 					cur_tsc = rte_rdtsc();
1103 					diff_tsc = cur_tsc - prev_tsc;
1104 					timer_tsc += diff_tsc;
1105 				} while ((nb_rx > 0) &&
1106 					(timer_tsc < timer_period));
1107 				timer_tsc = 0;
1108 			}
1109 		}
1110 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1111 	}
1112 }
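/*
 * Note: the flush loop above bounds its runtime with the TSC; one second
 * equals rte_get_timer_hz() cycles, so a 2.0 GHz timer clock, for
 * example, yields a timer_period of 2,000,000,000 cycles.
 */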
1113 
1114 static void
1115 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1116 {
1117 	struct fwd_stream **fsm;
1118 	streamid_t nb_fs;
1119 	streamid_t sm_id;
1120 #ifdef RTE_LIBRTE_BITRATE
1121 	uint64_t tics_per_1sec;
1122 	uint64_t tics_datum;
1123 	uint64_t tics_current;
1124 	uint16_t idx_port;
1125 
1126 	tics_datum = rte_rdtsc();
1127 	tics_per_1sec = rte_get_timer_hz();
1128 #endif
1129 	fsm = &fwd_streams[fc->stream_idx];
1130 	nb_fs = fc->stream_nb;
1131 	do {
1132 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1133 			(*pkt_fwd)(fsm[sm_id]);
1134 #ifdef RTE_LIBRTE_BITRATE
1135 		if (bitrate_enabled != 0 &&
1136 				bitrate_lcore_id == rte_lcore_id()) {
1137 			tics_current = rte_rdtsc();
1138 			if (tics_current - tics_datum >= tics_per_1sec) {
1139 				/* Periodic bitrate calculation */
1140 				RTE_ETH_FOREACH_DEV(idx_port)
1141 					rte_stats_bitrate_calc(bitrate_data,
1142 						idx_port);
1143 				tics_datum = tics_current;
1144 			}
1145 		}
1146 #endif
1147 #ifdef RTE_LIBRTE_LATENCY_STATS
1148 		if (latencystats_enabled != 0 &&
1149 				latencystats_lcore_id == rte_lcore_id())
1150 			rte_latencystats_update();
1151 #endif
1152 
1153 	} while (! fc->stopped);
1154 }
1155 
1156 static int
1157 start_pkt_forward_on_core(void *fwd_arg)
1158 {
1159 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1160 			     cur_fwd_config.fwd_eng->packet_fwd);
1161 	return 0;
1162 }
1163 
1164 /*
1165  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1166  * Used to start communication flows in network loopback test configurations.
1167  */
1168 static int
1169 run_one_txonly_burst_on_core(void *fwd_arg)
1170 {
1171 	struct fwd_lcore *fwd_lc;
1172 	struct fwd_lcore tmp_lcore;
1173 
1174 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1175 	tmp_lcore = *fwd_lc;
1176 	tmp_lcore.stopped = 1;
1177 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1178 	return 0;
1179 }
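/*
 * Note: copying the lcore context and setting tmp_lcore.stopped = 1
 * before the call makes the do { ... } while (!fc->stopped) loop in
 * run_pkt_fwd_on_lcore() execute exactly once, i.e. each stream sends a
 * single burst.
 */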
1180 
1181 /*
1182  * Launch packet forwarding:
1183  *     - Setup per-port forwarding context.
1184  *     - launch logical cores with their forwarding configuration.
1185  */
1186 static void
1187 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1188 {
1189 	port_fwd_begin_t port_fwd_begin;
1190 	unsigned int i;
1191 	unsigned int lc_id;
1192 	int diag;
1193 
1194 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1195 	if (port_fwd_begin != NULL) {
1196 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1197 			(*port_fwd_begin)(fwd_ports_ids[i]);
1198 	}
1199 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1200 		lc_id = fwd_lcores_cpuids[i];
1201 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1202 			fwd_lcores[i]->stopped = 0;
1203 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1204 						     fwd_lcores[i], lc_id);
1205 			if (diag != 0)
1206 				printf("launch lcore %u failed - diag=%d\n",
1207 				       lc_id, diag);
1208 		}
1209 	}
1210 }
1211 
1212 /*
1213  * Launch packet forwarding configuration.
1214  */
1215 void
1216 start_packet_forwarding(int with_tx_first)
1217 {
1218 	port_fwd_begin_t port_fwd_begin;
1219 	port_fwd_end_t  port_fwd_end;
1220 	struct rte_port *port;
1221 	unsigned int i;
1222 	portid_t   pt_id;
1223 	streamid_t sm_id;
1224 
1225 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1226 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1227 
1228 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1229 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1230 
1231 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1232 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1233 		(!nb_rxq || !nb_txq))
1234 		rte_exit(EXIT_FAILURE,
1235 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1236 			cur_fwd_eng->fwd_mode_name);
1237 
1238 	if (all_ports_started() == 0) {
1239 		printf("Not all ports were started\n");
1240 		return;
1241 	}
1242 	if (test_done == 0) {
1243 		printf("Packet forwarding already started\n");
1244 		return;
1245 	}
1246 
1247 	if (init_fwd_streams() < 0) {
1248 		printf("Fail from init_fwd_streams()\n");
1249 		return;
1250 	}
1251 
1252 	if(dcb_test) {
1253 		for (i = 0; i < nb_fwd_ports; i++) {
1254 			pt_id = fwd_ports_ids[i];
1255 			port = &ports[pt_id];
1256 			if (!port->dcb_flag) {
1257 				printf("In DCB mode, all forwarding ports must "
1258 				       "be configured in this mode.\n");
1259 				return;
1260 			}
1261 		}
1262 		if (nb_fwd_lcores == 1) {
1263 			printf("In DCB mode, the number of forwarding cores "
1264 			       "should be larger than 1.\n");
1265 			return;
1266 		}
1267 	}
1268 	test_done = 0;
1269 
1270 	if(!no_flush_rx)
1271 		flush_fwd_rx_queues();
1272 
1273 	fwd_config_setup();
1274 	pkt_fwd_config_display(&cur_fwd_config);
1275 	rxtx_config_display();
1276 
1277 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1278 		pt_id = fwd_ports_ids[i];
1279 		port = &ports[pt_id];
1280 		rte_eth_stats_get(pt_id, &port->stats);
1281 		port->tx_dropped = 0;
1282 
1283 		map_port_queue_stats_mapping_registers(pt_id, port);
1284 	}
1285 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1286 		fwd_streams[sm_id]->rx_packets = 0;
1287 		fwd_streams[sm_id]->tx_packets = 0;
1288 		fwd_streams[sm_id]->fwd_dropped = 0;
1289 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1290 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1291 
1292 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1293 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1294 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1295 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1296 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1297 #endif
1298 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1299 		fwd_streams[sm_id]->core_cycles = 0;
1300 #endif
1301 	}
1302 	if (with_tx_first) {
1303 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1304 		if (port_fwd_begin != NULL) {
1305 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1306 				(*port_fwd_begin)(fwd_ports_ids[i]);
1307 		}
1308 		while (with_tx_first--) {
1309 			launch_packet_forwarding(
1310 					run_one_txonly_burst_on_core);
1311 			rte_eal_mp_wait_lcore();
1312 		}
1313 		port_fwd_end = tx_only_engine.port_fwd_end;
1314 		if (port_fwd_end != NULL) {
1315 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1316 				(*port_fwd_end)(fwd_ports_ids[i]);
1317 		}
1318 	}
1319 	launch_packet_forwarding(start_pkt_forward_on_core);
1320 }
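/*
 * Example: the "start tx_first 3" command is expected to reach this
 * function with with_tx_first = 3, so three one-shot TXONLY bursts are
 * launched (and waited for) before the configured forwarding engine is
 * started.
 */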
1321 
1322 void
1323 stop_packet_forwarding(void)
1324 {
1325 	struct rte_eth_stats stats;
1326 	struct rte_port *port;
1327 	port_fwd_end_t  port_fwd_end;
1328 	int i;
1329 	portid_t   pt_id;
1330 	streamid_t sm_id;
1331 	lcoreid_t  lc_id;
1332 	uint64_t total_recv;
1333 	uint64_t total_xmit;
1334 	uint64_t total_rx_dropped;
1335 	uint64_t total_tx_dropped;
1336 	uint64_t total_rx_nombuf;
1337 	uint64_t tx_dropped;
1338 	uint64_t rx_bad_ip_csum;
1339 	uint64_t rx_bad_l4_csum;
1340 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1341 	uint64_t fwd_cycles;
1342 #endif
1343 
1344 	static const char *acc_stats_border = "+++++++++++++++";
1345 
1346 	if (test_done) {
1347 		printf("Packet forwarding not started\n");
1348 		return;
1349 	}
1350 	printf("Telling cores to stop...");
1351 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1352 		fwd_lcores[lc_id]->stopped = 1;
1353 	printf("\nWaiting for lcores to finish...\n");
1354 	rte_eal_mp_wait_lcore();
1355 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1356 	if (port_fwd_end != NULL) {
1357 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1358 			pt_id = fwd_ports_ids[i];
1359 			(*port_fwd_end)(pt_id);
1360 		}
1361 	}
1362 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1363 	fwd_cycles = 0;
1364 #endif
1365 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1366 		if (cur_fwd_config.nb_fwd_streams >
1367 		    cur_fwd_config.nb_fwd_ports) {
1368 			fwd_stream_stats_display(sm_id);
1369 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1370 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1371 		} else {
1372 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1373 				fwd_streams[sm_id];
1374 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1375 				fwd_streams[sm_id];
1376 		}
1377 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1378 		tx_dropped = (uint64_t) (tx_dropped +
1379 					 fwd_streams[sm_id]->fwd_dropped);
1380 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1381 
1382 		rx_bad_ip_csum =
1383 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1384 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1385 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1386 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1387 							rx_bad_ip_csum;
1388 
1389 		rx_bad_l4_csum =
1390 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1391 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1392 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1393 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1394 							rx_bad_l4_csum;
1395 
1396 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1397 		fwd_cycles = (uint64_t) (fwd_cycles +
1398 					 fwd_streams[sm_id]->core_cycles);
1399 #endif
1400 	}
1401 	total_recv = 0;
1402 	total_xmit = 0;
1403 	total_rx_dropped = 0;
1404 	total_tx_dropped = 0;
1405 	total_rx_nombuf  = 0;
1406 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1407 		pt_id = fwd_ports_ids[i];
1408 
1409 		port = &ports[pt_id];
1410 		rte_eth_stats_get(pt_id, &stats);
1411 		stats.ipackets -= port->stats.ipackets;
1412 		port->stats.ipackets = 0;
1413 		stats.opackets -= port->stats.opackets;
1414 		port->stats.opackets = 0;
1415 		stats.ibytes   -= port->stats.ibytes;
1416 		port->stats.ibytes = 0;
1417 		stats.obytes   -= port->stats.obytes;
1418 		port->stats.obytes = 0;
1419 		stats.imissed  -= port->stats.imissed;
1420 		port->stats.imissed = 0;
1421 		stats.oerrors  -= port->stats.oerrors;
1422 		port->stats.oerrors = 0;
1423 		stats.rx_nombuf -= port->stats.rx_nombuf;
1424 		port->stats.rx_nombuf = 0;
1425 
1426 		total_recv += stats.ipackets;
1427 		total_xmit += stats.opackets;
1428 		total_rx_dropped += stats.imissed;
1429 		total_tx_dropped += port->tx_dropped;
1430 		total_rx_nombuf  += stats.rx_nombuf;
1431 
1432 		fwd_port_stats_display(pt_id, &stats);
1433 	}
1434 
1435 	printf("\n  %s Accumulated forward statistics for all ports"
1436 	       "%s\n",
1437 	       acc_stats_border, acc_stats_border);
1438 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1439 	       "%-"PRIu64"\n"
1440 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1441 	       "%-"PRIu64"\n",
1442 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1443 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1444 	if (total_rx_nombuf > 0)
1445 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1446 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1447 	       "%s\n",
1448 	       acc_stats_border, acc_stats_border);
1449 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1450 	if (total_recv > 0)
1451 		printf("\n  CPU cycles/packet=%u (total cycles="
1452 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1453 		       (unsigned int)(fwd_cycles / total_recv),
1454 		       fwd_cycles, total_recv);
1455 #endif
1456 	printf("\nDone.\n");
1457 	test_done = 1;
1458 }
1459 
1460 void
1461 dev_set_link_up(portid_t pid)
1462 {
1463 	if (rte_eth_dev_set_link_up(pid) < 0)
1464 		printf("\nSet link up fail.\n");
1465 }
1466 
1467 void
1468 dev_set_link_down(portid_t pid)
1469 {
1470 	if (rte_eth_dev_set_link_down(pid) < 0)
1471 		printf("\nSet link down fail.\n");
1472 }
1473 
1474 static int
1475 all_ports_started(void)
1476 {
1477 	portid_t pi;
1478 	struct rte_port *port;
1479 
1480 	RTE_ETH_FOREACH_DEV(pi) {
1481 		port = &ports[pi];
1482 		/* Check if there is a port which is not started */
1483 		if ((port->port_status != RTE_PORT_STARTED) &&
1484 			(port->slave_flag == 0))
1485 			return 0;
1486 	}
1487 
1488 	/* All ports are started. */
1489 	return 1;
1490 }
1491 
1492 int
1493 port_is_stopped(portid_t port_id)
1494 {
1495 	struct rte_port *port = &ports[port_id];
1496 
1497 	if ((port->port_status != RTE_PORT_STOPPED) &&
1498 	    (port->slave_flag == 0))
1499 		return 0;
1500 	return 1;
1501 }
1502 
1503 int
1504 all_ports_stopped(void)
1505 {
1506 	portid_t pi;
1507 
1508 	RTE_ETH_FOREACH_DEV(pi) {
1509 		if (!port_is_stopped(pi))
1510 			return 0;
1511 	}
1512 
1513 	return 1;
1514 }
1515 
1516 int
1517 port_is_started(portid_t port_id)
1518 {
1519 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1520 		return 0;
1521 
1522 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1523 		return 0;
1524 
1525 	return 1;
1526 }
1527 
1528 static int
1529 port_is_closed(portid_t port_id)
1530 {
1531 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1532 		return 0;
1533 
1534 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1535 		return 0;
1536 
1537 	return 1;
1538 }
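/*
 * Port state machine sketch: port_status normally moves
 * RTE_PORT_STOPPED -> RTE_PORT_HANDLING -> RTE_PORT_STARTED in
 * start_port() and back through RTE_PORT_HANDLING in stop_port() and
 * close_port(); the rte_atomic16_cmpset() calls below make each
 * transition exclusive, so concurrent state changes are rejected.
 */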
1539 
1540 int
1541 start_port(portid_t pid)
1542 {
1543 	int diag, need_check_link_status = -1;
1544 	portid_t pi;
1545 	queueid_t qi;
1546 	struct rte_port *port;
1547 	struct ether_addr mac_addr;
1548 	enum rte_eth_event_type event_type;
1549 
1550 	if (port_id_is_invalid(pid, ENABLED_WARN))
1551 		return 0;
1552 
1553 	if(dcb_config)
1554 		dcb_test = 1;
1555 	RTE_ETH_FOREACH_DEV(pi) {
1556 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1557 			continue;
1558 
1559 		need_check_link_status = 0;
1560 		port = &ports[pi];
1561 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1562 						 RTE_PORT_HANDLING) == 0) {
1563 			printf("Port %d is not stopped\n", pi);
1564 			continue;
1565 		}
1566 
1567 		if (port->need_reconfig > 0) {
1568 			port->need_reconfig = 0;
1569 
1570 			if (flow_isolate_all) {
1571 				int ret = port_flow_isolate(pi, 1);
1572 				if (ret) {
1573 					printf("Failed to apply isolated"
1574 					       " mode on port %d\n", pi);
1575 					return -1;
1576 				}
1577 			}
1578 
1579 			printf("Configuring Port %d (socket %u)\n", pi,
1580 					port->socket_id);
1581 			/* configure port */
1582 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1583 						&(port->dev_conf));
1584 			if (diag != 0) {
1585 				if (rte_atomic16_cmpset(&(port->port_status),
1586 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1587 					printf("Port %d can not be set back "
1588 							"to stopped\n", pi);
1589 				printf("Failed to configure port %d\n", pi);
1590 				/* try to reconfigure port next time */
1591 				port->need_reconfig = 1;
1592 				return -1;
1593 			}
1594 		}
1595 		if (port->need_reconfig_queues > 0) {
1596 			port->need_reconfig_queues = 0;
1597 			/* setup tx queues */
1598 			for (qi = 0; qi < nb_txq; qi++) {
1599 				port->tx_conf[qi].txq_flags =
1600 					ETH_TXQ_FLAGS_IGNORE;
1601 				/* Apply Tx offloads configuration */
1602 				port->tx_conf[qi].offloads =
1603 					port->dev_conf.txmode.offloads;
1604 				if ((numa_support) &&
1605 					(txring_numa[pi] != NUMA_NO_CONFIG))
1606 					diag = rte_eth_tx_queue_setup(pi, qi,
1607 						port->nb_tx_desc[qi],
1608 						txring_numa[pi],
1609 						&(port->tx_conf[qi]));
1610 				else
1611 					diag = rte_eth_tx_queue_setup(pi, qi,
1612 						port->nb_tx_desc[qi],
1613 						port->socket_id,
1614 						&(port->tx_conf[qi]));
1615 
1616 				if (diag == 0)
1617 					continue;
1618 
1619 				/* Failed to set up the tx queue; return */
1620 				if (rte_atomic16_cmpset(&(port->port_status),
1621 							RTE_PORT_HANDLING,
1622 							RTE_PORT_STOPPED) == 0)
1623 					printf("Port %d can not be set back "
1624 							"to stopped\n", pi);
1625 				printf("Failed to configure port %d tx queues\n",
1626 				       pi);
1627 				/* try to reconfigure queues next time */
1628 				port->need_reconfig_queues = 1;
1629 				return -1;
1630 			}
1631 			for (qi = 0; qi < nb_rxq; qi++) {
1632 				/* Apply Rx offloads configuration */
1633 				port->rx_conf[qi].offloads =
1634 					port->dev_conf.rxmode.offloads;
1635 				/* setup rx queues */
1636 				if ((numa_support) &&
1637 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1638 					struct rte_mempool * mp =
1639 						mbuf_pool_find(rxring_numa[pi]);
1640 					if (mp == NULL) {
1641 						printf("Failed to setup RX queue: "
1642 							"no mempool allocated "
1643 							"on socket %d\n",
1644 							rxring_numa[pi]);
1645 						return -1;
1646 					}
1647 
1648 					diag = rte_eth_rx_queue_setup(pi, qi,
1649 					     port->nb_rx_desc[pi],
1650 					     rxring_numa[pi],
1651 					     &(port->rx_conf[qi]),
1652 					     mp);
1653 				} else {
1654 					struct rte_mempool *mp =
1655 						mbuf_pool_find(port->socket_id);
1656 					if (mp == NULL) {
1657 						printf("Failed to setup RX queue: "
1658 							"no mempool allocated "
1659 							"on socket %d\n",
1660 							port->socket_id);
1661 						return -1;
1662 					}
1663 					diag = rte_eth_rx_queue_setup(pi, qi,
1664 					     port->nb_rx_desc[pi],
1665 					     port->socket_id,
1666 					     &(port->rx_conf[qi]),
1667 					     mp);
1668 				}
1669 				if (diag == 0)
1670 					continue;
1671 
1672 				/* Failed to set up the rx queue; return */
1673 				if (rte_atomic16_cmpset(&(port->port_status),
1674 							RTE_PORT_HANDLING,
1675 							RTE_PORT_STOPPED) == 0)
1676 					printf("Port %d can not be set back "
1677 							"to stopped\n", pi);
1678 				printf("Failed to configure port %d rx queues\n",
1679 				       pi);
1680 				/* try to reconfigure queues next time */
1681 				port->need_reconfig_queues = 1;
1682 				return -1;
1683 			}
1684 		}
1685 
1686 		/* start port */
1687 		if (rte_eth_dev_start(pi) < 0) {
1688 			printf("Failed to start port %d\n", pi);
1689 
1690 			/* Failed to start the port; revert the status to stopped */
1691 			if (rte_atomic16_cmpset(&(port->port_status),
1692 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1693 				printf("Port %d can not be set back to "
1694 							"stopped\n", pi);
1695 			continue;
1696 		}
1697 
1698 		if (rte_atomic16_cmpset(&(port->port_status),
1699 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1700 			printf("Port %d can not be set into started\n", pi);
1701 
1702 		rte_eth_macaddr_get(pi, &mac_addr);
1703 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1704 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1705 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1706 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1707 
1708 		/* At least one port started; need to check link status */
1709 		need_check_link_status = 1;
1710 	}
1711 
1712 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1713 	     event_type < RTE_ETH_EVENT_MAX;
1714 	     event_type++) {
1715 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1716 						event_type,
1717 						eth_event_callback,
1718 						NULL);
1719 		if (diag) {
1720 			printf("Failed to setup event callback for event %d\n",
1721 				event_type);
1722 			return -1;
1723 		}
1724 	}
1725 
1726 	if (need_check_link_status == 1 && !no_link_check)
1727 		check_all_ports_link_status(RTE_PORT_ALL);
1728 	else if (need_check_link_status == 0)
1729 		printf("Please stop the ports first\n");
1730 
1731 	printf("Done\n");
1732 	return 0;
1733 }
1734 
1735 void
1736 stop_port(portid_t pid)
1737 {
1738 	portid_t pi;
1739 	struct rte_port *port;
1740 	int need_check_link_status = 0;
1741 
1742 	if (dcb_test) {
1743 		dcb_test = 0;
1744 		dcb_config = 0;
1745 	}
1746 
1747 	if (port_id_is_invalid(pid, ENABLED_WARN))
1748 		return;
1749 
1750 	printf("Stopping ports...\n");
1751 
1752 	RTE_ETH_FOREACH_DEV(pi) {
1753 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1754 			continue;
1755 
1756 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1757 			printf("Please remove port %d from forwarding configuration.\n", pi);
1758 			continue;
1759 		}
1760 
1761 		if (port_is_bonding_slave(pi)) {
1762 			printf("Please remove port %d from bonded device.\n", pi);
1763 			continue;
1764 		}
1765 
1766 		port = &ports[pi];
1767 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1768 						RTE_PORT_HANDLING) == 0)
1769 			continue;
1770 
1771 		rte_eth_dev_stop(pi);
1772 
1773 		if (rte_atomic16_cmpset(&(port->port_status),
1774 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1775 			printf("Port %d can not be set into stopped\n", pi);
1776 		need_check_link_status = 1;
1777 	}
1778 	if (need_check_link_status && !no_link_check)
1779 		check_all_ports_link_status(RTE_PORT_ALL);
1780 
1781 	printf("Done\n");
1782 }
1783 
1784 void
1785 close_port(portid_t pid)
1786 {
1787 	portid_t pi;
1788 	struct rte_port *port;
1789 
1790 	if (port_id_is_invalid(pid, ENABLED_WARN))
1791 		return;
1792 
1793 	printf("Closing ports...\n");
1794 
1795 	RTE_ETH_FOREACH_DEV(pi) {
1796 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1797 			continue;
1798 
1799 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1800 			printf("Please remove port %d from forwarding configuration.\n", pi);
1801 			continue;
1802 		}
1803 
1804 		if (port_is_bonding_slave(pi)) {
1805 			printf("Please remove port %d from bonded device.\n", pi);
1806 			continue;
1807 		}
1808 
1809 		port = &ports[pi];
1810 		if (rte_atomic16_cmpset(&(port->port_status),
1811 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1812 			printf("Port %d is already closed\n", pi);
1813 			continue;
1814 		}
1815 
1816 		if (rte_atomic16_cmpset(&(port->port_status),
1817 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1818 			printf("Port %d is not stopped\n", pi);
1819 			continue;
1820 		}
1821 
1822 		if (port->flow_list)
1823 			port_flow_flush(pi);
1824 		rte_eth_dev_close(pi);
1825 
1826 		if (rte_atomic16_cmpset(&(port->port_status),
1827 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1828 			printf("Port %d cannot be set to closed\n", pi);
1829 	}
1830 
1831 	printf("Done\n");
1832 }
1833 
1834 void
1835 reset_port(portid_t pid)
1836 {
1837 	int diag;
1838 	portid_t pi;
1839 	struct rte_port *port;
1840 
1841 	if (port_id_is_invalid(pid, ENABLED_WARN))
1842 		return;
1843 
1844 	printf("Resetting ports...\n");
1845 
1846 	RTE_ETH_FOREACH_DEV(pi) {
1847 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1848 			continue;
1849 
1850 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1851 			printf("Please remove port %d from forwarding "
1852 			       "configuration.\n", pi);
1853 			continue;
1854 		}
1855 
1856 		if (port_is_bonding_slave(pi)) {
1857 			printf("Please remove port %d from bonded device.\n",
1858 			       pi);
1859 			continue;
1860 		}
1861 
1862 		diag = rte_eth_dev_reset(pi);
1863 		if (diag == 0) {
1864 			port = &ports[pi];
1865 			port->need_reconfig = 1;
1866 			port->need_reconfig_queues = 1;
1867 		} else {
1868 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1869 		}
1870 	}
1871 
1872 	printf("Done\n");
1873 }
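
/*
 * Editor's note: a minimal sketch of the full reset sequence implied by
 * reset_port() (stop, reset, reconfigure, restart), assuming EAL is
 * initialized and the queues are re-created before restarting; names
 * prefixed "example_" are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_reset_and_restart(portid_t pi, const struct rte_eth_conf *conf)
{
	int ret;

	rte_eth_dev_stop(pi);
	ret = rte_eth_dev_reset(pi);
	if (ret != 0)
		return ret;	/* e.g. -ENOTSUP if the PMD has no reset */

	/* A reset invalidates the old configuration, which is why
	 * reset_port() sets need_reconfig and need_reconfig_queues.
	 */
	ret = rte_eth_dev_configure(pi, 1, 1, conf);
	if (ret != 0)
		return ret;
	/* ... re-create the RX/TX queues here, then restart ... */
	return rte_eth_dev_start(pi);
}
#endif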
1874 
1875 static int
1876 eth_dev_event_callback_register(void)
1877 {
1878 	int ret;
1879 
1880 	/* register the device event callback */
1881 	ret = rte_dev_event_callback_register(NULL,
1882 		eth_dev_event_callback, NULL);
1883 	if (ret) {
1884 		printf("Failed to register device event callback\n");
1885 		return -1;
1886 	}
1887 
1888 	return 0;
1889 }
1890 
1891 
1892 static int
1893 eth_dev_event_callback_unregister(void)
1894 {
1895 	int ret;
1896 
1897 	/* unregister the device event callback */
1898 	ret = rte_dev_event_callback_unregister(NULL,
1899 		eth_dev_event_callback, NULL);
1900 	if (ret < 0) {
1901 		printf("Failed to unregister device event callback\n");
1902 		return -1;
1903 	}
1904 
1905 	return 0;
1906 }
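
/*
 * Editor's note: sketch of how an application hooks its own callback into
 * the hotplug monitor used by the two helpers above.  A NULL device name
 * subscribes to events from all devices; example_dev_event_cb and
 * example_monitor_start are hypothetical names.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_dev_event_cb(char *device_name, enum rte_dev_event_type type,
		     __rte_unused void *arg)
{
	if (type == RTE_DEV_EVENT_REMOVE)
		printf("device %s removed\n", device_name);
}

static int
example_monitor_start(void)
{
	if (rte_dev_event_monitor_start() != 0)
		return -1;
	return rte_dev_event_callback_register(NULL, example_dev_event_cb,
					       NULL);
}
#endif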
1907 
1908 void
1909 attach_port(char *identifier)
1910 {
1911 	portid_t pi = 0;
1912 	unsigned int socket_id;
1913 
1914 	printf("Attaching a new port...\n");
1915 
1916 	if (identifier == NULL) {
1917 		printf("Invalid parameter: device identifier is NULL\n");
1918 		return;
1919 	}
1920 
1921 	if (rte_eth_dev_attach(identifier, &pi))
1922 		return;
1923 
1924 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1925 	/* if socket_id is invalid, set to 0 */
1926 	if (check_socket_id(socket_id) < 0)
1927 		socket_id = 0;
1928 	reconfig(pi, socket_id);
1929 	rte_eth_promiscuous_enable(pi);
1930 
1931 	nb_ports = rte_eth_dev_count_avail();
1932 
1933 	ports[pi].port_status = RTE_PORT_STOPPED;
1934 
1935 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1936 	printf("Done\n");
1937 }
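
/*
 * Editor's note: a minimal sketch of hot-attaching a port from a devargs
 * string, as attach_port() does above.  "net_null0" (the null PMD) is an
 * assumed example; a PCI address such as "0000:01:00.0" works the same way.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_attach(void)
{
	portid_t pi;

	if (rte_eth_dev_attach("net_null0", &pi) != 0)
		return -1;
	printf("attached port %u\n", pi);
	return 0;
}
#endif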
1938 
1939 void
1940 detach_port(portid_t port_id)
1941 {
1942 	char name[RTE_ETH_NAME_MAX_LEN];
1943 
1944 	printf("Detaching a port...\n");
1945 
1946 	if (!port_is_closed(port_id)) {
1947 		printf("Please close port first\n");
1948 		return;
1949 	}
1950 
1951 	if (ports[port_id].flow_list)
1952 		port_flow_flush(port_id);
1953 
1954 	if (rte_eth_dev_detach(port_id, name)) {
1955 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1956 		return;
1957 	}
1958 
1959 	nb_ports = rte_eth_dev_count_avail();
1960 
1961 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1962 			name, nb_ports);
1963 	printf("Done\n");
1964 	return;
1965 }
1966 
1967 void
1968 pmd_test_exit(void)
1969 {
1970 	portid_t pt_id;
1971 	int ret;
1972 
1973 	if (test_done == 0)
1974 		stop_packet_forwarding();
1975 
1976 	if (ports != NULL) {
1977 		no_link_check = 1;
1978 		RTE_ETH_FOREACH_DEV(pt_id) {
1979 			printf("\nShutting down port %d...\n", pt_id);
1980 			fflush(stdout);
1981 			stop_port(pt_id);
1982 			close_port(pt_id);
1983 		}
1984 	}
1985 
1986 	if (hot_plug) {
1987 		ret = rte_dev_event_monitor_stop();
1988 		if (ret)
1989 			RTE_LOG(ERR, EAL,
1990 				"failed to stop the device event monitor\n");
1991 
1992 		ret = eth_dev_event_callback_unregister();
1993 		if (ret)
1994 			RTE_LOG(ERR, EAL,
1995 				"failed to unregister all event callbacks\n");
1996 	}
1997 
1998 	printf("\nBye...\n");
1999 }
2000 
2001 typedef void (*cmd_func_t)(void);
2002 struct pmd_test_command {
2003 	const char *cmd_name;
2004 	cmd_func_t cmd_func;
2005 };
2006 
2007 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2008 
2009 /* Check the link status of all ports, for up to 9 s, and print the final status */
2010 static void
2011 check_all_ports_link_status(uint32_t port_mask)
2012 {
2013 #define CHECK_INTERVAL 100 /* 100ms */
2014 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2015 	portid_t portid;
2016 	uint8_t count, all_ports_up, print_flag = 0;
2017 	struct rte_eth_link link;
2018 
2019 	printf("Checking link statuses...\n");
2020 	fflush(stdout);
2021 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2022 		all_ports_up = 1;
2023 		RTE_ETH_FOREACH_DEV(portid) {
2024 			if ((port_mask & (1 << portid)) == 0)
2025 				continue;
2026 			memset(&link, 0, sizeof(link));
2027 			rte_eth_link_get_nowait(portid, &link);
2028 			/* print link status if flag set */
2029 			if (print_flag == 1) {
2030 				if (link.link_status)
2031 					printf(
2032 					"Port %d Link Up. speed %u Mbps - %s\n",
2033 					portid, link.link_speed,
2034 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2035 					("full-duplex") : ("half-duplex"));
2036 				else
2037 					printf("Port %d Link Down\n", portid);
2038 				continue;
2039 			}
2040 			/* clear all_ports_up flag if any link down */
2041 			if (link.link_status == ETH_LINK_DOWN) {
2042 				all_ports_up = 0;
2043 				break;
2044 			}
2045 		}
2046 		/* after finally printing all link status, get out */
2047 		if (print_flag == 1)
2048 			break;
2049 
2050 		if (all_ports_up == 0) {
2051 			fflush(stdout);
2052 			rte_delay_ms(CHECK_INTERVAL);
2053 		}
2054 
2055 		/* set the print_flag if all ports up or timeout */
2056 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2057 			print_flag = 1;
2058 		}
2059 
2060 		if (lsc_interrupt)
2061 			break;
2062 	}
2063 }
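
/*
 * Editor's note: the same non-blocking link poll for a single port, as a
 * stand-alone sketch (assumes the port is already started; the 90 x 100 ms
 * budget mirrors the loop above; example_wait_link_up is a hypothetical
 * name).
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_wait_link_up(portid_t pi)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < 90; i++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(pi, &link);
		if (link.link_status == ETH_LINK_UP) {
			printf("Port %u up at %u Mbps\n",
			       pi, link.link_speed);
			return;
		}
		rte_delay_ms(100);
	}
	printf("Port %u link still down\n", pi);
}
#endif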
2064 
2065 static void
2066 rmv_event_callback(void *arg)
2067 {
2068 	struct rte_eth_dev *dev;
2069 	portid_t port_id = (intptr_t)arg;
2070 
2071 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2072 	dev = &rte_eth_devices[port_id];
2073 
2074 	stop_port(port_id);
2075 	close_port(port_id);
2076 	printf("removing device %s\n", dev->device->name);
2077 	if (rte_eal_dev_detach(dev->device))
2078 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2079 			dev->device->name);
2080 }
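
/*
 * Editor's note: rmv_event_callback() is not invoked directly from the
 * interrupt thread; eth_event_callback() below defers it through an EAL
 * alarm.  A minimal sketch of that deferral pattern (names prefixed
 * "example_" are hypothetical):
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_deferred_work(void *arg)
{
	printf("deferred handling for port %u\n",
	       (unsigned int)(intptr_t)arg);
}

static void
example_defer(portid_t pi)
{
	/* Run example_deferred_work() roughly 100 ms from now, outside
	 * the thread that delivered the event.
	 */
	if (rte_eal_alarm_set(100000, example_deferred_work,
			      (void *)(intptr_t)pi) != 0)
		fprintf(stderr, "cannot arm deferred work alarm\n");
}
#endif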
2081 
2082 /* This function is used by the interrupt thread */
2083 static int
2084 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2085 		  void *ret_param)
2086 {
2087 	static const char * const event_desc[] = {
2088 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2089 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2090 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2091 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2092 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2093 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2094 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2095 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2096 		[RTE_ETH_EVENT_NEW] = "device probed",
2097 		[RTE_ETH_EVENT_DESTROY] = "device released",
2098 		[RTE_ETH_EVENT_MAX] = NULL,
2099 	};
2100 
2101 	RTE_SET_USED(param);
2102 	RTE_SET_USED(ret_param);
2103 
2104 	if (type >= RTE_ETH_EVENT_MAX) {
2105 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2106 			port_id, __func__, type);
2107 		fflush(stderr);
2108 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2109 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2110 			event_desc[type]);
2111 		fflush(stdout);
2112 	}
2113 
2114 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2115 		return 0;
2116 
2117 	switch (type) {
2118 	case RTE_ETH_EVENT_INTR_RMV:
2119 		if (rte_eal_alarm_set(100000,
2120 				rmv_event_callback, (void *)(intptr_t)port_id))
2121 			fprintf(stderr, "Could not set up deferred device removal\n");
2122 		break;
2123 	default:
2124 		break;
2125 	}
2126 	return 0;
2127 }
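
/*
 * Editor's note: a sketch of how a handler with this signature is attached
 * to one port and one event type; start_port() performs the equivalent
 * registration in a loop over all event types.  example_register_lsc is a
 * hypothetical name.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_register_lsc(portid_t pi)
{
	/* Deliver link-state-change events for port pi to
	 * eth_event_callback() with no private argument.
	 */
	return rte_eth_dev_callback_register(pi, RTE_ETH_EVENT_INTR_LSC,
					     eth_event_callback, NULL);
}
#endif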
2128 
2129 /* This function is used by the interrupt thread */
2130 static void
2131 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2132 			     __rte_unused void *arg)
2133 {
2134 	if (type >= RTE_DEV_EVENT_MAX) {
2135 		fprintf(stderr, "%s called upon invalid event %d\n",
2136 			__func__, type);
2137 		fflush(stderr);
2138 	}
2139 
2140 	switch (type) {
2141 	case RTE_DEV_EVENT_REMOVE:
2142 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2143 			device_name);
2144 		/* TODO: Once failure handling is finished, stop packet
2145 		 * forwarding, then stop, close and detach the port.
2146 		 */
2147 		break;
2148 	case RTE_DEV_EVENT_ADD:
2149 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2150 			device_name);
2151 		/* TODO: Once the kernel driver binding is finished,
2152 		 * attach the port.
2153 		 */
2154 		break;
2155 	default:
2156 		break;
2157 	}
2158 }
2159 
2160 static int
2161 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2162 {
2163 	uint16_t i;
2164 	int diag;
2165 	uint8_t mapping_found = 0;
2166 
2167 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2168 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2169 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2170 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2171 					tx_queue_stats_mappings[i].queue_id,
2172 					tx_queue_stats_mappings[i].stats_counter_id);
2173 			if (diag != 0)
2174 				return diag;
2175 			mapping_found = 1;
2176 		}
2177 	}
2178 	if (mapping_found)
2179 		port->tx_queue_stats_mapping_enabled = 1;
2180 	return 0;
2181 }
2182 
2183 static int
2184 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2185 {
2186 	uint16_t i;
2187 	int diag;
2188 	uint8_t mapping_found = 0;
2189 
2190 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2191 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2192 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2193 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2194 					rx_queue_stats_mappings[i].queue_id,
2195 					rx_queue_stats_mappings[i].stats_counter_id);
2196 			if (diag != 0)
2197 				return diag;
2198 			mapping_found = 1;
2199 		}
2200 	}
2201 	if (mapping_found)
2202 		port->rx_queue_stats_mapping_enabled = 1;
2203 	return 0;
2204 }
2205 
2206 static void
2207 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2208 {
2209 	int diag = 0;
2210 
2211 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2212 	if (diag != 0) {
2213 		if (diag == -ENOTSUP) {
2214 			port->tx_queue_stats_mapping_enabled = 0;
2215 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2216 		}
2217 		else
2218 			rte_exit(EXIT_FAILURE,
2219 					"set_tx_queue_stats_mapping_registers "
2220 					"failed for port id=%d diag=%d\n",
2221 					pi, diag);
2222 	}
2223 
2224 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2225 	if (diag != 0) {
2226 		if (diag == -ENOTSUP) {
2227 			port->rx_queue_stats_mapping_enabled = 0;
2228 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2229 		}
2230 		else
2231 			rte_exit(EXIT_FAILURE,
2232 					"set_rx_queue_stats_mapping_registers "
2233 					"failed for port id=%d diag=%d\n",
2234 					pi, diag);
2235 	}
2236 }
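
/*
 * Editor's note: a minimal sketch of one queue-stats mapping and of
 * reading the mapped counter back, assuming a port with at least one TX
 * queue.  Counter slot 0 is an arbitrary example; example_map_and_read is
 * a hypothetical name.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_map_and_read(portid_t pi)
{
	struct rte_eth_stats stats;

	/* Map TX queue 0 onto per-queue counter slot 0; PMDs without the
	 * feature return -ENOTSUP, which the helpers above tolerate.
	 */
	if (rte_eth_dev_set_tx_queue_stats_mapping(pi, 0, 0) == 0 &&
	    rte_eth_stats_get(pi, &stats) == 0)
		printf("q0 opackets: %" PRIu64 "\n", stats.q_opackets[0]);
}
#endif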
2237 
2238 static void
2239 rxtx_port_config(struct rte_port *port)
2240 {
2241 	uint16_t qid;
2242 
2243 	for (qid = 0; qid < nb_rxq; qid++) {
2244 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2245 
2246 		/* Check if any Rx parameters have been passed */
2247 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2248 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2249 
2250 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2251 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2252 
2253 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2254 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2255 
2256 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2257 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2258 
2259 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2260 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2261 
2262 		port->nb_rx_desc[qid] = nb_rxd;
2263 	}
2264 
2265 	for (qid = 0; qid < nb_txq; qid++) {
2266 		port->tx_conf[qid] = port->dev_info.default_txconf;
2267 
2268 		/* Check if any Tx parameters have been passed */
2269 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2270 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2271 
2272 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2273 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2274 
2275 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2276 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2277 
2278 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2279 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2280 
2281 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2282 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2283 
2284 		port->nb_tx_desc[qid] = nb_txd;
2285 	}
2286 }
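
/*
 * Editor's note: the per-queue templates filled in above are consumed by
 * the queue-setup calls when the port is (re)started.  A minimal sketch
 * for one RX queue, where example_pool is an assumed, already-created
 * mbuf pool:
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_setup_rxq(portid_t pi, struct rte_port *port,
		  struct rte_mempool *example_pool)
{
	/* Hand the configured descriptor count and rxconf template for
	 * queue 0 to the device, on the port's own NUMA socket.
	 */
	return rte_eth_rx_queue_setup(pi, 0, port->nb_rx_desc[0],
				      rte_eth_dev_socket_id(pi),
				      &port->rx_conf[0], example_pool);
}
#endif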
2287 
2288 void
2289 init_port_config(void)
2290 {
2291 	portid_t pid;
2292 	struct rte_port *port;
2293 	struct rte_eth_dev_info dev_info;
2294 
2295 	RTE_ETH_FOREACH_DEV(pid) {
2296 		port = &ports[pid];
2297 		port->dev_conf.fdir_conf = fdir_conf;
2298 		if (nb_rxq > 1) {
2299 			rte_eth_dev_info_get(pid, &dev_info);
2300 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2301 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2302 				rss_hf & dev_info.flow_type_rss_offloads;
2303 		} else {
2304 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2305 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2306 		}
2307 
2308 		if (port->dcb_flag == 0) {
2309 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2310 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2311 			else
2312 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2313 		}
2314 
2315 		rxtx_port_config(port);
2316 
2317 		rte_eth_macaddr_get(pid, &port->eth_addr);
2318 
2319 		map_port_queue_stats_mapping_registers(pid, port);
2320 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2321 		rte_pmd_ixgbe_bypass_init(pid);
2322 #endif
2323 
2324 		if (lsc_interrupt &&
2325 		    (rte_eth_devices[pid].data->dev_flags &
2326 		     RTE_ETH_DEV_INTR_LSC))
2327 			port->dev_conf.intr_conf.lsc = 1;
2328 		if (rmv_interrupt &&
2329 		    (rte_eth_devices[pid].data->dev_flags &
2330 		     RTE_ETH_DEV_INTR_RMV))
2331 			port->dev_conf.intr_conf.rmv = 1;
2332 
2333 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2334 		/* Detect softnic port */
2335 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2336 			port->softnic_enable = 1;
2337 			memset(&port->softport, 0, sizeof(struct softnic_port));
2338 
2339 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2340 				port->softport.tm_flag = 1;
2341 		}
2342 #endif
2343 	}
2344 }
2345 
2346 void set_port_slave_flag(portid_t slave_pid)
2347 {
2348 	struct rte_port *port;
2349 
2350 	port = &ports[slave_pid];
2351 	port->slave_flag = 1;
2352 }
2353 
2354 void clear_port_slave_flag(portid_t slave_pid)
2355 {
2356 	struct rte_port *port;
2357 
2358 	port = &ports[slave_pid];
2359 	port->slave_flag = 0;
2360 }
2361 
2362 uint8_t port_is_bonding_slave(portid_t slave_pid)
2363 {
2364 	struct rte_port *port;
2365 
2366 	port = &ports[slave_pid];
2367 	return port->slave_flag;
2368 }
2369 
2370 const uint16_t vlan_tags[] = {
2371 		0,  1,  2,  3,  4,  5,  6,  7,
2372 		8,  9, 10, 11,  12, 13, 14, 15,
2373 		16, 17, 18, 19, 20, 21, 22, 23,
2374 		24, 25, 26, 27, 28, 29, 30, 31
2375 };
2376 
2377 static int
2378 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2379 		 enum dcb_mode_enable dcb_mode,
2380 		 enum rte_eth_nb_tcs num_tcs,
2381 		 uint8_t pfc_en)
2382 {
2383 	uint8_t i;
2384 
2385 	/*
2386 	 * Build the configuration for DCB+VT based on the vlan_tags array
2387 	 * given above and on the number of traffic classes available for use.
2388 	 */
2389 	if (dcb_mode == DCB_VT_ENABLED) {
2390 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2391 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2392 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2393 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2394 
2395 		/* VMDQ+DCB RX and TX configurations */
2396 		vmdq_rx_conf->enable_default_pool = 0;
2397 		vmdq_rx_conf->default_pool = 0;
2398 		vmdq_rx_conf->nb_queue_pools =
2399 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2400 		vmdq_tx_conf->nb_queue_pools =
2401 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2402 
2403 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2404 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2405 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2406 			vmdq_rx_conf->pool_map[i].pools =
2407 				1ULL << (i % vmdq_rx_conf->nb_queue_pools);
2408 		}
2409 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2410 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2411 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2412 		}
2413 
2414 		/* set DCB mode of RX and TX of multiple queues */
2415 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2416 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2417 	} else {
2418 		struct rte_eth_dcb_rx_conf *rx_conf =
2419 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2420 		struct rte_eth_dcb_tx_conf *tx_conf =
2421 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2422 
2423 		rx_conf->nb_tcs = num_tcs;
2424 		tx_conf->nb_tcs = num_tcs;
2425 
2426 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2427 			rx_conf->dcb_tc[i] = i % num_tcs;
2428 			tx_conf->dcb_tc[i] = i % num_tcs;
2429 		}
2430 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2431 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2432 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2433 	}
2434 
2435 	if (pfc_en)
2436 		eth_conf->dcb_capability_en =
2437 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2438 	else
2439 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2440 
2441 	return 0;
2442 }
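
/*
 * Editor's note: a worked example of the VMDQ+DCB mapping built above.
 * With num_tcs == ETH_4_TCS there are 32 pools, so vlan_tags[i] steers to
 * pool (i % 32) and user priority i maps to TC (i % 4).
 * example_print_vmdq_dcb_map is a hypothetical name.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_print_vmdq_dcb_map(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)	/* first 8 entries only, for brevity */
		printf("vlan %u -> pool %u, prio %u -> tc %u\n",
		       (unsigned int)vlan_tags[i], i % 32, i, i % 4);
}
#endif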
2443 
2444 int
2445 init_port_dcb_config(portid_t pid,
2446 		     enum dcb_mode_enable dcb_mode,
2447 		     enum rte_eth_nb_tcs num_tcs,
2448 		     uint8_t pfc_en)
2449 {
2450 	struct rte_eth_conf port_conf;
2451 	struct rte_port *rte_port;
2452 	int retval;
2453 	uint16_t i;
2454 
2455 	rte_port = &ports[pid];
2456 
2457 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2458 	/* Enter DCB configuration status */
2459 	dcb_config = 1;
2460 
2461 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2462 	port_conf.txmode = rte_port->dev_conf.txmode;
2463 
2464 	/* Set the DCB configuration for VT mode and for non-VT mode */
2465 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2466 	if (retval < 0)
2467 		return retval;
2468 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2469 
2470 	/*
2471 	 * Write the configuration into the device.
2472 	 * Set the number of RX & TX queues to 0, so
2473 	 * the RX & TX queues will not be set up.
2474 	 */
2475 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2476 
2477 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2478 
2479 	/* If dev_info.vmdq_pool_base is greater than 0,
2480 	 * the queue IDs of the VMDQ pools start after the PF queues.
2481 	 */
2482 	if (dcb_mode == DCB_VT_ENABLED &&
2483 	    rte_port->dev_info.vmdq_pool_base > 0) {
2484 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2485 			" for port %d.\n", pid);
2486 		return -1;
2487 	}
2488 
2489 	/* Assume the ports in testpmd have the same dcb capability
2490 	 * and the same number of rxq and txq in DCB mode.
2491 	 */
2492 	if (dcb_mode == DCB_VT_ENABLED) {
2493 		if (rte_port->dev_info.max_vfs > 0) {
2494 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2495 			nb_txq = rte_port->dev_info.nb_tx_queues;
2496 		} else {
2497 			nb_rxq = rte_port->dev_info.max_rx_queues;
2498 			nb_txq = rte_port->dev_info.max_tx_queues;
2499 		}
2500 	} else {
2501 		/* If VT is disabled, use all PF queues */
2502 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2503 			nb_rxq = rte_port->dev_info.max_rx_queues;
2504 			nb_txq = rte_port->dev_info.max_tx_queues;
2505 		} else {
2506 			nb_rxq = (queueid_t)num_tcs;
2507 			nb_txq = (queueid_t)num_tcs;
2508 
2509 		}
2510 	}
2511 	rx_free_thresh = 64;
2512 
2513 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2514 
2515 	rxtx_port_config(rte_port);
2516 	/* VLAN filter */
2517 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2518 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2519 		rx_vft_set(pid, vlan_tags[i], 1);
2520 
2521 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2522 	map_port_queue_stats_mapping_registers(pid, rte_port);
2523 
2524 	rte_port->dcb_flag = 1;
2525 
2526 	return 0;
2527 }
2528 
2529 static void
2530 init_port(void)
2531 {
2532 	/* Configuration of Ethernet ports. */
2533 	ports = rte_zmalloc("testpmd: ports",
2534 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2535 			    RTE_CACHE_LINE_SIZE);
2536 	if (ports == NULL) {
2537 		rte_exit(EXIT_FAILURE,
2538 				"rte_zmalloc(%d struct rte_port) failed\n",
2539 				RTE_MAX_ETHPORTS);
2540 	}
2541 }
2542 
2543 static void
2544 force_quit(void)
2545 {
2546 	pmd_test_exit();
2547 	prompt_exit();
2548 }
2549 
2550 static void
2551 print_stats(void)
2552 {
2553 	uint8_t i;
2554 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2555 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2556 
2557 	/* Clear screen and move to top left */
2558 	printf("%s%s", clr, top_left);
2559 
2560 	printf("\nPort statistics ====================================");
2561 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2562 		nic_stats_display(fwd_ports_ids[i]);
2563 }
2564 
2565 static void
2566 signal_handler(int signum)
2567 {
2568 	if (signum == SIGINT || signum == SIGTERM) {
2569 		printf("\nSignal %d received, preparing to exit...\n",
2570 				signum);
2571 #ifdef RTE_LIBRTE_PDUMP
2572 		/* uninitialize packet capture framework */
2573 		rte_pdump_uninit();
2574 #endif
2575 #ifdef RTE_LIBRTE_LATENCY_STATS
2576 		rte_latencystats_uninit();
2577 #endif
2578 		force_quit();
2579 		/* Set flag to indicate the force termination. */
2580 		f_quit = 1;
2581 		/* exit with the expected status */
2582 		signal(signum, SIG_DFL);
2583 		kill(getpid(), signum);
2584 	}
2585 }
2586 
2587 int
2588 main(int argc, char** argv)
2589 {
2590 	int diag;
2591 	portid_t port_id;
2592 	int ret;
2593 
2594 	signal(SIGINT, signal_handler);
2595 	signal(SIGTERM, signal_handler);
2596 
2597 	diag = rte_eal_init(argc, argv);
2598 	if (diag < 0)
2599 		rte_panic("Cannot init EAL\n");
2600 
2601 	testpmd_logtype = rte_log_register("testpmd");
2602 	if (testpmd_logtype < 0)
2603 		rte_panic("Cannot register log type");
2604 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2605 
2606 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2607 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2608 			strerror(errno));
2609 	}
2610 
2611 #ifdef RTE_LIBRTE_PDUMP
2612 	/* initialize packet capture framework */
2613 	rte_pdump_init(NULL);
2614 #endif
2615 
2616 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2617 	if (nb_ports == 0)
2618 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2619 
2620 	/* allocate port structures, and init them */
2621 	init_port();
2622 
2623 	set_def_fwd_config();
2624 	if (nb_lcores == 0)
2625 		rte_panic("Empty set of forwarding logical cores - check the "
2626 			  "core mask supplied in the command parameters\n");
2627 
2628 	/* Bitrate/latency stats disabled by default */
2629 #ifdef RTE_LIBRTE_BITRATE
2630 	bitrate_enabled = 0;
2631 #endif
2632 #ifdef RTE_LIBRTE_LATENCY_STATS
2633 	latencystats_enabled = 0;
2634 #endif
2635 
2636 	argc -= diag;
2637 	argv += diag;
2638 	if (argc > 1)
2639 		launch_args_parse(argc, argv);
2640 
2641 	if (tx_first && interactive)
2642 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2643 				"interactive mode.\n");
2644 
2645 	if (tx_first && lsc_interrupt) {
2646 		printf("Warning: lsc_interrupt needs to be off when "
2647 				"using tx_first. Disabling.\n");
2648 		lsc_interrupt = 0;
2649 	}
2650 
2651 	if (!nb_rxq && !nb_txq)
2652 		printf("Warning: either the RX or the TX queue count should be non-zero\n");
2653 
2654 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2655 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2656 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2657 		       nb_rxq, nb_txq);
2658 
2659 	init_config();
2660 
2661 	if (hot_plug) {
2662 		/* enable hot plug monitoring */
2663 		ret = rte_dev_event_monitor_start();
2664 		if (ret) {
2665 			rte_errno = EINVAL;
2666 			return -1;
2667 		}
2668 		eth_dev_event_callback_register();
2669 
2670 	}
2671 
2672 	if (start_port(RTE_PORT_ALL) != 0)
2673 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2674 
2675 	/* set all ports to promiscuous mode by default */
2676 	RTE_ETH_FOREACH_DEV(port_id)
2677 		rte_eth_promiscuous_enable(port_id);
2678 
2679 	/* Init metrics library */
2680 	rte_metrics_init(rte_socket_id());
2681 
2682 #ifdef RTE_LIBRTE_LATENCY_STATS
2683 	if (latencystats_enabled != 0) {
2684 		int ret = rte_latencystats_init(1, NULL);
2685 		if (ret)
2686 			printf("Warning: latencystats init()"
2687 				" returned error %d\n", ret);
2688 		printf("Latencystats running on lcore %d\n",
2689 			latencystats_lcore_id);
2690 	}
2691 #endif
2692 
2693 	/* Setup bitrate stats */
2694 #ifdef RTE_LIBRTE_BITRATE
2695 	if (bitrate_enabled != 0) {
2696 		bitrate_data = rte_stats_bitrate_create();
2697 		if (bitrate_data == NULL)
2698 			rte_exit(EXIT_FAILURE,
2699 				"Could not allocate bitrate data.\n");
2700 		rte_stats_bitrate_reg(bitrate_data);
2701 	}
2702 #endif
2703 
2704 #ifdef RTE_LIBRTE_CMDLINE
2705 	if (strlen(cmdline_filename) != 0)
2706 		cmdline_read_from_file(cmdline_filename);
2707 
2708 	if (interactive == 1) {
2709 		if (auto_start) {
2710 			printf("Start automatic packet forwarding\n");
2711 			start_packet_forwarding(0);
2712 		}
2713 		prompt();
2714 		pmd_test_exit();
2715 	} else
2716 #endif
2717 	{
2718 		char c;
2719 		int rc;
2720 
2721 		f_quit = 0;
2722 
2723 		printf("No interactive command line; starting packet forwarding\n");
2724 		start_packet_forwarding(tx_first);
2725 		if (stats_period != 0) {
2726 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2727 			uint64_t timer_period;
2728 
2729 			/* Convert to number of cycles */
2730 			timer_period = stats_period * rte_get_timer_hz();
2731 
2732 			while (f_quit == 0) {
2733 				cur_time = rte_get_timer_cycles();
2734 				diff_time += cur_time - prev_time;
2735 
2736 				if (diff_time >= timer_period) {
2737 					print_stats();
2738 					/* Reset the timer */
2739 					diff_time = 0;
2740 				}
2741 				prev_time = cur_time;
2742 				/* Sleep to avoid busy-waiting between checks */
2743 				sleep(1);
2744 			}
2745 		}
2746 
2747 		printf("Press enter to exit\n");
2748 		rc = read(0, &c, 1);
2749 		pmd_test_exit();
2750 		if (rc < 0)
2751 			return 1;
2752 	}
2753 
2754 	return 0;
2755 }
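
/*
 * Editor's note: a typical invocation, assuming two bound ports; -l and -n
 * are standard EAL options, while -i, --rxq and --txq are testpmd options:
 *
 *   ./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 */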
2756