/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which may not be physically contiguous) for
 * mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the sockets on which the memory pools used by each port
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by each port
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by each port
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet addresses of the peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated while it loops. Set this flag to exit the stats-period loop
 * after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently being tested. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID that serves latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket ID has already been discovered.
 * Return a positive value if the socket is new (not yet discovered),
 * zero otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Set up the default configuration of forwarding lcores.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

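/*
 * Initialize the peer Ethernet addresses to locally administered MAC
 * addresses whose last byte encodes the peer port index.
 */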
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

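/*
 * Use all probed ports, in probe order, as the default list of
 * forwarding ports.
 */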
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Create an mbuf pool for a given socket; done once at initialization
 * time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the given rxq is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the given txq is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

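/*
 * Global initialization: allocate the forwarding lcores, apply the default
 * Rx/Tx configuration to every port, create the mbuf pool(s) and set up
 * the GSO/GRO contexts and the forwarding streams.
 */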
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of each discovered
	 * socket.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}


void
reconfig(portid_t new_port_id, unsigned int socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


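/*
 * (Re)allocate the forwarding streams: one stream per queue
 * (max(nb_rxq, nb_txq)) on each port.
 * Return 0 on success, -1 if a requested queue count exceeds the
 * capabilities of a port.
 */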
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* Set the socket ID of each port according to the NUMA configuration. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old forwarding streams */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init the new forwarding streams */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

900 
901 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
902 static void
903 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
904 {
905 	unsigned int total_burst;
906 	unsigned int nb_burst;
907 	unsigned int burst_stats[3];
908 	uint16_t pktnb_stats[3];
909 	uint16_t nb_pkt;
910 	int burst_percent[3];
911 
912 	/*
913 	 * First compute the total number of packet bursts and the
914 	 * two highest numbers of bursts of the same number of packets.
915 	 */
916 	total_burst = 0;
917 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
918 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
919 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
920 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
921 		if (nb_burst == 0)
922 			continue;
923 		total_burst += nb_burst;
924 		if (nb_burst > burst_stats[0]) {
925 			burst_stats[1] = burst_stats[0];
926 			pktnb_stats[1] = pktnb_stats[0];
927 			burst_stats[0] = nb_burst;
928 			pktnb_stats[0] = nb_pkt;
929 		}
930 	}
931 	if (total_burst == 0)
932 		return;
933 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
934 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
935 	       burst_percent[0], (int) pktnb_stats[0]);
936 	if (burst_stats[0] == total_burst) {
937 		printf("]\n");
938 		return;
939 	}
940 	if (burst_stats[0] + burst_stats[1] == total_burst) {
941 		printf(" + %d%% of %d pkts]\n",
942 		       100 - burst_percent[0], pktnb_stats[1]);
943 		return;
944 	}
945 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
946 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
947 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
948 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
949 		return;
950 	}
951 	printf(" + %d%% of %d pkts + %d%% of others]\n",
952 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
953 }
954 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
955 
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

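/*
 * Display the statistics of a single forwarding stream; streams that did
 * not receive, transmit or drop any packet are skipped.
 */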
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

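/*
 * Drain and free any packets still pending in the RX queues of the
 * forwarding ports before a new forwarding run starts.
 */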
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. A timer is
				 * added to exit this loop after the 1-second
				 * timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

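/*
 * Main per-lcore forwarding loop: run the packet forwarding function on
 * every stream assigned to this lcore until the lcore is told to stop,
 * updating the bitrate and latency statistics when they are enabled.
 */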
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch the logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch the packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

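/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them to finish, accumulate the per-stream statistics into their ports
 * and display the per-port and accumulated forwarding statistics.
 */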
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

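/* Return 1 if the port is stopped; bonding slaves always count as stopped. */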
int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

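/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues if needed, start it,
 * register the Ethernet event callbacks and check the link status.
 * Return 0 on success, -1 on configuration failure.
 */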
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
			/* Apply Tx offloads configuration */
			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* Apply Rx offloads configuration */
			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Failed to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need to check the link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to setup event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

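/* Stop the given port, or all ports when pid is RTE_PORT_ALL. */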
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

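/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL, flushing
 * its flow rules first.
 */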
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

1825 void
1826 reset_port(portid_t pid)
1827 {
1828 	int diag;
1829 	portid_t pi;
1830 	struct rte_port *port;
1831 
1832 	if (port_id_is_invalid(pid, ENABLED_WARN))
1833 		return;
1834 
1835 	printf("Resetting ports...\n");
1836 
1837 	RTE_ETH_FOREACH_DEV(pi) {
1838 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1839 			continue;
1840 
1841 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1842 			printf("Please remove port %d from forwarding "
1843 			       "configuration.\n", pi);
1844 			continue;
1845 		}
1846 
1847 		if (port_is_bonding_slave(pi)) {
1848 			printf("Please remove port %d from bonded device.\n",
1849 			       pi);
1850 			continue;
1851 		}
1852 
1853 		diag = rte_eth_dev_reset(pi);
1854 		if (diag == 0) {
1855 			port = &ports[pi];
1856 			port->need_reconfig = 1;
1857 			port->need_reconfig_queues = 1;
1858 		} else {
1859 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1860 		}
1861 	}
1862 
1863 	printf("Done\n");
1864 }
1865 
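/* Register eth_dev_event_callback as the device event callback for all devices (hotplug support). */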
1866 static int
1867 eth_dev_event_callback_register(void)
1868 {
1869 	int ret;
1870 
1871 	/* register the device event callback */
1872 	ret = rte_dev_event_callback_register(NULL,
1873 		eth_dev_event_callback, NULL);
1874 	if (ret) {
1875 		printf("Failed to register device event callback\n");
1876 		return -1;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1883 static int
1884 eth_dev_event_callback_unregister(void)
1885 {
1886 	int ret;
1887 
1888 	/* unregister the device event callback */
1889 	ret = rte_dev_event_callback_unregister(NULL,
1890 		eth_dev_event_callback, NULL);
1891 	if (ret < 0) {
1892 		printf("Failed to unregister device event callback\n");
1893 		return -1;
1894 	}
1895 
1896 	return 0;
1897 }
1898 
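/*
 * Attach the device given by an EAL identifier (e.g. a PCI address or a
 * vdev name), as done by the "port attach" command, and set it up as a
 * new port.
 */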
1899 void
1900 attach_port(char *identifier)
1901 {
1902 	portid_t pi = 0;
1903 	unsigned int socket_id;
1904 
1905 	printf("Attaching a new port...\n");
1906 
1907 	if (identifier == NULL) {
1908 		printf("Invalid device identifier specified\n");
1909 		return;
1910 	}
1911 
1912 	if (rte_eth_dev_attach(identifier, &pi))
1913 		return;
1914 
1915 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1916 	/* if socket_id is invalid, set to 0 */
1917 	if (check_socket_id(socket_id) < 0)
1918 		socket_id = 0;
1919 	reconfig(pi, socket_id);
1920 	rte_eth_promiscuous_enable(pi);
1921 
1922 	nb_ports = rte_eth_dev_count();
1923 
1924 	ports[pi].port_status = RTE_PORT_STOPPED;
1925 
1926 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1927 	printf("Done\n");
1928 }
1929 
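/* Detach the device behind the given port; the port must be closed first. */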
1930 void
1931 detach_port(portid_t port_id)
1932 {
1933 	char name[RTE_ETH_NAME_MAX_LEN];
1934 
1935 	printf("Detaching a port...\n");
1936 
1937 	if (!port_is_closed(port_id)) {
1938 		printf("Please close port first\n");
1939 		return;
1940 	}
1941 
1942 	if (ports[port_id].flow_list)
1943 		port_flow_flush(port_id);
1944 
1945 	if (rte_eth_dev_detach(port_id, name)) {
1946 		TESTPMD_LOG(ERR, "Failed to detach port %d\n", port_id);
1947 		return;
1948 	}
1949 
1950 	nb_ports = rte_eth_dev_count();
1951 
1952 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1953 			name, nb_ports);
1954 	printf("Done\n");
1956 }
1957 
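/*
 * Stop forwarding if it is running, shut down and close all ports, and
 * stop hotplug monitoring before testpmd exits.
 */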
1958 void
1959 pmd_test_exit(void)
1960 {
1961 	portid_t pt_id;
1962 	int ret;
1963 
1964 	if (test_done == 0)
1965 		stop_packet_forwarding();
1966 
1967 	if (ports != NULL) {
1968 		no_link_check = 1;
1969 		RTE_ETH_FOREACH_DEV(pt_id) {
1970 			printf("\nShutting down port %d...\n", pt_id);
1971 			fflush(stdout);
1972 			stop_port(pt_id);
1973 			close_port(pt_id);
1974 		}
1975 	}
1976 
1977 	if (hot_plug) {
1978 		ret = rte_dev_event_monitor_stop();
1979 		if (ret)
1980 			RTE_LOG(ERR, EAL,
1981 				"failed to stop the device event monitor.\n");
1982 
1983 		ret = eth_dev_event_callback_unregister();
1984 		if (ret)
1985 			RTE_LOG(ERR, EAL,
1986 				"failed to unregister the device event callback.\n");
1987 	}
1988 
1989 	printf("\nBye...\n");
1990 }
1991 
1992 typedef void (*cmd_func_t)(void);
1993 struct pmd_test_command {
1994 	const char *cmd_name;
1995 	cmd_func_t cmd_func;
1996 };
1997 
1998 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1999 
2000 /* Check the link status of all ports for up to 9 seconds, and print the final status of each port */
2001 static void
2002 check_all_ports_link_status(uint32_t port_mask)
2003 {
2004 #define CHECK_INTERVAL 100 /* 100ms */
2005 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2006 	portid_t portid;
2007 	uint8_t count, all_ports_up, print_flag = 0;
2008 	struct rte_eth_link link;
2009 
2010 	printf("Checking link statuses...\n");
2011 	fflush(stdout);
2012 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2013 		all_ports_up = 1;
2014 		RTE_ETH_FOREACH_DEV(portid) {
2015 			if ((port_mask & (1 << portid)) == 0)
2016 				continue;
2017 			memset(&link, 0, sizeof(link));
2018 			rte_eth_link_get_nowait(portid, &link);
2019 			/* print link status if flag set */
2020 			if (print_flag == 1) {
2021 				if (link.link_status)
2022 					printf(
2023 					"Port %d Link Up. speed %u Mbps - %s\n",
2024 					portid, link.link_speed,
2025 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2026 					("full-duplex") : ("half-duplex"));
2027 				else
2028 					printf("Port %d Link Down\n", portid);
2029 				continue;
2030 			}
2031 			/* clear all_ports_up flag if any link down */
2032 			if (link.link_status == ETH_LINK_DOWN) {
2033 				all_ports_up = 0;
2034 				break;
2035 			}
2036 		}
2037 		/* after finally printing all link status, get out */
2038 		if (print_flag == 1)
2039 			break;
2040 
2041 		if (all_ports_up == 0) {
2042 			fflush(stdout);
2043 			rte_delay_ms(CHECK_INTERVAL);
2044 		}
2045 
2046 		/* set the print_flag if all ports up or timeout */
2047 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2048 			print_flag = 1;
2049 		}
2050 
2051 		if (lsc_interrupt)
2052 			break;
2053 	}
2054 }
2055 
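/*
 * Deferred handler for a device removal (RMV) interrupt: stop and close
 * the removed port, then detach its underlying device.
 */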
2056 static void
2057 rmv_event_callback(void *arg)
2058 {
2059 	struct rte_eth_dev *dev;
2060 	portid_t port_id = (intptr_t)arg;
2061 
2062 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2063 	dev = &rte_eth_devices[port_id];
2064 
2065 	stop_port(port_id);
2066 	close_port(port_id);
2067 	printf("Removing device %s\n", dev->device->name);
2068 	if (rte_eal_dev_detach(dev->device))
2069 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2070 			dev->device->name);
2071 }
2072 
2073 /* This function is used by the interrupt thread */
2074 static int
2075 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2076 		  void *ret_param)
2077 {
2078 	static const char * const event_desc[] = {
2079 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2080 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2081 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2082 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2083 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2084 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2085 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2086 		[RTE_ETH_EVENT_NEW] = "device probed",
2087 		[RTE_ETH_EVENT_DESTROY] = "device released",
2088 		[RTE_ETH_EVENT_MAX] = NULL,
2089 	};
2090 
2091 	RTE_SET_USED(param);
2092 	RTE_SET_USED(ret_param);
2093 
2094 	if (type >= RTE_ETH_EVENT_MAX) {
2095 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2096 			port_id, __func__, type);
2097 		fflush(stderr);
2098 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2099 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2100 			event_desc[type]);
2101 		fflush(stdout);
2102 	}
2103 
2104 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2105 		return 0;
2106 
2107 	switch (type) {
2108 	case RTE_ETH_EVENT_INTR_RMV:
2109 		if (rte_eal_alarm_set(100000,
2110 				rmv_event_callback, (void *)(intptr_t)port_id))
2111 			fprintf(stderr, "Could not set up deferred device removal\n");
2112 		break;
2113 	default:
2114 		break;
2115 	}
2116 	return 0;
2117 }
2118 
2119 /* This function is used by the interrupt thread */
2120 static void
2121 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2122 			     __rte_unused void *arg)
2123 {
2124 	if (type >= RTE_DEV_EVENT_MAX) {
2125 		fprintf(stderr, "%s called upon invalid event %d\n",
2126 			__func__, type);
2127 		fflush(stderr);
2128 	}
2129 
2130 	switch (type) {
2131 	case RTE_DEV_EVENT_REMOVE:
2132 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2133 			device_name);
2134 		/* TODO: Once failure handling is finished, stop packet
2135 		 * forwarding, then stop, close and detach the port.
2136 		 */
2137 		break;
2138 	case RTE_DEV_EVENT_ADD:
2139 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2140 			device_name);
2141 		/* TODO: Once the kernel driver binding is finished,
2142 		 * attach the port.
2143 		 */
2144 		break;
2145 	default:
2146 		break;
2147 	}
2148 }
2149 
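/* Program the user-supplied TX queue to stats counter mappings into the given port. */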
2150 static int
2151 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2152 {
2153 	uint16_t i;
2154 	int diag;
2155 	uint8_t mapping_found = 0;
2156 
2157 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2158 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2159 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2160 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2161 					tx_queue_stats_mappings[i].queue_id,
2162 					tx_queue_stats_mappings[i].stats_counter_id);
2163 			if (diag != 0)
2164 				return diag;
2165 			mapping_found = 1;
2166 		}
2167 	}
2168 	if (mapping_found)
2169 		port->tx_queue_stats_mapping_enabled = 1;
2170 	return 0;
2171 }
2172 
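/* Program the user-supplied RX queue to stats counter mappings into the given port. */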
2173 static int
2174 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2175 {
2176 	uint16_t i;
2177 	int diag;
2178 	uint8_t mapping_found = 0;
2179 
2180 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2181 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2182 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2183 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2184 					rx_queue_stats_mappings[i].queue_id,
2185 					rx_queue_stats_mappings[i].stats_counter_id);
2186 			if (diag != 0)
2187 				return diag;
2188 			mapping_found = 1;
2189 		}
2190 	}
2191 	if (mapping_found)
2192 		port->rx_queue_stats_mapping_enabled = 1;
2193 	return 0;
2194 }
2195 
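/*
 * Apply the TX and RX queue stats mappings of a port; exit on any error
 * other than -ENOTSUP (mapping not supported by the PMD).
 */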
2196 static void
2197 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2198 {
2199 	int diag = 0;
2200 
2201 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2202 	if (diag != 0) {
2203 		if (diag == -ENOTSUP) {
2204 			port->tx_queue_stats_mapping_enabled = 0;
2205 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2206 		}
2207 		else
2208 			rte_exit(EXIT_FAILURE,
2209 					"set_tx_queue_stats_mapping_registers "
2210 					"failed for port id=%d diag=%d\n",
2211 					pi, diag);
2212 	}
2213 
2214 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2215 	if (diag != 0) {
2216 		if (diag == -ENOTSUP) {
2217 			port->rx_queue_stats_mapping_enabled = 0;
2218 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2219 		}
2220 		else
2221 			rte_exit(EXIT_FAILURE,
2222 					"set_rx_queue_stats_mapping_registers "
2223 					"failed for port id=%d diag=%d\n",
2224 					pi, diag);
2225 	}
2226 }
2227 
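/*
 * Initialize the RX/TX queue configuration of a port from the device
 * defaults, overridden by any thresholds given on the command line.
 */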
2228 static void
2229 rxtx_port_config(struct rte_port *port)
2230 {
2231 	port->rx_conf = port->dev_info.default_rxconf;
2232 	port->tx_conf = port->dev_info.default_txconf;
2233 
2234 	/* Check if any RX/TX parameters have been passed */
2235 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2236 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2237 
2238 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2239 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2240 
2241 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2242 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2243 
2244 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2245 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2246 
2247 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2248 		port->rx_conf.rx_drop_en = rx_drop_en;
2249 
2250 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2251 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2252 
2253 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2254 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2255 
2256 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2257 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2258 
2259 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2260 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2261 
2262 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2263 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2264 }
2265 
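/* Build the default configuration (FDIR, RSS, interrupts, ...) of every probed port. */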
2266 void
2267 init_port_config(void)
2268 {
2269 	portid_t pid;
2270 	struct rte_port *port;
2271 
2272 	RTE_ETH_FOREACH_DEV(pid) {
2273 		port = &ports[pid];
2274 		port->dev_conf.fdir_conf = fdir_conf;
2275 		if (nb_rxq > 1) {
2276 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2277 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2278 		} else {
2279 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2280 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2281 		}
2282 
2283 		if (port->dcb_flag == 0) {
2284 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2285 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2286 			else
2287 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2288 		}
2289 
2290 		rxtx_port_config(port);
2291 
2292 		rte_eth_macaddr_get(pid, &port->eth_addr);
2293 
2294 		map_port_queue_stats_mapping_registers(pid, port);
2295 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2296 		rte_pmd_ixgbe_bypass_init(pid);
2297 #endif
2298 
2299 		if (lsc_interrupt &&
2300 		    (rte_eth_devices[pid].data->dev_flags &
2301 		     RTE_ETH_DEV_INTR_LSC))
2302 			port->dev_conf.intr_conf.lsc = 1;
2303 		if (rmv_interrupt &&
2304 		    (rte_eth_devices[pid].data->dev_flags &
2305 		     RTE_ETH_DEV_INTR_RMV))
2306 			port->dev_conf.intr_conf.rmv = 1;
2307 
2308 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2309 		/* Detect softnic port */
2310 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2311 			port->softnic_enable = 1;
2312 			memset(&port->softport, 0, sizeof(struct softnic_port));
2313 
2314 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2315 				port->softport.tm_flag = 1;
2316 		}
2317 #endif
2318 	}
2319 }
2320 
2321 void set_port_slave_flag(portid_t slave_pid)
2322 {
2323 	struct rte_port *port;
2324 
2325 	port = &ports[slave_pid];
2326 	port->slave_flag = 1;
2327 }
2328 
2329 void clear_port_slave_flag(portid_t slave_pid)
2330 {
2331 	struct rte_port *port;
2332 
2333 	port = &ports[slave_pid];
2334 	port->slave_flag = 0;
2335 }
2336 
2337 uint8_t port_is_bonding_slave(portid_t slave_pid)
2338 {
2339 	struct rte_port *port;
2340 
2341 	port = &ports[slave_pid];
2342 	return port->slave_flag;
2343 }
2344 
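/* VLAN tags used to build the pool maps of the VMDq+DCB configuration below. */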
2345 const uint16_t vlan_tags[] = {
2346 		0,  1,  2,  3,  4,  5,  6,  7,
2347 		8,  9, 10, 11,  12, 13, 14, 15,
2348 		16, 17, 18, 19, 20, 21, 22, 23,
2349 		24, 25, 26, 27, 28, 29, 30, 31
2350 };
2351 
2352 static int
2353 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2354 		 enum dcb_mode_enable dcb_mode,
2355 		 enum rte_eth_nb_tcs num_tcs,
2356 		 uint8_t pfc_en)
2357 {
2358 	uint8_t i;
2359 
2360 	/*
2361 	 * Builds up the correct configuration for DCB+VT based on the vlan tags array
2362 	 * given above, and the number of traffic classes available for use.
2363 	 */
2364 	if (dcb_mode == DCB_VT_ENABLED) {
2365 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2366 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2367 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2368 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2369 
2370 		/* VMDQ+DCB RX and TX configurations */
2371 		vmdq_rx_conf->enable_default_pool = 0;
2372 		vmdq_rx_conf->default_pool = 0;
2373 		vmdq_rx_conf->nb_queue_pools =
2374 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2375 		vmdq_tx_conf->nb_queue_pools =
2376 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2377 
2378 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2379 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2380 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2381 			vmdq_rx_conf->pool_map[i].pools =
2382 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2383 		}
2384 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2385 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2386 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2387 		}
2388 
2389 		/* set DCB mode of RX and TX of multiple queues */
2390 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2391 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2392 	} else {
2393 		struct rte_eth_dcb_rx_conf *rx_conf =
2394 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2395 		struct rte_eth_dcb_tx_conf *tx_conf =
2396 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2397 
2398 		rx_conf->nb_tcs = num_tcs;
2399 		tx_conf->nb_tcs = num_tcs;
2400 
2401 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2402 			rx_conf->dcb_tc[i] = i % num_tcs;
2403 			tx_conf->dcb_tc[i] = i % num_tcs;
2404 		}
2405 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2406 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2407 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2408 	}
2409 
2410 	if (pfc_en)
2411 		eth_conf->dcb_capability_en =
2412 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2413 	else
2414 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2415 
2416 	return 0;
2417 }
2418 
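/*
 * Reconfigure the given port for a DCB test: build the DCB configuration,
 * adjust nb_rxq/nb_txq accordingly and enable VLAN filtering.
 */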
2419 int
2420 init_port_dcb_config(portid_t pid,
2421 		     enum dcb_mode_enable dcb_mode,
2422 		     enum rte_eth_nb_tcs num_tcs,
2423 		     uint8_t pfc_en)
2424 {
2425 	struct rte_eth_conf port_conf;
2426 	struct rte_port *rte_port;
2427 	int retval;
2428 	uint16_t i;
2429 
2430 	rte_port = &ports[pid];
2431 
2432 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2433 	/* Enter DCB configuration status */
2434 	dcb_config = 1;
2435 
2436 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2437 	port_conf.txmode = rte_port->dev_conf.txmode;
2438 
2439 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
2440 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2441 	if (retval < 0)
2442 		return retval;
2443 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2444 
2445 	/**
2446 	 * Write the configuration into the device.
2447 	 * Set the numbers of RX & TX queues to 0, so
2448 	 * the RX & TX queues will not be set up.
2449 	 */
2450 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2451 
2452 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2453 
2454 	/* If dev_info.vmdq_pool_base is greater than 0,
2455 	 * the queue IDs of the VMDq pools start after the PF queues.
2456 	 */
2457 	if (dcb_mode == DCB_VT_ENABLED &&
2458 	    rte_port->dev_info.vmdq_pool_base > 0) {
2459 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2460 			" for port %d.\n", pid);
2461 		return -1;
2462 	}
2463 
2464 	/* Assume the ports in testpmd have the same dcb capability
2465 	 * and the same number of rxq and txq in DCB mode.
2466 	 */
2467 	if (dcb_mode == DCB_VT_ENABLED) {
2468 		if (rte_port->dev_info.max_vfs > 0) {
2469 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2470 			nb_txq = rte_port->dev_info.nb_tx_queues;
2471 		} else {
2472 			nb_rxq = rte_port->dev_info.max_rx_queues;
2473 			nb_txq = rte_port->dev_info.max_tx_queues;
2474 		}
2475 	} else {
2476 		/* If VT is disabled, use all PF queues */
2477 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2478 			nb_rxq = rte_port->dev_info.max_rx_queues;
2479 			nb_txq = rte_port->dev_info.max_tx_queues;
2480 		} else {
2481 			nb_rxq = (queueid_t)num_tcs;
2482 			nb_txq = (queueid_t)num_tcs;
2484 		}
2485 	}
2486 	rx_free_thresh = 64;
2487 
2488 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2489 
2490 	rxtx_port_config(rte_port);
2491 	/* VLAN filter */
2492 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2493 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2494 		rx_vft_set(pid, vlan_tags[i], 1);
2495 
2496 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2497 	map_port_queue_stats_mapping_registers(pid, rte_port);
2498 
2499 	rte_port->dcb_flag = 1;
2500 
2501 	return 0;
2502 }
2503 
2504 static void
2505 init_port(void)
2506 {
2507 	/* Configuration of Ethernet ports. */
2508 	ports = rte_zmalloc("testpmd: ports",
2509 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2510 			    RTE_CACHE_LINE_SIZE);
2511 	if (ports == NULL) {
2512 		rte_exit(EXIT_FAILURE,
2513 				"rte_zmalloc(%d struct rte_port) failed\n",
2514 				RTE_MAX_ETHPORTS);
2515 	}
2516 }
2517 
2518 static void
2519 force_quit(void)
2520 {
2521 	pmd_test_exit();
2522 	prompt_exit();
2523 }
2524 
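/* Clear the terminal and display the statistics of all forwarding ports. */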
2525 static void
2526 print_stats(void)
2527 {
2528 	uint8_t i;
2529 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2530 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2531 
2532 	/* Clear screen and move to top left */
2533 	printf("%s%s", clr, top_left);
2534 
2535 	printf("\nPort statistics ====================================");
2536 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2537 		nic_stats_display(fwd_ports_ids[i]);
2538 }
2539 
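/*
 * On SIGINT/SIGTERM: clean up, then re-raise the signal with the default
 * handler so the process exits with the expected status.
 */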
2540 static void
2541 signal_handler(int signum)
2542 {
2543 	if (signum == SIGINT || signum == SIGTERM) {
2544 		printf("\nSignal %d received, preparing to exit...\n",
2545 				signum);
2546 #ifdef RTE_LIBRTE_PDUMP
2547 		/* uninitialize packet capture framework */
2548 		rte_pdump_uninit();
2549 #endif
2550 #ifdef RTE_LIBRTE_LATENCY_STATS
2551 		rte_latencystats_uninit();
2552 #endif
2553 		force_quit();
2554 		/* Set flag to indicate forced termination. */
2555 		f_quit = 1;
2556 		/* exit with the expected status */
2557 		signal(signum, SIG_DFL);
2558 		kill(getpid(), signum);
2559 	}
2560 }
2561 
2562 int
2563 main(int argc, char** argv)
2564 {
2565 	int diag;
2566 	portid_t port_id;
2567 	int ret;
2568 
2569 	signal(SIGINT, signal_handler);
2570 	signal(SIGTERM, signal_handler);
2571 
2572 	diag = rte_eal_init(argc, argv);
2573 	if (diag < 0)
2574 		rte_panic("Cannot init EAL\n");
2575 
2576 	testpmd_logtype = rte_log_register("testpmd");
2577 	if (testpmd_logtype < 0)
2578 		rte_panic("Cannot register log type");
2579 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2580 
2581 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2582 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2583 			strerror(errno));
2584 	}
2585 
2586 #ifdef RTE_LIBRTE_PDUMP
2587 	/* initialize packet capture framework */
2588 	rte_pdump_init(NULL);
2589 #endif
2590 
2591 	nb_ports = (portid_t) rte_eth_dev_count();
2592 	if (nb_ports == 0)
2593 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2594 
2595 	/* allocate port structures, and init them */
2596 	init_port();
2597 
2598 	set_def_fwd_config();
2599 	if (nb_lcores == 0)
2600 		rte_panic("Empty set of forwarding logical cores - check the "
2601 			  "core mask supplied in the command parameters\n");
2602 
2603 	/* Bitrate/latency stats disabled by default */
2604 #ifdef RTE_LIBRTE_BITRATE
2605 	bitrate_enabled = 0;
2606 #endif
2607 #ifdef RTE_LIBRTE_LATENCY_STATS
2608 	latencystats_enabled = 0;
2609 #endif
2610 
2611 	argc -= diag;
2612 	argv += diag;
2613 	if (argc > 1)
2614 		launch_args_parse(argc, argv);
2615 
2616 	if (tx_first && interactive)
2617 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2618 				"interactive mode.\n");
2619 
2620 	if (tx_first && lsc_interrupt) {
2621 		printf("Warning: lsc_interrupt needs to be off when "
2622 				"using tx_first. Disabling.\n");
2623 		lsc_interrupt = 0;
2624 	}
2625 
2626 	if (!nb_rxq && !nb_txq)
2627 		printf("Warning: Either rx or tx queues should be non-zero\n");
2628 
2629 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2630 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2631 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2632 		       nb_rxq, nb_txq);
2633 
2634 	init_config();
2635 
2636 	if (hot_plug) {
2637 		/* enable hot plug monitoring */
2638 		ret = rte_dev_event_monitor_start();
2639 		if (ret) {
2640 			rte_errno = EINVAL;
2641 			return -1;
2642 		}
2643 		eth_dev_event_callback_register();
2644 
2645 	}
2646 
2647 	if (start_port(RTE_PORT_ALL) != 0)
2648 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2649 
2650 	/* set all ports to promiscuous mode by default */
2651 	RTE_ETH_FOREACH_DEV(port_id)
2652 		rte_eth_promiscuous_enable(port_id);
2653 
2654 	/* Init metrics library */
2655 	rte_metrics_init(rte_socket_id());
2656 
2657 #ifdef RTE_LIBRTE_LATENCY_STATS
2658 	if (latencystats_enabled != 0) {
2659 		int ret = rte_latencystats_init(1, NULL);
2660 		if (ret)
2661 			printf("Warning: latencystats init()"
2662 				" returned error %d\n", ret);
2663 		printf("Latencystats running on lcore %d\n",
2664 			latencystats_lcore_id);
2665 	}
2666 #endif
2667 
2668 	/* Setup bitrate stats */
2669 #ifdef RTE_LIBRTE_BITRATE
2670 	if (bitrate_enabled != 0) {
2671 		bitrate_data = rte_stats_bitrate_create();
2672 		if (bitrate_data == NULL)
2673 			rte_exit(EXIT_FAILURE,
2674 				"Could not allocate bitrate data.\n");
2675 		rte_stats_bitrate_reg(bitrate_data);
2676 	}
2677 #endif
2678 
2679 #ifdef RTE_LIBRTE_CMDLINE
2680 	if (strlen(cmdline_filename) != 0)
2681 		cmdline_read_from_file(cmdline_filename);
2682 
2683 	if (interactive == 1) {
2684 		if (auto_start) {
2685 			printf("Start automatic packet forwarding\n");
2686 			start_packet_forwarding(0);
2687 		}
2688 		prompt();
2689 		pmd_test_exit();
2690 	} else
2691 #endif
2692 	{
2693 		char c;
2694 		int rc;
2695 
2696 		f_quit = 0;
2697 
2698 		printf("No command-line core given, starting packet forwarding\n");
2699 		start_packet_forwarding(tx_first);
2700 		if (stats_period != 0) {
2701 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2702 			uint64_t timer_period;
2703 
2704 			/* Convert to number of cycles */
2705 			timer_period = stats_period * rte_get_timer_hz();
2706 
2707 			while (f_quit == 0) {
2708 				cur_time = rte_get_timer_cycles();
2709 				diff_time += cur_time - prev_time;
2710 
2711 				if (diff_time >= timer_period) {
2712 					print_stats();
2713 					/* Reset the timer */
2714 					diff_time = 0;
2715 				}
2716 				/* Sleep to avoid unnecessary checks */
2717 				prev_time = cur_time;
2718 				sleep(1);
2719 			}
2720 		}
2721 
2722 		printf("Press enter to exit\n");
2723 		rc = read(0, &c, 1);
2724 		pmd_test_exit();
2725 		if (rc < 0)
2726 			return 1;
2727 	}
2728 
2729 	return 0;
2730 }
2731