xref: /dpdk/app/test-pmd/testpmd.c (revision 8b9bd0efe0b6920a08e28eebacf2bb916bdf5653)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18 
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22 
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
43 #include <rte_pci.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
46 #include <rte_dev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
50 #endif
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
53 #endif
54 #include <rte_flow.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
58 #endif
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
61 #endif
62 
63 #include "testpmd.h"
64 
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
67 
68 /* Use the master core for the command line? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
71 uint8_t tx_first;
72 char cmdline_filename[PATH_MAX] = {0};
73 
74 /*
75  * NUMA support configuration.
76  * When set, the NUMA support attempts to dispatch the allocation of the
77  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78  * probed ports among the CPU sockets 0 and 1.
79  * Otherwise, all memory is allocated from CPU socket 0.
80  */
81 uint8_t numa_support = 1; /**< numa enabled by default */
82 
83 /*
84  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
85  * not configured.
86  */
87 uint8_t socket_num = UMA_NO_CONFIG;
88 
89 /*
90  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
91  */
92 uint8_t mp_anon = 0;
93 
94 /*
95  * Record the Ethernet address of peer target ports to which packets are
96  * forwarded.
97  * Must be instantiated with the ethernet addresses of peer traffic generator
98  * ports.
99  */
100 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
101 portid_t nb_peer_eth_addrs = 0;
102 
103 /*
104  * Probed Target Environment.
105  */
106 struct rte_port *ports;	       /**< For all probed ethernet ports. */
107 portid_t nb_ports;             /**< Number of probed ethernet ports. */
108 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
109 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
110 
111 /*
112  * Test Forwarding Configuration.
113  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
114  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
115  */
116 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
117 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
118 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
119 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
120 
121 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
122 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
123 
124 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
125 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
126 
127 /*
128  * Forwarding engines.
129  */
130 struct fwd_engine *fwd_engines[] = {
131 	&io_fwd_engine,
132 	&mac_fwd_engine,
133 	&mac_swap_engine,
134 	&flow_gen_engine,
135 	&rx_only_engine,
136 	&tx_only_engine,
137 	&csum_fwd_engine,
138 	&icmp_echo_engine,
139 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
140 	&softnic_tm_engine,
141 	&softnic_tm_bypass_engine,
142 #endif
143 #ifdef RTE_LIBRTE_IEEE1588
144 	&ieee1588_fwd_engine,
145 #endif
146 	NULL,
147 };
148 
149 struct fwd_config cur_fwd_config;
150 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
151 uint32_t retry_enabled;
152 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
153 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
154 
155 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
156 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
157                                       * specified on command-line. */
158 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
159 
160 /*
161  * In a container, a process running with the 'stats-period' option cannot
162  * be terminated. Set this flag to exit the stats-period loop on SIGINT/SIGTERM.
163  */
164 uint8_t f_quit;
165 
166 /*
167  * Configuration of packet segments used by the "txonly" processing engine.
168  */
169 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
170 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
171 	TXONLY_DEF_PACKET_LEN,
172 };
173 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
174 
175 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
176 /**< Split policy for packets to TX. */
177 
178 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
179 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
180 
181 /* Whether the current configuration is in DCB mode; 0 means it is not. */
182 uint8_t dcb_config = 0;
183 
184 /* Whether the dcb is in testing status */
185 uint8_t dcb_test = 0;
186 
187 /*
188  * Configurable number of RX/TX queues.
189  */
190 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
191 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
192 
193 /*
194  * Configurable number of RX/TX ring descriptors.
195  */
196 #define RTE_TEST_RX_DESC_DEFAULT 128
197 #define RTE_TEST_TX_DESC_DEFAULT 512
198 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
199 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
200 
201 #define RTE_PMD_PARAM_UNSET -1
202 /*
203  * Configurable values of RX and TX ring threshold registers.
204  */
205 
206 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
209 
210 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
213 
214 /*
215  * Configurable value of RX free threshold.
216  */
217 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
218 
219 /*
220  * Configurable value of RX drop enable.
221  */
222 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
223 
224 /*
225  * Configurable value of TX free threshold.
226  */
227 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
228 
229 /*
230  * Configurable value of TX RS bit threshold.
231  */
232 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Receive Side Scaling (RSS) configuration.
236  */
237 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
238 
239 /*
240  * Port topology configuration
241  */
242 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
243 
244 /*
245  * Avoid flushing all the RX streams before forwarding starts.
246  */
247 uint8_t no_flush_rx = 0; /* flush by default */
248 
249 /*
250  * Flow API isolated mode.
251  */
252 uint8_t flow_isolate_all;
253 
254 /*
255  * Avoid checking the link status when starting/stopping a port.
256  */
257 uint8_t no_link_check = 0; /* check by default */
258 
259 /*
260  * Enable link status change notification
261  */
262 uint8_t lsc_interrupt = 1; /* enabled by default */
263 
264 /*
265  * Enable device removal notification.
266  */
267 uint8_t rmv_interrupt = 1; /* enabled by default */
268 
269 /*
270  * Display or mask Ethernet device events.
271  * Defaults to all events except VF_MBOX.
272  */
273 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
274 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
275 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
276 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
277 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
278 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
279 
280 /*
281  * NIC bypass mode configuration options.
282  */
283 
284 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
285 /* The NIC bypass watchdog timeout. */
286 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
287 #endif
288 
289 
290 #ifdef RTE_LIBRTE_LATENCY_STATS
291 
292 /*
293  * Set when latency stats is enabled in the commandline
294  */
295 uint8_t latencystats_enabled;
296 
297 /*
298  * Lcore ID that services latency statistics.
299  */
300 lcoreid_t latencystats_lcore_id = -1;
301 
302 #endif
303 
304 /*
305  * Ethernet device configuration.
306  */
307 struct rte_eth_rxmode rx_mode = {
308 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
309 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
310 	.ignore_offload_bitfield = 1,
311 };
312 
313 struct rte_eth_txmode tx_mode = {
314 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
315 };
316 
317 struct rte_fdir_conf fdir_conf = {
318 	.mode = RTE_FDIR_MODE_NONE,
319 	.pballoc = RTE_FDIR_PBALLOC_64K,
320 	.status = RTE_FDIR_REPORT_STATUS,
321 	.mask = {
322 		.vlan_tci_mask = 0x0,
323 		.ipv4_mask     = {
324 			.src_ip = 0xFFFFFFFF,
325 			.dst_ip = 0xFFFFFFFF,
326 		},
327 		.ipv6_mask     = {
328 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
329 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
330 		},
331 		.src_port_mask = 0xFFFF,
332 		.dst_port_mask = 0xFFFF,
333 		.mac_addr_byte_mask = 0xFF,
334 		.tunnel_type_mask = 1,
335 		.tunnel_id_mask = 0xFFFFFFFF,
336 	},
337 	.drop_queue = 127,
338 };
339 
340 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
341 
342 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
343 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
344 
345 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
346 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
347 
348 uint16_t nb_tx_queue_stats_mappings = 0;
349 uint16_t nb_rx_queue_stats_mappings = 0;
350 
351 /*
352  * Display zero values by default for xstats
353  */
354 uint8_t xstats_hide_zero;
355 
356 unsigned int num_sockets = 0;
357 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
358 
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
364 #endif
365 
366 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
367 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
368 
369 /* Forward function declarations */
370 static void map_port_queue_stats_mapping_registers(portid_t pi,
371 						   struct rte_port *port);
372 static void check_all_ports_link_status(uint32_t port_mask);
373 static int eth_event_callback(portid_t port_id,
374 			      enum rte_eth_event_type type,
375 			      void *param, void *ret_param);
376 
377 /*
378  * Check if all the ports are started.
379  * If yes, return positive value. If not, return zero.
380  */
381 static int all_ports_started(void);
382 
383 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
384 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
385 
386 /*
387  * Helper function to check if socket is already discovered.
388  * If yes, return positive value. If not, return zero.
389  */
390 int
391 new_socket_id(unsigned int socket_id)
392 {
393 	unsigned int i;
394 
395 	for (i = 0; i < num_sockets; i++) {
396 		if (socket_ids[i] == socket_id)
397 			return 0;
398 	}
399 	return 1;
400 }
401 
402 /*
403  * Setup default configuration.
404  */
405 static void
406 set_default_fwd_lcores_config(void)
407 {
408 	unsigned int i;
409 	unsigned int nb_lc;
410 	unsigned int sock_num;
411 
412 	nb_lc = 0;
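	/*
	 * Walk every possible lcore id: record each NUMA socket the first
	 * time it is seen, and collect the enabled non-master lcores as
	 * forwarding cores.
	 */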
413 	for (i = 0; i < RTE_MAX_LCORE; i++) {
414 		sock_num = rte_lcore_to_socket_id(i);
415 		if (new_socket_id(sock_num)) {
416 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
417 				rte_exit(EXIT_FAILURE,
418 					 "Total sockets greater than %u\n",
419 					 RTE_MAX_NUMA_NODES);
420 			}
421 			socket_ids[num_sockets++] = sock_num;
422 		}
423 		if (!rte_lcore_is_enabled(i))
424 			continue;
425 		if (i == rte_get_master_lcore())
426 			continue;
427 		fwd_lcores_cpuids[nb_lc++] = i;
428 	}
429 	nb_lcores = (lcoreid_t) nb_lc;
430 	nb_cfg_lcores = nb_lcores;
431 	nb_fwd_lcores = 1;
432 }
433 
434 static void
435 set_def_peer_eth_addrs(void)
436 {
437 	portid_t i;
438 
439 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
440 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
441 		peer_eth_addrs[i].addr_bytes[5] = i;
442 	}
443 }
444 
445 static void
446 set_default_fwd_ports_config(void)
447 {
448 	portid_t pt_id;
449 	int i = 0;
450 
451 	RTE_ETH_FOREACH_DEV(pt_id)
452 		fwd_ports_ids[i++] = pt_id;
453 
454 	nb_cfg_ports = nb_ports;
455 	nb_fwd_ports = nb_ports;
456 }
457 
458 void
459 set_def_fwd_config(void)
460 {
461 	set_default_fwd_lcores_config();
462 	set_def_peer_eth_addrs();
463 	set_default_fwd_ports_config();
464 }
465 
466 /*
467  * Configuration initialisation done once at init time.
468  */
469 static void
470 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
471 		 unsigned int socket_id)
472 {
473 	char pool_name[RTE_MEMPOOL_NAMESIZE];
474 	struct rte_mempool *rte_mp = NULL;
475 	uint32_t mb_size;
476 
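	/* Total element size: the mbuf header plus its data room. */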
477 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
478 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
479 
480 	TESTPMD_LOG(INFO,
481 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
482 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
483 
484 	if (mp_anon != 0) {
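		/*
		 * Anonymous-memory path: create an empty pool, back it with
		 * anonymously mapped pages (which might not be physically
		 * contiguous), then run the standard pktmbuf initializers.
		 */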
485 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
486 			mb_size, (unsigned) mb_mempool_cache,
487 			sizeof(struct rte_pktmbuf_pool_private),
488 			socket_id, 0);
489 		if (rte_mp == NULL)
490 			goto err;
491 
492 		if (rte_mempool_populate_anon(rte_mp) == 0) {
493 			rte_mempool_free(rte_mp);
494 			rte_mp = NULL;
495 			goto err;
496 		}
497 		rte_pktmbuf_pool_init(rte_mp, NULL);
498 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
499 	} else {
500 		/* wrapper to rte_mempool_create() */
501 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
502 				rte_mbuf_best_mempool_ops());
503 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
504 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
505 	}
506 
507 err:
508 	if (rte_mp == NULL) {
509 		rte_exit(EXIT_FAILURE,
510 			"Creation of mbuf pool for socket %u failed: %s\n",
511 			socket_id, rte_strerror(rte_errno));
512 	} else if (verbose_level > 0) {
513 		rte_mempool_dump(stdout, rte_mp);
514 	}
515 }
516 
517 /*
518  * Check whether the given socket id is valid in NUMA mode.
519  * Return 0 if valid, -1 otherwise.
520  */
521 static int
522 check_socket_id(const unsigned int socket_id)
523 {
524 	static int warning_once = 0;
525 
526 	if (new_socket_id(socket_id)) {
527 		if (!warning_once && numa_support)
528 			printf("Warning: NUMA should be configured manually by"
529 			       " using --port-numa-config and"
530 			       " --ring-numa-config parameters along with"
531 			       " --numa.\n");
532 		warning_once = 1;
533 		return -1;
534 	}
535 	return 0;
536 }
537 
538 /*
539  * Get the allowed maximum number of RX queues.
540  * *pid returns the id of the port whose max_rx_queues is the
541  * smallest among all ports.
542  */
543 queueid_t
544 get_allowed_max_nb_rxq(portid_t *pid)
545 {
546 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
547 	portid_t pi;
548 	struct rte_eth_dev_info dev_info;
549 
550 	RTE_ETH_FOREACH_DEV(pi) {
551 		rte_eth_dev_info_get(pi, &dev_info);
552 		if (dev_info.max_rx_queues < allowed_max_rxq) {
553 			allowed_max_rxq = dev_info.max_rx_queues;
554 			*pid = pi;
555 		}
556 	}
557 	return allowed_max_rxq;
558 }
559 
560 /*
561  * Check whether the requested number of RX queues is valid.
562  * It is valid if it does not exceed the smallest max_rx_queues
563  * value among all ports.
564  * Return 0 if valid, -1 otherwise.
565  */
566 int
567 check_nb_rxq(queueid_t rxq)
568 {
569 	queueid_t allowed_max_rxq;
570 	portid_t pid = 0;
571 
572 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
573 	if (rxq > allowed_max_rxq) {
574 		printf("Fail: input rxq (%u) can't be greater "
575 		       "than max_rx_queues (%u) of port %u\n",
576 		       rxq,
577 		       allowed_max_rxq,
578 		       pid);
579 		return -1;
580 	}
581 	return 0;
582 }
583 
584 /*
585  * Get the allowed maximum number of TX queues.
586  * *pid returns the id of the port whose max_tx_queues is the
587  * smallest among all ports.
588  */
589 queueid_t
590 get_allowed_max_nb_txq(portid_t *pid)
591 {
592 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
593 	portid_t pi;
594 	struct rte_eth_dev_info dev_info;
595 
596 	RTE_ETH_FOREACH_DEV(pi) {
597 		rte_eth_dev_info_get(pi, &dev_info);
598 		if (dev_info.max_tx_queues < allowed_max_txq) {
599 			allowed_max_txq = dev_info.max_tx_queues;
600 			*pid = pi;
601 		}
602 	}
603 	return allowed_max_txq;
604 }
605 
606 /*
607  * Check whether the requested number of TX queues is valid.
608  * It is valid if it does not exceed the smallest max_tx_queues
609  * value among all ports.
610  * Return 0 if valid, -1 otherwise.
611  */
612 int
613 check_nb_txq(queueid_t txq)
614 {
615 	queueid_t allowed_max_txq;
616 	portid_t pid = 0;
617 
618 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
619 	if (txq > allowed_max_txq) {
620 		printf("Fail: input txq (%u) can't be greater "
621 		       "than max_tx_queues (%u) of port %u\n",
622 		       txq,
623 		       allowed_max_txq,
624 		       pid);
625 		return -1;
626 	}
627 	return 0;
628 }
629 
630 static void
631 init_config(void)
632 {
633 	portid_t pid;
634 	struct rte_port *port;
635 	struct rte_mempool *mbp;
636 	unsigned int nb_mbuf_per_pool;
637 	lcoreid_t  lc_id;
638 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
639 	struct rte_gro_param gro_param;
640 	uint32_t gso_types;
641 
642 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
643 
644 	if (numa_support) {
645 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
646 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
647 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
648 	}
649 
650 	/* Configuration of logical cores. */
651 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
652 				sizeof(struct fwd_lcore *) * nb_lcores,
653 				RTE_CACHE_LINE_SIZE);
654 	if (fwd_lcores == NULL) {
655 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
656 							"failed\n", nb_lcores);
657 	}
658 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
659 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
660 					       sizeof(struct fwd_lcore),
661 					       RTE_CACHE_LINE_SIZE);
662 		if (fwd_lcores[lc_id] == NULL) {
663 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
664 								"failed\n");
665 		}
666 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
667 	}
668 
669 	RTE_ETH_FOREACH_DEV(pid) {
670 		port = &ports[pid];
671 		/* Apply default TxRx configuration for all ports */
672 		port->dev_conf.txmode = tx_mode;
673 		port->dev_conf.rxmode = rx_mode;
674 		rte_eth_dev_info_get(pid, &port->dev_info);
675 		if (!(port->dev_info.tx_offload_capa &
676 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
677 			port->dev_conf.txmode.offloads &=
678 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
679 		if (!(port->dev_info.rx_offload_capa &
680 		      DEV_RX_OFFLOAD_CRC_STRIP))
681 			port->dev_conf.rxmode.offloads &=
682 				~DEV_RX_OFFLOAD_CRC_STRIP;
683 		if (numa_support) {
684 			if (port_numa[pid] != NUMA_NO_CONFIG)
685 				port_per_socket[port_numa[pid]]++;
686 			else {
687 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
688 
689 				/* if socket_id is invalid, set to 0 */
690 				if (check_socket_id(socket_id) < 0)
691 					socket_id = 0;
692 				port_per_socket[socket_id]++;
693 			}
694 		}
695 
696 		/* set flag to initialize port/queue */
697 		port->need_reconfig = 1;
698 		port->need_reconfig_queues = 1;
699 	}
700 
701 	/*
702 	 * Create pools of mbuf.
703 	 * If NUMA support is disabled, create a single pool of mbuf in
704 	 * socket 0 memory by default.
705 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
706 	 *
707 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
708 	 * nb_txd can be configured at run time.
709 	 */
710 	if (param_total_num_mbufs)
711 		nb_mbuf_per_pool = param_total_num_mbufs;
712 	else {
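		/*
		 * Worst-case sizing: enough mbufs to fill one port's RX and
		 * TX rings at their test-time maxima, plus the per-lcore
		 * mempool caches and one burst in flight, scaled by the
		 * maximum number of ports.
		 */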
713 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
714 			(nb_lcores * mb_mempool_cache) +
715 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
716 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
717 	}
718 
719 	if (numa_support) {
720 		uint8_t i;
721 
722 		for (i = 0; i < num_sockets; i++)
723 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
724 					 socket_ids[i]);
725 	} else {
726 		if (socket_num == UMA_NO_CONFIG)
727 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
728 		else
729 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
730 						 socket_num);
731 	}
732 
733 	init_port_config();
734 
735 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
736 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
737 	/*
738 	 * Records which Mbuf pool to use by each logical core, if needed.
739 	 */
740 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
741 		mbp = mbuf_pool_find(
742 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
743 
744 		if (mbp == NULL)
745 			mbp = mbuf_pool_find(0);
746 		fwd_lcores[lc_id]->mbp = mbp;
747 		/* initialize GSO context */
748 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
749 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
750 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
751 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
752 			ETHER_CRC_LEN;
753 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
754 	}
755 
756 	/* Configuration of packet forwarding streams. */
757 	if (init_fwd_streams() < 0)
758 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
759 
760 	fwd_config_setup();
761 
762 	/* create a gro context for each lcore */
763 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
764 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
765 	gro_param.max_item_per_flow = MAX_PKT_BURST;
766 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
767 		gro_param.socket_id = rte_lcore_to_socket_id(
768 				fwd_lcores_cpuids[lc_id]);
769 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
770 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
771 			rte_exit(EXIT_FAILURE,
772 					"rte_gro_ctx_create() failed\n");
773 		}
774 	}
775 }
776 
777 
778 void
779 reconfig(portid_t new_port_id, unsigned socket_id)
780 {
781 	struct rte_port *port;
782 
783 	/* Reconfiguration of Ethernet ports. */
784 	port = &ports[new_port_id];
785 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
786 
787 	/* set flag to initialize port/queue */
788 	port->need_reconfig = 1;
789 	port->need_reconfig_queues = 1;
790 	port->socket_id = socket_id;
791 
792 	init_port_config();
793 }
794 
795 
796 int
797 init_fwd_streams(void)
798 {
799 	portid_t pid;
800 	struct rte_port *port;
801 	streamid_t sm_id, nb_fwd_streams_new;
802 	queueid_t q;
803 
804 	/* set socket id according to numa or not */
805 	RTE_ETH_FOREACH_DEV(pid) {
806 		port = &ports[pid];
807 		if (nb_rxq > port->dev_info.max_rx_queues) {
808 			printf("Fail: nb_rxq(%d) is greater than "
809 				"max_rx_queues(%d)\n", nb_rxq,
810 				port->dev_info.max_rx_queues);
811 			return -1;
812 		}
813 		if (nb_txq > port->dev_info.max_tx_queues) {
814 			printf("Fail: nb_txq(%d) is greater than "
815 				"max_tx_queues(%d)\n", nb_txq,
816 				port->dev_info.max_tx_queues);
817 			return -1;
818 		}
819 		if (numa_support) {
820 			if (port_numa[pid] != NUMA_NO_CONFIG)
821 				port->socket_id = port_numa[pid];
822 			else {
823 				port->socket_id = rte_eth_dev_socket_id(pid);
824 
825 				/* if socket_id is invalid, set to 0 */
826 				if (check_socket_id(port->socket_id) < 0)
827 					port->socket_id = 0;
828 			}
829 		} else {
831 			if (socket_num == UMA_NO_CONFIG)
832 				port->socket_id = 0;
833 			else
834 				port->socket_id = socket_num;
835 		}
836 	}
837 
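	/*
	 * One forwarding stream is needed per queue: size the stream array
	 * by the larger of the RX/TX queue counts, per port.
	 */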
838 	q = RTE_MAX(nb_rxq, nb_txq);
839 	if (q == 0) {
840 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
841 		return -1;
842 	}
843 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
844 	if (nb_fwd_streams_new == nb_fwd_streams)
845 		return 0;
846 	/* clear the old */
847 	if (fwd_streams != NULL) {
848 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
849 			if (fwd_streams[sm_id] == NULL)
850 				continue;
851 			rte_free(fwd_streams[sm_id]);
852 			fwd_streams[sm_id] = NULL;
853 		}
854 		rte_free(fwd_streams);
855 		fwd_streams = NULL;
856 	}
857 
858 	/* init new */
859 	nb_fwd_streams = nb_fwd_streams_new;
860 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
861 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
862 	if (fwd_streams == NULL)
863 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
864 						"failed\n", nb_fwd_streams);
865 
866 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
867 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
868 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
869 		if (fwd_streams[sm_id] == NULL)
870 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
871 								" failed\n");
872 	}
873 
874 	return 0;
875 }
876 
877 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
878 static void
879 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
880 {
881 	unsigned int total_burst;
882 	unsigned int nb_burst;
883 	unsigned int burst_stats[3];
884 	uint16_t pktnb_stats[3];
885 	uint16_t nb_pkt;
886 	int burst_percent[3];
887 
888 	/*
889 	 * First compute the total number of packet bursts and the
890 	 * two highest numbers of bursts of the same number of packets.
891 	 */
892 	total_burst = 0;
893 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
894 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
895 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
896 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
897 		if (nb_burst == 0)
898 			continue;
899 		total_burst += nb_burst;
900 		if (nb_burst > burst_stats[0]) {
901 			burst_stats[1] = burst_stats[0];
902 			pktnb_stats[1] = pktnb_stats[0];
903 			burst_stats[0] = nb_burst;
904 			pktnb_stats[0] = nb_pkt;
905 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
906 	}
907 	if (total_burst == 0)
908 		return;
909 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
910 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
911 	       burst_percent[0], (int) pktnb_stats[0]);
912 	if (burst_stats[0] == total_burst) {
913 		printf("]\n");
914 		return;
915 	}
916 	if (burst_stats[0] + burst_stats[1] == total_burst) {
917 		printf(" + %d%% of %d pkts]\n",
918 		       100 - burst_percent[0], pktnb_stats[1]);
919 		return;
920 	}
921 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
922 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
923 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
924 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
925 		return;
926 	}
927 	printf(" + %d%% of %d pkts + %d%% of others]\n",
928 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
929 }
930 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
931 
932 static void
933 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
934 {
935 	struct rte_port *port;
936 	uint8_t i;
937 
938 	static const char *fwd_stats_border = "----------------------";
939 
940 	port = &ports[port_id];
941 	printf("\n  %s Forward statistics for port %-2d %s\n",
942 	       fwd_stats_border, port_id, fwd_stats_border);
943 
944 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
945 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
946 		       "%-"PRIu64"\n",
947 		       stats->ipackets, stats->imissed,
948 		       (uint64_t) (stats->ipackets + stats->imissed));
949 
950 		if (cur_fwd_eng == &csum_fwd_engine)
951 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
952 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
953 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
954 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
955 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
956 		}
957 
958 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
959 		       "%-"PRIu64"\n",
960 		       stats->opackets, port->tx_dropped,
961 		       (uint64_t) (stats->opackets + port->tx_dropped));
962 	}
963 	else {
964 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
965 		       "%14"PRIu64"\n",
966 		       stats->ipackets, stats->imissed,
967 		       (uint64_t) (stats->ipackets + stats->imissed));
968 
969 		if (cur_fwd_eng == &csum_fwd_engine)
970 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
971 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
972 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
973 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
974 			printf("  RX-nombufs:             %14"PRIu64"\n",
975 			       stats->rx_nombuf);
976 		}
977 
978 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
979 		       "%14"PRIu64"\n",
980 		       stats->opackets, port->tx_dropped,
981 		       (uint64_t) (stats->opackets + port->tx_dropped));
982 	}
983 
984 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
985 	if (port->rx_stream)
986 		pkt_burst_stats_display("RX",
987 			&port->rx_stream->rx_burst_stats);
988 	if (port->tx_stream)
989 		pkt_burst_stats_display("TX",
990 			&port->tx_stream->tx_burst_stats);
991 #endif
992 
993 	if (port->rx_queue_stats_mapping_enabled) {
994 		printf("\n");
995 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
996 			printf("  Stats reg %2d RX-packets:%14"PRIu64
997 			       "     RX-errors:%14"PRIu64
998 			       "    RX-bytes:%14"PRIu64"\n",
999 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1000 		}
1001 		printf("\n");
1002 	}
1003 	if (port->tx_queue_stats_mapping_enabled) {
1004 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1005 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1006 			       "                                 TX-bytes:%14"PRIu64"\n",
1007 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1008 		}
1009 	}
1010 
1011 	printf("  %s--------------------------------%s\n",
1012 	       fwd_stats_border, fwd_stats_border);
1013 }
1014 
1015 static void
1016 fwd_stream_stats_display(streamid_t stream_id)
1017 {
1018 	struct fwd_stream *fs;
1019 	static const char *fwd_top_stats_border = "-------";
1020 
1021 	fs = fwd_streams[stream_id];
1022 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1023 	    (fs->fwd_dropped == 0))
1024 		return;
1025 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1026 	       "TX Port=%2d/Queue=%2d %s\n",
1027 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1028 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1029 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1030 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1031 
1032 	/* if checksum mode */
1033 	if (cur_fwd_eng == &csum_fwd_engine) {
1034 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1035 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1036 	}
1037 
1038 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1039 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1040 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1041 #endif
1042 }
1043 
1044 static void
1045 flush_fwd_rx_queues(void)
1046 {
1047 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1048 	portid_t  rxp;
1049 	portid_t port_id;
1050 	queueid_t rxq;
1051 	uint16_t  nb_rx;
1052 	uint16_t  i;
1053 	uint8_t   j;
1054 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1055 	uint64_t timer_period;
1056 
1057 	/* convert to number of cycles */
1058 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1059 
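	/*
	 * Drain every RX queue twice, with a short pause in between, so
	 * that packets still in flight during the first pass are freed
	 * by the second one.
	 */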
1060 	for (j = 0; j < 2; j++) {
1061 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1062 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1063 				port_id = fwd_ports_ids[rxp];
1064 				/*
1065 				 * testpmd can get stuck in the do/while loop
1066 				 * below if rte_eth_rx_burst() always returns a
1067 				 * nonzero number of packets, so a 1-second
1068 				 * timer is used to force an exit from it.
1069 				 */
1070 				prev_tsc = rte_rdtsc();
1071 				do {
1072 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1073 						pkts_burst, MAX_PKT_BURST);
1074 					for (i = 0; i < nb_rx; i++)
1075 						rte_pktmbuf_free(pkts_burst[i]);
1076 
1077 					cur_tsc = rte_rdtsc();
1078 					diff_tsc = cur_tsc - prev_tsc;
1079 					timer_tsc += diff_tsc;
1080 				} while ((nb_rx > 0) &&
1081 					(timer_tsc < timer_period));
1082 				timer_tsc = 0;
1083 			}
1084 		}
1085 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1086 	}
1087 }
1088 
1089 static void
1090 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1091 {
1092 	struct fwd_stream **fsm;
1093 	streamid_t nb_fs;
1094 	streamid_t sm_id;
1095 #ifdef RTE_LIBRTE_BITRATE
1096 	uint64_t tics_per_1sec;
1097 	uint64_t tics_datum;
1098 	uint64_t tics_current;
1099 	uint8_t idx_port, cnt_ports;
1100 
1101 	cnt_ports = rte_eth_dev_count();
1102 	tics_datum = rte_rdtsc();
1103 	tics_per_1sec = rte_get_timer_hz();
1104 #endif
1105 	fsm = &fwd_streams[fc->stream_idx];
1106 	nb_fs = fc->stream_nb;
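	/*
	 * Main forwarding loop: run the engine's packet_fwd callback on
	 * every stream assigned to this lcore until the lcore is asked
	 * to stop.
	 */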
1107 	do {
1108 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1109 			(*pkt_fwd)(fsm[sm_id]);
1110 #ifdef RTE_LIBRTE_BITRATE
1111 		if (bitrate_enabled != 0 &&
1112 				bitrate_lcore_id == rte_lcore_id()) {
1113 			tics_current = rte_rdtsc();
1114 			if (tics_current - tics_datum >= tics_per_1sec) {
1115 				/* Periodic bitrate calculation */
1116 				for (idx_port = 0;
1117 						idx_port < cnt_ports;
1118 						idx_port++)
1119 					rte_stats_bitrate_calc(bitrate_data,
1120 						idx_port);
1121 				tics_datum = tics_current;
1122 			}
1123 		}
1124 #endif
1125 #ifdef RTE_LIBRTE_LATENCY_STATS
1126 		if (latencystats_enabled != 0 &&
1127 				latencystats_lcore_id == rte_lcore_id())
1128 			rte_latencystats_update();
1129 #endif
1130 
1131 	} while (! fc->stopped);
1132 }
1133 
1134 static int
1135 start_pkt_forward_on_core(void *fwd_arg)
1136 {
1137 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1138 			     cur_fwd_config.fwd_eng->packet_fwd);
1139 	return 0;
1140 }
1141 
1142 /*
1143  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1144  * Used to start communication flows in network loopback test configurations.
1145  */
1146 static int
1147 run_one_txonly_burst_on_core(void *fwd_arg)
1148 {
1149 	struct fwd_lcore *fwd_lc;
1150 	struct fwd_lcore tmp_lcore;
1151 
1152 	fwd_lc = (struct fwd_lcore *) fwd_arg;
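	/*
	 * Forward on a local copy of the lcore context with "stopped"
	 * already set, so that the forwarding loop runs exactly one
	 * iteration: a single burst on each stream.
	 */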
1153 	tmp_lcore = *fwd_lc;
1154 	tmp_lcore.stopped = 1;
1155 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1156 	return 0;
1157 }
1158 
1159 /*
1160  * Launch packet forwarding:
1161  *     - Setup per-port forwarding context.
1162  *     - Launch logical cores with their forwarding configuration.
1163  */
1164 static void
1165 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1166 {
1167 	port_fwd_begin_t port_fwd_begin;
1168 	unsigned int i;
1169 	unsigned int lc_id;
1170 	int diag;
1171 
1172 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1173 	if (port_fwd_begin != NULL) {
1174 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1175 			(*port_fwd_begin)(fwd_ports_ids[i]);
1176 	}
1177 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1178 		lc_id = fwd_lcores_cpuids[i];
1179 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1180 			fwd_lcores[i]->stopped = 0;
1181 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1182 						     fwd_lcores[i], lc_id);
1183 			if (diag != 0)
1184 				printf("launch lcore %u failed - diag=%d\n",
1185 				       lc_id, diag);
1186 		}
1187 	}
1188 }
1189 
1190 /*
1191  * Launch packet forwarding configuration.
1192  */
1193 void
1194 start_packet_forwarding(int with_tx_first)
1195 {
1196 	port_fwd_begin_t port_fwd_begin;
1197 	port_fwd_end_t  port_fwd_end;
1198 	struct rte_port *port;
1199 	unsigned int i;
1200 	portid_t   pt_id;
1201 	streamid_t sm_id;
1202 
1203 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1204 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1205 
1206 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1207 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1208 
1209 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1210 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1211 		(!nb_rxq || !nb_txq))
1212 		rte_exit(EXIT_FAILURE,
1213 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1214 			cur_fwd_eng->fwd_mode_name);
1215 
1216 	if (all_ports_started() == 0) {
1217 		printf("Not all ports were started\n");
1218 		return;
1219 	}
1220 	if (test_done == 0) {
1221 		printf("Packet forwarding already started\n");
1222 		return;
1223 	}
1224 
1225 	if (init_fwd_streams() < 0) {
1226 		printf("Fail from init_fwd_streams()\n");
1227 		return;
1228 	}
1229 
1230 	if (dcb_test) {
1231 		for (i = 0; i < nb_fwd_ports; i++) {
1232 			pt_id = fwd_ports_ids[i];
1233 			port = &ports[pt_id];
1234 			if (!port->dcb_flag) {
1235 				printf("In DCB mode, all forwarding ports must "
1236                                        "be configured in this mode.\n");
1237 				return;
1238 			}
1239 		}
1240 		if (nb_fwd_lcores == 1) {
1241 			printf("In DCB mode,the nb forwarding cores "
1242                                "should be larger than 1.\n");
1243 			return;
1244 		}
1245 	}
1246 	test_done = 0;
1247 
1248 	if (!no_flush_rx)
1249 		flush_fwd_rx_queues();
1250 
1251 	fwd_config_setup();
1252 	pkt_fwd_config_display(&cur_fwd_config);
1253 	rxtx_config_display();
1254 
1255 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1256 		pt_id = fwd_ports_ids[i];
1257 		port = &ports[pt_id];
1258 		rte_eth_stats_get(pt_id, &port->stats);
1259 		port->tx_dropped = 0;
1260 
1261 		map_port_queue_stats_mapping_registers(pt_id, port);
1262 	}
1263 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1264 		fwd_streams[sm_id]->rx_packets = 0;
1265 		fwd_streams[sm_id]->tx_packets = 0;
1266 		fwd_streams[sm_id]->fwd_dropped = 0;
1267 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1268 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1269 
1270 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1271 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1272 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1273 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1274 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1275 #endif
1276 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1277 		fwd_streams[sm_id]->core_cycles = 0;
1278 #endif
1279 	}
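	/*
	 * With tx_first, first launch the requested number of TXONLY
	 * bursts (waiting for each launch to complete) to prime loopback
	 * configurations, then start the configured forwarding engine.
	 */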
1280 	if (with_tx_first) {
1281 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1282 		if (port_fwd_begin != NULL) {
1283 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1284 				(*port_fwd_begin)(fwd_ports_ids[i]);
1285 		}
1286 		while (with_tx_first--) {
1287 			launch_packet_forwarding(
1288 					run_one_txonly_burst_on_core);
1289 			rte_eal_mp_wait_lcore();
1290 		}
1291 		port_fwd_end = tx_only_engine.port_fwd_end;
1292 		if (port_fwd_end != NULL) {
1293 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1294 				(*port_fwd_end)(fwd_ports_ids[i]);
1295 		}
1296 	}
1297 	launch_packet_forwarding(start_pkt_forward_on_core);
1298 }
1299 
1300 void
1301 stop_packet_forwarding(void)
1302 {
1303 	struct rte_eth_stats stats;
1304 	struct rte_port *port;
1305 	port_fwd_end_t  port_fwd_end;
1306 	int i;
1307 	portid_t   pt_id;
1308 	streamid_t sm_id;
1309 	lcoreid_t  lc_id;
1310 	uint64_t total_recv;
1311 	uint64_t total_xmit;
1312 	uint64_t total_rx_dropped;
1313 	uint64_t total_tx_dropped;
1314 	uint64_t total_rx_nombuf;
1315 	uint64_t tx_dropped;
1316 	uint64_t rx_bad_ip_csum;
1317 	uint64_t rx_bad_l4_csum;
1318 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1319 	uint64_t fwd_cycles;
1320 #endif
1321 
1322 	static const char *acc_stats_border = "+++++++++++++++";
1323 
1324 	if (test_done) {
1325 		printf("Packet forwarding not started\n");
1326 		return;
1327 	}
1328 	printf("Telling cores to stop...");
1329 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1330 		fwd_lcores[lc_id]->stopped = 1;
1331 	printf("\nWaiting for lcores to finish...\n");
1332 	rte_eal_mp_wait_lcore();
1333 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1334 	if (port_fwd_end != NULL) {
1335 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1336 			pt_id = fwd_ports_ids[i];
1337 			(*port_fwd_end)(pt_id);
1338 		}
1339 	}
1340 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1341 	fwd_cycles = 0;
1342 #endif
1343 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1344 		if (cur_fwd_config.nb_fwd_streams >
1345 		    cur_fwd_config.nb_fwd_ports) {
1346 			fwd_stream_stats_display(sm_id);
1347 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1348 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1349 		} else {
1350 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1351 				fwd_streams[sm_id];
1352 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1353 				fwd_streams[sm_id];
1354 		}
1355 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1356 		tx_dropped = (uint64_t) (tx_dropped +
1357 					 fwd_streams[sm_id]->fwd_dropped);
1358 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1359 
1360 		rx_bad_ip_csum =
1361 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1362 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1363 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1364 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1365 							rx_bad_ip_csum;
1366 
1367 		rx_bad_l4_csum =
1368 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1369 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1370 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1371 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1372 							rx_bad_l4_csum;
1373 
1374 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1375 		fwd_cycles = (uint64_t) (fwd_cycles +
1376 					 fwd_streams[sm_id]->core_cycles);
1377 #endif
1378 	}
1379 	total_recv = 0;
1380 	total_xmit = 0;
1381 	total_rx_dropped = 0;
1382 	total_tx_dropped = 0;
1383 	total_rx_nombuf  = 0;
1384 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1385 		pt_id = fwd_ports_ids[i];
1386 
1387 		port = &ports[pt_id];
1388 		rte_eth_stats_get(pt_id, &stats);
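		/*
		 * Report only this run's traffic: subtract the counters
		 * snapshotted in start_packet_forwarding() and clear the
		 * snapshot.
		 */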
1389 		stats.ipackets -= port->stats.ipackets;
1390 		port->stats.ipackets = 0;
1391 		stats.opackets -= port->stats.opackets;
1392 		port->stats.opackets = 0;
1393 		stats.ibytes   -= port->stats.ibytes;
1394 		port->stats.ibytes = 0;
1395 		stats.obytes   -= port->stats.obytes;
1396 		port->stats.obytes = 0;
1397 		stats.imissed  -= port->stats.imissed;
1398 		port->stats.imissed = 0;
1399 		stats.oerrors  -= port->stats.oerrors;
1400 		port->stats.oerrors = 0;
1401 		stats.rx_nombuf -= port->stats.rx_nombuf;
1402 		port->stats.rx_nombuf = 0;
1403 
1404 		total_recv += stats.ipackets;
1405 		total_xmit += stats.opackets;
1406 		total_rx_dropped += stats.imissed;
1407 		total_tx_dropped += port->tx_dropped;
1408 		total_rx_nombuf  += stats.rx_nombuf;
1409 
1410 		fwd_port_stats_display(pt_id, &stats);
1411 	}
1412 
1413 	printf("\n  %s Accumulated forward statistics for all ports"
1414 	       "%s\n",
1415 	       acc_stats_border, acc_stats_border);
1416 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1417 	       "%-"PRIu64"\n"
1418 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1419 	       "%-"PRIu64"\n",
1420 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1421 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1422 	if (total_rx_nombuf > 0)
1423 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1424 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1425 	       "%s\n",
1426 	       acc_stats_border, acc_stats_border);
1427 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1428 	if (total_recv > 0)
1429 		printf("\n  CPU cycles/packet=%u (total cycles="
1430 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1431 		       (unsigned int)(fwd_cycles / total_recv),
1432 		       fwd_cycles, total_recv);
1433 #endif
1434 	printf("\nDone.\n");
1435 	test_done = 1;
1436 }
1437 
1438 void
1439 dev_set_link_up(portid_t pid)
1440 {
1441 	if (rte_eth_dev_set_link_up(pid) < 0)
1442 		printf("\nSet link up fail.\n");
1443 }
1444 
1445 void
1446 dev_set_link_down(portid_t pid)
1447 {
1448 	if (rte_eth_dev_set_link_down(pid) < 0)
1449 		printf("\nSet link down fail.\n");
1450 }
1451 
1452 static int
1453 all_ports_started(void)
1454 {
1455 	portid_t pi;
1456 	struct rte_port *port;
1457 
1458 	RTE_ETH_FOREACH_DEV(pi) {
1459 		port = &ports[pi];
1460 		/* Check if there is a port which is not started */
1461 		if ((port->port_status != RTE_PORT_STARTED) &&
1462 			(port->slave_flag == 0))
1463 			return 0;
1464 	}
1465 
1466 	/* All ports are started (bonding slaves excluded) */
1467 	return 1;
1468 }
1469 
1470 int
1471 port_is_stopped(portid_t port_id)
1472 {
1473 	struct rte_port *port = &ports[port_id];
1474 
1475 	if ((port->port_status != RTE_PORT_STOPPED) &&
1476 	    (port->slave_flag == 0))
1477 		return 0;
1478 	return 1;
1479 }
1480 
1481 int
1482 all_ports_stopped(void)
1483 {
1484 	portid_t pi;
1485 
1486 	RTE_ETH_FOREACH_DEV(pi) {
1487 		if (!port_is_stopped(pi))
1488 			return 0;
1489 	}
1490 
1491 	return 1;
1492 }
1493 
1494 int
1495 port_is_started(portid_t port_id)
1496 {
1497 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1498 		return 0;
1499 
1500 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1501 		return 0;
1502 
1503 	return 1;
1504 }
1505 
1506 static int
1507 port_is_closed(portid_t port_id)
1508 {
1509 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1510 		return 0;
1511 
1512 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1513 		return 0;
1514 
1515 	return 1;
1516 }
1517 
1518 int
1519 start_port(portid_t pid)
1520 {
1521 	int diag, need_check_link_status = -1;
1522 	portid_t pi;
1523 	queueid_t qi;
1524 	struct rte_port *port;
1525 	struct ether_addr mac_addr;
1526 	enum rte_eth_event_type event_type;
1527 
1528 	if (port_id_is_invalid(pid, ENABLED_WARN))
1529 		return 0;
1530 
1531 	if (dcb_config)
1532 		dcb_test = 1;
1533 	RTE_ETH_FOREACH_DEV(pi) {
1534 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1535 			continue;
1536 
1537 		need_check_link_status = 0;
1538 		port = &ports[pi];
1539 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1540 						 RTE_PORT_HANDLING) == 0) {
1541 			printf("Port %d is now not stopped\n", pi);
1542 			continue;
1543 		}
1544 
1545 		if (port->need_reconfig > 0) {
1546 			port->need_reconfig = 0;
1547 
1548 			if (flow_isolate_all) {
1549 				int ret = port_flow_isolate(pi, 1);
1550 				if (ret) {
1551 					printf("Failed to apply isolated"
1552 					       " mode on port %d\n", pi);
1553 					return -1;
1554 				}
1555 			}
1556 
1557 			printf("Configuring Port %d (socket %u)\n", pi,
1558 					port->socket_id);
1559 			/* configure port */
1560 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1561 						&(port->dev_conf));
1562 			if (diag != 0) {
1563 				if (rte_atomic16_cmpset(&(port->port_status),
1564 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1565 					printf("Port %d can not be set back "
1566 							"to stopped\n", pi);
1567 				printf("Fail to configure port %d\n", pi);
1568 				/* try to reconfigure port next time */
1569 				port->need_reconfig = 1;
1570 				return -1;
1571 			}
1572 		}
1573 		if (port->need_reconfig_queues > 0) {
1574 			port->need_reconfig_queues = 0;
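			/*
			 * ETH_TXQ_FLAGS_IGNORE tells the PMD to take its Tx
			 * settings from tx_conf.offloads (filled below from
			 * the port-level config) instead of the legacy
			 * txq_flags bits.
			 */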
1575 			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1576 			/* Apply Tx offloads configuration */
1577 			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1578 			/* setup tx queues */
1579 			for (qi = 0; qi < nb_txq; qi++) {
1580 				if ((numa_support) &&
1581 					(txring_numa[pi] != NUMA_NO_CONFIG))
1582 					diag = rte_eth_tx_queue_setup(pi, qi,
1583 						nb_txd, txring_numa[pi],
1584 						&(port->tx_conf));
1585 				else
1586 					diag = rte_eth_tx_queue_setup(pi, qi,
1587 						nb_txd, port->socket_id,
1588 						&(port->tx_conf));
1589 
1590 				if (diag == 0)
1591 					continue;
1592 
1593 				/* Fail to setup tx queue, return */
1594 				if (rte_atomic16_cmpset(&(port->port_status),
1595 							RTE_PORT_HANDLING,
1596 							RTE_PORT_STOPPED) == 0)
1597 					printf("Port %d can not be set back "
1598 							"to stopped\n", pi);
1599 				printf("Fail to configure port %d tx queues\n", pi);
1600 				/* try to reconfigure queues next time */
1601 				port->need_reconfig_queues = 1;
1602 				return -1;
1603 			}
1604 			/* Apply Rx offloads configuration */
1605 			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1606 			/* setup rx queues */
1607 			for (qi = 0; qi < nb_rxq; qi++) {
1608 				if ((numa_support) &&
1609 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1610 					struct rte_mempool *mp =
1611 						mbuf_pool_find(rxring_numa[pi]);
1612 					if (mp == NULL) {
1613 						printf("Failed to setup RX queue: "
1614 							"no mempool allocated "
1615 							"on socket %d\n",
1616 							rxring_numa[pi]);
1617 						return -1;
1618 					}
1619 
1620 					diag = rte_eth_rx_queue_setup(pi, qi,
1621 					     nb_rxd, rxring_numa[pi],
1622 					     &(port->rx_conf), mp);
1623 				} else {
1624 					struct rte_mempool *mp =
1625 						mbuf_pool_find(port->socket_id);
1626 					if (mp == NULL) {
1627 						printf("Failed to setup RX queue:"
1628 							"No mempool allocation"
1629 							" on the socket %d\n",
1630 							port->socket_id);
1631 						return -1;
1632 					}
1633 					diag = rte_eth_rx_queue_setup(pi, qi,
1634 					     nb_rxd, port->socket_id,
1635 					     &(port->rx_conf), mp);
1636 				}
1637 				if (diag == 0)
1638 					continue;
1639 
1640 				/* Fail to setup rx queue, return */
1641 				if (rte_atomic16_cmpset(&(port->port_status),
1642 							RTE_PORT_HANDLING,
1643 							RTE_PORT_STOPPED) == 0)
1644 					printf("Port %d can not be set back "
1645 							"to stopped\n", pi);
1646 				printf("Fail to configure port %d rx queues\n", pi);
1647 				/* try to reconfigure queues next time */
1648 				port->need_reconfig_queues = 1;
1649 				return -1;
1650 			}
1651 		}
1652 
1653 		/* start port */
1654 		if (rte_eth_dev_start(pi) < 0) {
1655 			printf("Fail to start port %d\n", pi);
1656 
1657 			/* Failed to start the port: roll its status back to stopped */
1658 			if (rte_atomic16_cmpset(&(port->port_status),
1659 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1660 				printf("Port %d can not be set back to "
1661 							"stopped\n", pi);
1662 			continue;
1663 		}
1664 
1665 		if (rte_atomic16_cmpset(&(port->port_status),
1666 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1667 			printf("Port %d can not be set into started\n", pi);
1668 
1669 		rte_eth_macaddr_get(pi, &mac_addr);
1670 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1671 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1672 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1673 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1674 
1675 		/* at least one port started, need checking link status */
1676 		need_check_link_status = 1;
1677 	}
1678 
1679 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1680 	     event_type < RTE_ETH_EVENT_MAX;
1681 	     event_type++) {
1682 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1683 						event_type,
1684 						eth_event_callback,
1685 						NULL);
1686 		if (diag) {
1687 			printf("Failed to setup even callback for event %d\n",
1688 				event_type);
1689 			return -1;
1690 		}
1691 	}
1692 
1693 	if (need_check_link_status == 1 && !no_link_check)
1694 		check_all_ports_link_status(RTE_PORT_ALL);
1695 	else if (need_check_link_status == 0)
1696 		printf("Please stop the ports first\n");
1697 
1698 	printf("Done\n");
1699 	return 0;
1700 }
1701 
1702 void
1703 stop_port(portid_t pid)
1704 {
1705 	portid_t pi;
1706 	struct rte_port *port;
1707 	int need_check_link_status = 0;
1708 
1709 	if (dcb_test) {
1710 		dcb_test = 0;
1711 		dcb_config = 0;
1712 	}
1713 
1714 	if (port_id_is_invalid(pid, ENABLED_WARN))
1715 		return;
1716 
1717 	printf("Stopping ports...\n");
1718 
1719 	RTE_ETH_FOREACH_DEV(pi) {
1720 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1721 			continue;
1722 
1723 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1724 			printf("Please remove port %d from forwarding configuration.\n", pi);
1725 			continue;
1726 		}
1727 
1728 		if (port_is_bonding_slave(pi)) {
1729 			printf("Please remove port %d from bonded device.\n", pi);
1730 			continue;
1731 		}
1732 
1733 		port = &ports[pi];
1734 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1735 						RTE_PORT_HANDLING) == 0)
1736 			continue;
1737 
1738 		rte_eth_dev_stop(pi);
1739 
1740 		if (rte_atomic16_cmpset(&(port->port_status),
1741 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1742 			printf("Port %d can not be set into stopped\n", pi);
1743 		need_check_link_status = 1;
1744 	}
1745 	if (need_check_link_status && !no_link_check)
1746 		check_all_ports_link_status(RTE_PORT_ALL);
1747 
1748 	printf("Done\n");
1749 }
1750 
1751 void
1752 close_port(portid_t pid)
1753 {
1754 	portid_t pi;
1755 	struct rte_port *port;
1756 
1757 	if (port_id_is_invalid(pid, ENABLED_WARN))
1758 		return;
1759 
1760 	printf("Closing ports...\n");
1761 
1762 	RTE_ETH_FOREACH_DEV(pi) {
1763 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1764 			continue;
1765 
1766 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1767 			printf("Please remove port %d from forwarding configuration.\n", pi);
1768 			continue;
1769 		}
1770 
1771 		if (port_is_bonding_slave(pi)) {
1772 			printf("Please remove port %d from bonded device.\n", pi);
1773 			continue;
1774 		}
1775 
1776 		port = &ports[pi];
1777 		if (rte_atomic16_cmpset(&(port->port_status),
1778 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1779 			printf("Port %d is already closed\n", pi);
1780 			continue;
1781 		}
1782 
1783 		if (rte_atomic16_cmpset(&(port->port_status),
1784 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1785 			printf("Port %d is now not stopped\n", pi);
1786 			continue;
1787 		}
1788 
1789 		if (port->flow_list)
1790 			port_flow_flush(pi);
1791 		rte_eth_dev_close(pi);
1792 
1793 		if (rte_atomic16_cmpset(&(port->port_status),
1794 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1795 			printf("Port %d cannot be set to closed\n", pi);
1796 	}
1797 
1798 	printf("Done\n");
1799 }
1800 
1801 void
1802 reset_port(portid_t pid)
1803 {
1804 	int diag;
1805 	portid_t pi;
1806 	struct rte_port *port;
1807 
1808 	if (port_id_is_invalid(pid, ENABLED_WARN))
1809 		return;
1810 
1811 	printf("Resetting ports...\n");
1812 
1813 	RTE_ETH_FOREACH_DEV(pi) {
1814 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1815 			continue;
1816 
1817 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1818 			printf("Please remove port %d from forwarding "
1819 			       "configuration.\n", pi);
1820 			continue;
1821 		}
1822 
1823 		if (port_is_bonding_slave(pi)) {
1824 			printf("Please remove port %d from bonded device.\n",
1825 			       pi);
1826 			continue;
1827 		}
1828 
1829 		diag = rte_eth_dev_reset(pi);
1830 		if (diag == 0) {
1831 			port = &ports[pi];
1832 			port->need_reconfig = 1;
1833 			port->need_reconfig_queues = 1;
1834 		} else {
1835 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1836 		}
1837 	}
1838 
1839 	printf("Done\n");
1840 }
1841 
1842 void
1843 attach_port(char *identifier)
1844 {
1845 	portid_t pi = 0;
1846 	unsigned int socket_id;
1847 
1848 	printf("Attaching a new port...\n");
1849 
1850 	if (identifier == NULL) {
1851 		printf("Invalid parameters are specified\n");
1852 		return;
1853 	}
1854 
1855 	if (rte_eth_dev_attach(identifier, &pi))
1856 		return;
1857 
1858 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1859 	/* if socket_id is invalid, set to 0 */
1860 	if (check_socket_id(socket_id) < 0)
1861 		socket_id = 0;
1862 	reconfig(pi, socket_id);
1863 	rte_eth_promiscuous_enable(pi);
1864 
1865 	nb_ports = rte_eth_dev_count();
1866 
1867 	ports[pi].port_status = RTE_PORT_STOPPED;
1868 
1869 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1870 	printf("Done\n");
1871 }
1872 
1873 void
1874 detach_port(portid_t port_id)
1875 {
1876 	char name[RTE_ETH_NAME_MAX_LEN];
1877 
1878 	printf("Detaching a port...\n");
1879 
1880 	if (!port_is_closed(port_id)) {
1881 		printf("Please close port first\n");
1882 		return;
1883 	}
1884 
1885 	if (ports[port_id].flow_list)
1886 		port_flow_flush(port_id);
1887 
1888 	if (rte_eth_dev_detach(port_id, name)) {
1889 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1890 		return;
1891 	}
1892 
1893 	nb_ports = rte_eth_dev_count();
1894 
1895 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1896 			name, nb_ports);
1897 	printf("Done\n");
1899 }
1900 
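/*
 * Orderly shutdown used on every exit path: stop forwarding if it is
 * still running, then stop and close all ports with link-status checks
 * suppressed.
 */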
1901 void
1902 pmd_test_exit(void)
1903 {
1904 	portid_t pt_id;
1905 
1906 	if (test_done == 0)
1907 		stop_packet_forwarding();
1908 
1909 	if (ports != NULL) {
1910 		no_link_check = 1;
1911 		RTE_ETH_FOREACH_DEV(pt_id) {
1912 			printf("\nShutting down port %d...\n", pt_id);
1913 			fflush(stdout);
1914 			stop_port(pt_id);
1915 			close_port(pt_id);
1916 		}
1917 	}
1918 	printf("\nBye...\n");
1919 }
1920 
1921 typedef void (*cmd_func_t)(void);
1922 struct pmd_test_command {
1923 	const char *cmd_name;
1924 	cmd_func_t cmd_func;
1925 };
1926 
1927 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1928 
1929 /* Check the link status of all ports for up to 9s, then print the final status */
1930 static void
1931 check_all_ports_link_status(uint32_t port_mask)
1932 {
1933 #define CHECK_INTERVAL 100 /* 100ms */
1934 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1935 	portid_t portid;
1936 	uint8_t count, all_ports_up, print_flag = 0;
1937 	struct rte_eth_link link;
1938 
1939 	printf("Checking link statuses...\n");
1940 	fflush(stdout);
1941 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1942 		all_ports_up = 1;
1943 		RTE_ETH_FOREACH_DEV(portid) {
1944 			if ((port_mask & (1 << portid)) == 0)
1945 				continue;
1946 			memset(&link, 0, sizeof(link));
1947 			rte_eth_link_get_nowait(portid, &link);
1948 			/* print link status if flag set */
1949 			if (print_flag == 1) {
1950 				if (link.link_status)
1951 					printf(
1952 					"Port %d Link Up. speed %u Mbps - %s\n",
1953 					portid, link.link_speed,
1954 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1955 					("full-duplex") : ("half-duplex"));
1956 				else
1957 					printf("Port %d Link Down\n", portid);
1958 				continue;
1959 			}
1960 			/* clear all_ports_up flag if any link down */
1961 			if (link.link_status == ETH_LINK_DOWN) {
1962 				all_ports_up = 0;
1963 				break;
1964 			}
1965 		}
1966 		/* after finally printing all link status, get out */
1967 		if (print_flag == 1)
1968 			break;
1969 
1970 		if (all_ports_up == 0) {
1971 			fflush(stdout);
1972 			rte_delay_ms(CHECK_INTERVAL);
1973 		}
1974 
1975 		/* set the print_flag if all ports up or timeout */
1976 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1977 			print_flag = 1;
1978 		}
1979 
1980 		if (lsc_interrupt)
1981 			break;
1982 	}
1983 }
1984 
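/*
 * Deferred handler for device-removal (RMV) events: scheduled through an
 * EAL alarm by eth_event_callback() below so that the ethdev teardown and
 * detach do not run directly in the interrupt thread.
 */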
1985 static void
1986 rmv_event_callback(void *arg)
1987 {
1988 	struct rte_eth_dev *dev;
1989 	portid_t port_id = (intptr_t)arg;
1990 
1991 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1992 	dev = &rte_eth_devices[port_id];
1993 
1994 	stop_port(port_id);
1995 	close_port(port_id);
1996 	printf("removing device %s\n", dev->device->name);
1997 	if (rte_eal_dev_detach(dev->device))
1998 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1999 			dev->device->name);
2000 }
2001 
2002 /* This function is used by the interrupt thread */
2003 static int
2004 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2005 		  void *ret_param)
2006 {
2007 	static const char * const event_desc[] = {
2008 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2009 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2010 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2011 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2012 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2013 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2014 		[RTE_ETH_EVENT_INTR_RMV] = "Device removal",
2015 		[RTE_ETH_EVENT_NEW] = "Device probed",
2016 		[RTE_ETH_EVENT_DESTROY] = "Device released",
2017 		[RTE_ETH_EVENT_MAX] = NULL,
2018 	};
2019 
2020 	RTE_SET_USED(param);
2021 	RTE_SET_USED(ret_param);
2022 
2023 	if (type >= RTE_ETH_EVENT_MAX) {
2024 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2025 			port_id, __func__, type);
2026 		fflush(stderr);
2027 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2028 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2029 			event_desc[type]);
2030 		fflush(stdout);
2031 	}
2032 
2033 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2034 		return 0;
2035 
2036 	switch (type) {
2037 	case RTE_ETH_EVENT_INTR_RMV:
2038 		if (rte_eal_alarm_set(100000,
2039 				rmv_event_callback, (void *)(intptr_t)port_id))
2040 			fprintf(stderr, "Could not set up deferred device removal\n");
2041 		break;
2042 	default:
2043 		break;
2044 	}
2045 	return 0;
2046 }
2047 
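/*
 * Program the per-queue statistics mappings supplied on the command line
 * (e.g. via the --tx-queue-stats-mapping option); each entry binds one TX
 * queue of a port to one of the device's limited statistics counters.
 */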
2048 static int
2049 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2050 {
2051 	uint16_t i;
2052 	int diag;
2053 	uint8_t mapping_found = 0;
2054 
2055 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2056 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2057 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2058 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2059 					tx_queue_stats_mappings[i].queue_id,
2060 					tx_queue_stats_mappings[i].stats_counter_id);
2061 			if (diag != 0)
2062 				return diag;
2063 			mapping_found = 1;
2064 		}
2065 	}
2066 	if (mapping_found)
2067 		port->tx_queue_stats_mapping_enabled = 1;
2068 	return 0;
2069 }
2070 
2071 static int
2072 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2073 {
2074 	uint16_t i;
2075 	int diag;
2076 	uint8_t mapping_found = 0;
2077 
2078 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2079 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2080 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2081 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2082 					rx_queue_stats_mappings[i].queue_id,
2083 					rx_queue_stats_mappings[i].stats_counter_id);
2084 			if (diag != 0)
2085 				return diag;
2086 			mapping_found = 1;
2087 		}
2088 	}
2089 	if (mapping_found)
2090 		port->rx_queue_stats_mapping_enabled = 1;
2091 	return 0;
2092 }
2093 
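/*
 * Apply both TX and RX stats mappings to a port. -ENOTSUP from the PMD
 * merely disables the feature for that port; any other error is fatal.
 */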
2094 static void
2095 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2096 {
2097 	int diag = 0;
2098 
2099 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2100 	if (diag != 0) {
2101 		if (diag == -ENOTSUP) {
2102 			port->tx_queue_stats_mapping_enabled = 0;
2103 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2104 		} else
2106 			rte_exit(EXIT_FAILURE,
2107 					"set_tx_queue_stats_mapping_registers "
2108 					"failed for port id=%d diag=%d\n",
2109 					pi, diag);
2110 	}
2111 
2112 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2113 	if (diag != 0) {
2114 		if (diag == -ENOTSUP) {
2115 			port->rx_queue_stats_mapping_enabled = 0;
2116 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2117 		} else
2119 			rte_exit(EXIT_FAILURE,
2120 					"set_rx_queue_stats_mapping_registers "
2121 					"failed for port id=%d diag=%d\n",
2122 					pi, diag);
2123 	}
2124 }
2125 
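/*
 * Start from the PMD's default RX/TX ring configuration and override only
 * the thresholds that were explicitly supplied on the command line.
 */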
2126 static void
2127 rxtx_port_config(struct rte_port *port)
2128 {
2129 	port->rx_conf = port->dev_info.default_rxconf;
2130 	port->tx_conf = port->dev_info.default_txconf;
2131 
2132 	/* Check if any RX/TX parameters have been passed */
2133 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2134 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2135 
2136 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2137 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2138 
2139 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2140 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2141 
2142 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2143 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2144 
2145 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2146 		port->rx_conf.rx_drop_en = rx_drop_en;
2147 
2148 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2149 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2150 
2151 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2152 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2153 
2154 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2155 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2156 
2157 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2158 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2159 
2160 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2161 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2162 }
2163 
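/*
 * Build the default per-port configuration: enable RSS when several RX
 * queues are requested, pick up command-line threshold overrides, and arm
 * LSC/RMV interrupts when both requested and supported by the device.
 */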
2164 void
2165 init_port_config(void)
2166 {
2167 	portid_t pid;
2168 	struct rte_port *port;
2169 
2170 	RTE_ETH_FOREACH_DEV(pid) {
2171 		port = &ports[pid];
2172 		port->dev_conf.fdir_conf = fdir_conf;
2173 		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2174 		port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2175 			(nb_rxq > 1) ? rss_hf : 0;
2180 
2181 		if (port->dcb_flag == 0) {
2182 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2183 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2184 			else
2185 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2186 		}
2187 
2188 		rxtx_port_config(port);
2189 
2190 		rte_eth_macaddr_get(pid, &port->eth_addr);
2191 
2192 		map_port_queue_stats_mapping_registers(pid, port);
2193 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2194 		rte_pmd_ixgbe_bypass_init(pid);
2195 #endif
2196 
2197 		if (lsc_interrupt &&
2198 		    (rte_eth_devices[pid].data->dev_flags &
2199 		     RTE_ETH_DEV_INTR_LSC))
2200 			port->dev_conf.intr_conf.lsc = 1;
2201 		if (rmv_interrupt &&
2202 		    (rte_eth_devices[pid].data->dev_flags &
2203 		     RTE_ETH_DEV_INTR_RMV))
2204 			port->dev_conf.intr_conf.rmv = 1;
2205 
2206 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2207 		/* Detect softnic port */
2208 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2209 			port->softnic_enable = 1;
2210 			memset(&port->softport, 0, sizeof(struct softnic_port));
2211 
2212 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2213 				port->softport.tm_flag = 1;
2214 		}
2215 #endif
2216 	}
2217 }
2218 
2219 void set_port_slave_flag(portid_t slave_pid)
2220 {
2221 	struct rte_port *port;
2222 
2223 	port = &ports[slave_pid];
2224 	port->slave_flag = 1;
2225 }
2226 
2227 void clear_port_slave_flag(portid_t slave_pid)
2228 {
2229 	struct rte_port *port;
2230 
2231 	port = &ports[slave_pid];
2232 	port->slave_flag = 0;
2233 }
2234 
2235 uint8_t port_is_bonding_slave(portid_t slave_pid)
2236 {
2237 	struct rte_port *port;
2238 
2239 	port = &ports[slave_pid];
2240 	return port->slave_flag;
2241 }
2242 
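/* VLAN IDs pre-programmed into the VMDq+DCB pool mapping built below. */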
2243 const uint16_t vlan_tags[] = {
2244 		0,  1,  2,  3,  4,  5,  6,  7,
2245 		8,  9, 10, 11,  12, 13, 14, 15,
2246 		16, 17, 18, 19, 20, 21, 22, 23,
2247 		24, 25, 26, 27, 28, 29, 30, 31
2248 };
2249 
2250 static  int
2251 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2252 		 enum dcb_mode_enable dcb_mode,
2253 		 enum rte_eth_nb_tcs num_tcs,
2254 		 uint8_t pfc_en)
2255 {
2256 	uint8_t i;
2257 
2258 	/*
2259 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2260 	 * given above, and the number of traffic classes available for use.
2261 	 */
2262 	if (dcb_mode == DCB_VT_ENABLED) {
2263 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2264 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2265 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2266 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2267 
2268 		/* VMDQ+DCB RX and TX configurations */
2269 		vmdq_rx_conf->enable_default_pool = 0;
2270 		vmdq_rx_conf->default_pool = 0;
2271 		vmdq_rx_conf->nb_queue_pools =
2272 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2273 		vmdq_tx_conf->nb_queue_pools =
2274 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2275 
2276 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
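		/*
		 * One pool map per pool: with ETH_4_TCS there are 32 pools,
		 * so each of the 32 VLAN tags above gets its own pool; with
		 * ETH_8_TCS only the first 16 tags are mapped.
		 */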
2277 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2278 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2279 			vmdq_rx_conf->pool_map[i].pools =
2280 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2281 		}
2282 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2283 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2284 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2285 		}
2286 
2287 		/* set DCB mode of RX and TX of multiple queues */
2288 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2289 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2290 	} else {
2291 		struct rte_eth_dcb_rx_conf *rx_conf =
2292 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2293 		struct rte_eth_dcb_tx_conf *tx_conf =
2294 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2295 
2296 		rx_conf->nb_tcs = num_tcs;
2297 		tx_conf->nb_tcs = num_tcs;
2298 
2299 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2300 			rx_conf->dcb_tc[i] = i % num_tcs;
2301 			tx_conf->dcb_tc[i] = i % num_tcs;
2302 		}
2303 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2304 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2305 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2306 	}
2307 
2308 	if (pfc_en)
2309 		eth_conf->dcb_capability_en =
2310 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2311 	else
2312 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2313 
2314 	return 0;
2315 }
2316 
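/*
 * Reconfigure a port for DCB testing: build the DCB configuration, force
 * VLAN filtering on, and resize the global rxq/txq counts to match what
 * the selected mode and the device expose.
 */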
2317 int
2318 init_port_dcb_config(portid_t pid,
2319 		     enum dcb_mode_enable dcb_mode,
2320 		     enum rte_eth_nb_tcs num_tcs,
2321 		     uint8_t pfc_en)
2322 {
2323 	struct rte_eth_conf port_conf;
2324 	struct rte_port *rte_port;
2325 	int retval;
2326 	uint16_t i;
2327 
2328 	rte_port = &ports[pid];
2329 
2330 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2331 	/* Enter DCB configuration status */
2332 	dcb_config = 1;
2333 
2334 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2335 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2336 	if (retval < 0)
2337 		return retval;
2338 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2339 
2340 	/*
2341 	 * Write the configuration into the device.
2342 	 * Set the numbers of RX & TX queues to 0, so
2343 	 * the RX & TX queues will not be set up here.
2344 	 */
2345 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2346 
2347 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2348 
2349 	/* If dev_info.vmdq_pool_base is greater than 0,
2350 	 * the queue IDs of the VMDq pools start after the PF queues.
2351 	 */
2352 	if (dcb_mode == DCB_VT_ENABLED &&
2353 	    rte_port->dev_info.vmdq_pool_base > 0) {
2354 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2355 			" for port %d.", pid);
2356 			" for port %d.\n", pid);
2357 	}
2358 
2359 	/* Assume the ports in testpmd have the same dcb capability
2360 	 * and the same number of rxq and txq in DCB mode.
2361 	 */
2362 	if (dcb_mode == DCB_VT_ENABLED) {
2363 		if (rte_port->dev_info.max_vfs > 0) {
2364 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2365 			nb_txq = rte_port->dev_info.nb_tx_queues;
2366 		} else {
2367 			nb_rxq = rte_port->dev_info.max_rx_queues;
2368 			nb_txq = rte_port->dev_info.max_tx_queues;
2369 		}
2370 	} else {
2371 		/* If VT is disabled, use all PF queues */
2372 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2373 			nb_rxq = rte_port->dev_info.max_rx_queues;
2374 			nb_txq = rte_port->dev_info.max_tx_queues;
2375 		} else {
2376 			nb_rxq = (queueid_t)num_tcs;
2377 			nb_txq = (queueid_t)num_tcs;
2379 		}
2380 	}
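	/* Fixed RX free threshold used for DCB runs (overrides any default) */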
2381 	rx_free_thresh = 64;
2382 
2383 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2384 
2385 	rxtx_port_config(rte_port);
2386 	/* VLAN filter */
2387 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2388 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2389 		rx_vft_set(pid, vlan_tags[i], 1);
2390 
2391 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2392 	map_port_queue_stats_mapping_registers(pid, rte_port);
2393 
2394 	rte_port->dcb_flag = 1;
2395 
2396 	return 0;
2397 }
2398 
2399 static void
2400 init_port(void)
2401 {
2402 	/* Configuration of Ethernet ports. */
2403 	ports = rte_zmalloc("testpmd: ports",
2404 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2405 			    RTE_CACHE_LINE_SIZE);
2406 	if (ports == NULL) {
2407 		rte_exit(EXIT_FAILURE,
2408 				"rte_zmalloc(%d struct rte_port) failed\n",
2409 				RTE_MAX_ETHPORTS);
2410 	}
2411 }
2412 
2413 static void
2414 force_quit(void)
2415 {
2416 	pmd_test_exit();
2417 	prompt_exit();
2418 }
2419 
2420 static void
2421 print_stats(void)
2422 {
2423 	uint8_t i;
2424 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2425 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2426 
2427 	/* Clear screen and move to top left */
2428 	printf("%s%s", clr, top_left);
2429 
2430 	printf("\nPort statistics ====================================");
2431 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2432 		nic_stats_display(fwd_ports_ids[i]);
2433 }
2434 
2435 static void
2436 signal_handler(int signum)
2437 {
2438 	if (signum == SIGINT || signum == SIGTERM) {
2439 		printf("\nSignal %d received, preparing to exit...\n",
2440 				signum);
2441 #ifdef RTE_LIBRTE_PDUMP
2442 		/* uninitialize packet capture framework */
2443 		rte_pdump_uninit();
2444 #endif
2445 #ifdef RTE_LIBRTE_LATENCY_STATS
2446 		rte_latencystats_uninit();
2447 #endif
2448 		force_quit();
2449 		/* Set flag to indicate the force termination. */
2450 		/* Set flag to indicate forced termination. */
2451 		/* exit with the expected status */
2452 		signal(signum, SIG_DFL);
2453 		kill(getpid(), signum);
2454 	}
2455 }
2456 
2457 int
2458 main(int argc, char** argv)
2459 {
2460 	int  diag;
2461 	portid_t port_id;
2462 
2463 	signal(SIGINT, signal_handler);
2464 	signal(SIGTERM, signal_handler);
2465 
2466 	diag = rte_eal_init(argc, argv);
2467 	if (diag < 0)
2468 		rte_panic("Cannot init EAL\n");
2469 
2470 	testpmd_logtype = rte_log_register("testpmd");
2471 	if (testpmd_logtype < 0)
2472 		rte_panic("Cannot register log type");
2473 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2474 
2475 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2476 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2477 			strerror(errno));
2478 	}
2479 
2480 #ifdef RTE_LIBRTE_PDUMP
2481 	/* initialize packet capture framework */
2482 	rte_pdump_init(NULL);
2483 #endif
2484 
2485 	nb_ports = (portid_t) rte_eth_dev_count();
2486 	if (nb_ports == 0)
2487 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2488 
2489 	/* allocate port structures, and init them */
2490 	init_port();
2491 
2492 	set_def_fwd_config();
2493 	if (nb_lcores == 0)
2494 		rte_panic("Empty set of forwarding logical cores - check the "
2495 			  "core mask supplied in the command parameters\n");
2496 
2497 	/* Bitrate/latency stats disabled by default */
2498 #ifdef RTE_LIBRTE_BITRATE
2499 	bitrate_enabled = 0;
2500 #endif
2501 #ifdef RTE_LIBRTE_LATENCY_STATS
2502 	latencystats_enabled = 0;
2503 #endif
2504 
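	/*
	 * rte_eal_init() returned the number of arguments it consumed, so
	 * skip past the EAL options and hand only testpmd's own options to
	 * launch_args_parse(). For example (hypothetical invocation):
	 *   testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
	 */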
2505 	argc -= diag;
2506 	argv += diag;
2507 	if (argc > 1)
2508 		launch_args_parse(argc, argv);
2509 
2510 	if (tx_first && interactive)
2511 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2512 				"interactive mode.\n");
2513 
2514 	if (tx_first && lsc_interrupt) {
2515 		printf("Warning: lsc_interrupt needs to be off when "
2516 				"using tx_first. Disabling.\n");
2517 		lsc_interrupt = 0;
2518 	}
2519 
2520 	if (!nb_rxq && !nb_txq)
2521 		printf("Warning: Either the number of RX or TX queues should be non-zero\n");
2522 
2523 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2524 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2525 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2526 		       nb_rxq, nb_txq);
2527 
2528 	init_config();
2529 	if (start_port(RTE_PORT_ALL) != 0)
2530 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2531 
2532 	/* set all ports to promiscuous mode by default */
2533 	RTE_ETH_FOREACH_DEV(port_id)
2534 		rte_eth_promiscuous_enable(port_id);
2535 
2536 	/* Init metrics library */
2537 	rte_metrics_init(rte_socket_id());
2538 
2539 #ifdef RTE_LIBRTE_LATENCY_STATS
2540 	if (latencystats_enabled != 0) {
2541 		int ret = rte_latencystats_init(1, NULL);
2542 		if (ret)
2543 			printf("Warning: latencystats init()"
2544 				" returned error %d\n", ret);
2545 		printf("Latencystats running on lcore %d\n",
2546 			latencystats_lcore_id);
2547 	}
2548 #endif
2549 
2550 	/* Setup bitrate stats */
2551 #ifdef RTE_LIBRTE_BITRATE
2552 	if (bitrate_enabled != 0) {
2553 		bitrate_data = rte_stats_bitrate_create();
2554 		if (bitrate_data == NULL)
2555 			rte_exit(EXIT_FAILURE,
2556 				"Could not allocate bitrate data.\n");
2557 		rte_stats_bitrate_reg(bitrate_data);
2558 	}
2559 #endif
2560 
2561 #ifdef RTE_LIBRTE_CMDLINE
2562 	if (strlen(cmdline_filename) != 0)
2563 		cmdline_read_from_file(cmdline_filename);
2564 
2565 	if (interactive == 1) {
2566 		if (auto_start) {
2567 			printf("Start automatic packet forwarding\n");
2568 			start_packet_forwarding(0);
2569 		}
2570 		prompt();
2571 		pmd_test_exit();
2572 	} else
2573 #endif
2574 	{
2575 		char c;
2576 		int rc;
2577 
2578 		f_quit = 0;
2579 
2580 		printf("No interactive command line requested, starting packet forwarding\n");
2581 		start_packet_forwarding(tx_first);
2582 		if (stats_period != 0) {
2583 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2584 			uint64_t timer_period;
2585 
2586 			/* Convert to number of cycles */
2587 			timer_period = stats_period * rte_get_timer_hz();
2588 
2589 			while (f_quit == 0) {
2590 				cur_time = rte_get_timer_cycles();
2591 				diff_time += cur_time - prev_time;
2592 
2593 				if (diff_time >= timer_period) {
2594 					print_stats();
2595 					/* Reset the timer */
2596 					diff_time = 0;
2597 				}
2598 				/* Sleep to avoid unnecessary checks */
2599 				prev_time = cur_time;
2600 				sleep(1);
2601 			}
2602 		}
2603 
2604 		printf("Press enter to exit\n");
2605 		rc = read(0, &c, 1);
2606 		pmd_test_exit();
2607 		if (rc < 0)
2608 			return 1;
2609 	}
2610 
2611 	return 0;
2612 }
2613