xref: /dpdk/app/test-pmd/testpmd.c (revision d429cc0b53735cc7b1e304ec1d0f35ae06ace7d0)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18 
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22 
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
43 #include <rte_pci.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
46 #include <rte_dev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
50 #endif
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
53 #endif
54 #include <rte_flow.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
58 #endif
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
61 #endif
62 
63 #include "testpmd.h"
64 
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
67 
68 /* Use the master core for the command line? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
71 uint8_t tx_first;
72 char cmdline_filename[PATH_MAX] = {0};
73 
74 /*
75  * NUMA support configuration.
76  * When set, NUMA support attempts to dispatch the allocation of the
77  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78  * probed ports among the detected CPU sockets.
79  * Otherwise, all memory is allocated from CPU socket 0.
80  */
81 uint8_t numa_support = 1; /**< numa enabled by default */
82 
83 /*
84  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
85  * not configured.
86  */
87 uint8_t socket_num = UMA_NO_CONFIG;
88 
89 /*
90  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
91  */
92 uint8_t mp_anon = 0;
93 
94 /*
95  * Store the specified sockets on which the memory pools used by
96  * the ports are allocated.
97  */
98 uint8_t port_numa[RTE_MAX_ETHPORTS];
99 
100 /*
101  * Store the specified sockets on which the RX rings used by
102  * the ports are allocated.
103  */
104 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
105 
106 /*
107  * Store the specified sockets on which the TX rings used by
108  * the ports are allocated.
109  */
110 uint8_t txring_numa[RTE_MAX_ETHPORTS];
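
/*
 * Illustrative note (not part of the original code): the three arrays above
 * are filled from the command line. The option syntax below is an assumption
 * based on testpmd's documented parameters, e.g.:
 *
 *   testpmd -l 0-3 -n 4 -- --numa \
 *           --port-numa-config="(0,0),(1,1)" \
 *           --ring-numa-config="(0,1,0),(1,2,1)"
 *
 * which would allocate port 0 memory on socket 0, port 1 memory on socket 1,
 * port 0's RX rings on socket 0 and port 1's TX rings on socket 1.
 */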
111 
112 /*
113  * Record the Ethernet address of peer target ports to which packets are
114  * forwarded.
115  * Must be instantiated with the ethernet addresses of peer traffic generator
116  * ports.
117  */
118 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
119 portid_t nb_peer_eth_addrs = 0;
120 
121 /*
122  * Probed Target Environment.
123  */
124 struct rte_port *ports;	       /**< For all probed ethernet ports. */
125 portid_t nb_ports;             /**< Number of probed ethernet ports. */
126 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
127 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
128 
129 /*
130  * Test Forwarding Configuration.
131  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
132  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
133  */
134 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
135 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
136 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
137 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
138 
139 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
140 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
141 
142 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
143 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
144 
145 /*
146  * Forwarding engines.
147  */
148 struct fwd_engine * fwd_engines[] = {
149 	&io_fwd_engine,
150 	&mac_fwd_engine,
151 	&mac_swap_engine,
152 	&flow_gen_engine,
153 	&rx_only_engine,
154 	&tx_only_engine,
155 	&csum_fwd_engine,
156 	&icmp_echo_engine,
157 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
158 	&softnic_tm_engine,
159 	&softnic_tm_bypass_engine,
160 #endif
161 #ifdef RTE_LIBRTE_IEEE1588
162 	&ieee1588_fwd_engine,
163 #endif
164 	NULL,
165 };
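
/*
 * Illustrative sketch (hypothetical helper, not compiled): a forwarding mode
 * such as "set fwd csum" is resolved by scanning this NULL-terminated array
 * and comparing each engine's fwd_mode_name:
 */
#if 0
static struct fwd_engine *
example_find_fwd_engine(const char *name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}
#endif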
166 
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
172 
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
175                                       * specified on the command line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
177 
178 /*
179  * In a container, a process running with the 'stats-period' option cannot be
180  * terminated. Set a flag to exit the stats-period loop on SIGINT/SIGTERM.
181  */
182 uint8_t f_quit;
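
/*
 * Minimal sketch (hypothetical handler name, not compiled) of how f_quit is
 * intended to be used: a SIGINT/SIGTERM handler sets the flag, and the
 * stats-period loop polls it to exit cleanly.
 */
#if 0
static void
example_signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM)
		f_quit = 1; /* checked by the stats-period display loop */
}
#endif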
183 
184 /*
185  * Configuration of packet segments used by the "txonly" processing engine.
186  */
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 	TXONLY_DEF_PACKET_LEN,
190 };
191 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
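
/*
 * Example (illustrative, assuming the documented "set txpkts" command): after
 * "set txpkts 64,64,128", tx_pkt_seg_lengths holds {64, 64, 128},
 * tx_pkt_nb_segs is 3 and tx_pkt_length is 256, so each TXONLY packet is
 * built from three chained mbuf segments.
 */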
192 
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
195 
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198 
199 /* Whether the current configuration is in DCB mode; 0 means it is not. */
200 uint8_t dcb_config = 0;
201 
202 /* Whether DCB is in testing status */
203 uint8_t dcb_test = 0;
204 
205 /*
206  * Configurable number of RX/TX queues.
207  */
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
210 
211 /*
212  * Configurable number of RX/TX ring descriptors.
213  */
214 #define RTE_TEST_RX_DESC_DEFAULT 1024
215 #define RTE_TEST_TX_DESC_DEFAULT 1024
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
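
/*
 * Example (illustrative, assuming the documented option/command syntax): the
 * defaults above can be overridden at startup with "--rxd=512 --txd=512", or
 * at run time with "port config all rxd 512" while the ports are stopped.
 */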
218 
219 #define RTE_PMD_PARAM_UNSET -1
220 /*
221  * Configurable values of RX and TX ring threshold registers.
222  */
223 
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227 
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
231 
232 /*
233  * Configurable value of RX free threshold.
234  */
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
236 
237 /*
238  * Configurable value of RX drop enable.
239  */
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
241 
242 /*
243  * Configurable value of TX free threshold.
244  */
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
246 
247 /*
248  * Configurable value of TX RS bit threshold.
249  */
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
251 
252 /*
253  * Receive Side Scaling (RSS) configuration.
254  */
255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
256 
257 /*
258  * Port topology configuration
259  */
260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
261 
262 /*
263  * Avoid flushing all the RX queues before starting forwarding.
264  */
265 uint8_t no_flush_rx = 0; /* flush by default */
266 
267 /*
268  * Flow API isolated mode.
269  */
270 uint8_t flow_isolate_all;
271 
272 /*
273  * Avoid checking the link status when starting/stopping a port.
274  */
275 uint8_t no_link_check = 0; /* check by default */
276 
277 /*
278  * Enable link status change notification
279  */
280 uint8_t lsc_interrupt = 1; /* enabled by default */
281 
282 /*
283  * Enable device removal notification.
284  */
285 uint8_t rmv_interrupt = 1; /* enabled by default */
286 
287 /*
288  * Display or mask Ethernet events.
289  * Default to all events except VF_MBOX
290  */
291 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
292 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
293 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
294 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
295 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
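
/*
 * Illustrative sketch (hypothetical function, not compiled): event display is
 * decided by testing the event's bit in event_print_mask, and VF_MBOX events
 * can be enabled by setting their bit:
 */
#if 0
static void
example_event_mask_usage(enum rte_eth_event_type type)
{
	/* also display VF_MBOX events, which are masked by default */
	event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;

	if (event_print_mask & (UINT32_C(1) << type))
		printf("event %d would be displayed\n", type);
}
#endif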
297 
298 /*
299  * NIC bypass mode configuration options.
300  */
301 
302 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
303 /* The NIC bypass watchdog timeout. */
304 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
305 #endif
306 
307 
308 #ifdef RTE_LIBRTE_LATENCY_STATS
309 
310 /*
311  * Set when latency stats are enabled on the command line.
312  */
313 uint8_t latencystats_enabled;
314 
315 /*
316  * Lcore ID to service latency statistics.
317  */
318 lcoreid_t latencystats_lcore_id = -1;
319 
320 #endif
321 
322 /*
323  * Ethernet device configuration.
324  */
325 struct rte_eth_rxmode rx_mode = {
326 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
328 	.ignore_offload_bitfield = 1,
329 };
330 
331 struct rte_eth_txmode tx_mode = {
332 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
333 };
334 
335 struct rte_fdir_conf fdir_conf = {
336 	.mode = RTE_FDIR_MODE_NONE,
337 	.pballoc = RTE_FDIR_PBALLOC_64K,
338 	.status = RTE_FDIR_REPORT_STATUS,
339 	.mask = {
340 		.vlan_tci_mask = 0x0,
341 		.ipv4_mask     = {
342 			.src_ip = 0xFFFFFFFF,
343 			.dst_ip = 0xFFFFFFFF,
344 		},
345 		.ipv6_mask     = {
346 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
348 		},
349 		.src_port_mask = 0xFFFF,
350 		.dst_port_mask = 0xFFFF,
351 		.mac_addr_byte_mask = 0xFF,
352 		.tunnel_type_mask = 1,
353 		.tunnel_id_mask = 0xFFFFFFFF,
354 	},
355 	.drop_queue = 127,
356 };
357 
358 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
359 
360 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
361 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
362 
363 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
364 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
365 
366 uint16_t nb_tx_queue_stats_mappings = 0;
367 uint16_t nb_rx_queue_stats_mappings = 0;
368 
369 /*
370  * Display zero values by default for xstats
371  */
372 uint8_t xstats_hide_zero;
373 
374 unsigned int num_sockets = 0;
375 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
376 
377 #ifdef RTE_LIBRTE_BITRATE
378 /* Bitrate statistics */
379 struct rte_stats_bitrates *bitrate_data;
380 lcoreid_t bitrate_lcore_id;
381 uint8_t bitrate_enabled;
382 #endif
383 
384 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
385 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
386 
387 /* Forward function declarations */
388 static void map_port_queue_stats_mapping_registers(portid_t pi,
389 						   struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 			      enum rte_eth_event_type type,
393 			      void *param, void *ret_param);
394 
395 /*
396  * Check if all the ports are started.
397  * If yes, return positive value. If not, return zero.
398  */
399 static int all_ports_started(void);
400 
401 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
402 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
403 
404 /*
405  * Helper function to check whether a socket has already been discovered.
406  * If not yet discovered, return a positive value; otherwise return zero.
407  */
408 int
409 new_socket_id(unsigned int socket_id)
410 {
411 	unsigned int i;
412 
413 	for (i = 0; i < num_sockets; i++) {
414 		if (socket_ids[i] == socket_id)
415 			return 0;
416 	}
417 	return 1;
418 }
419 
420 /*
421  * Setup default configuration.
422  */
423 static void
424 set_default_fwd_lcores_config(void)
425 {
426 	unsigned int i;
427 	unsigned int nb_lc;
428 	unsigned int sock_num;
429 
430 	nb_lc = 0;
431 	for (i = 0; i < RTE_MAX_LCORE; i++) {
432 		sock_num = rte_lcore_to_socket_id(i);
433 		if (new_socket_id(sock_num)) {
434 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
435 				rte_exit(EXIT_FAILURE,
436 					 "Total sockets greater than %u\n",
437 					 RTE_MAX_NUMA_NODES);
438 			}
439 			socket_ids[num_sockets++] = sock_num;
440 		}
441 		if (!rte_lcore_is_enabled(i))
442 			continue;
443 		if (i == rte_get_master_lcore())
444 			continue;
445 		fwd_lcores_cpuids[nb_lc++] = i;
446 	}
447 	nb_lcores = (lcoreid_t) nb_lc;
448 	nb_cfg_lcores = nb_lcores;
449 	nb_fwd_lcores = 1;
450 }
451 
452 static void
453 set_def_peer_eth_addrs(void)
454 {
455 	portid_t i;
456 
457 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459 		peer_eth_addrs[i].addr_bytes[5] = i;
460 	}
461 }
462 
463 static void
464 set_default_fwd_ports_config(void)
465 {
466 	portid_t pt_id;
467 	int i = 0;
468 
469 	RTE_ETH_FOREACH_DEV(pt_id)
470 		fwd_ports_ids[i++] = pt_id;
471 
472 	nb_cfg_ports = nb_ports;
473 	nb_fwd_ports = nb_ports;
474 }
475 
476 void
477 set_def_fwd_config(void)
478 {
479 	set_default_fwd_lcores_config();
480 	set_def_peer_eth_addrs();
481 	set_default_fwd_ports_config();
482 }
483 
484 /*
485  * Configuration initialisation done once at init time.
486  */
487 static void
488 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
489 		 unsigned int socket_id)
490 {
491 	char pool_name[RTE_MEMPOOL_NAMESIZE];
492 	struct rte_mempool *rte_mp = NULL;
493 	uint32_t mb_size;
494 
495 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
496 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
497 
498 	TESTPMD_LOG(INFO,
499 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
500 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
501 
502 	if (mp_anon != 0) {
503 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504 			mb_size, (unsigned) mb_mempool_cache,
505 			sizeof(struct rte_pktmbuf_pool_private),
506 			socket_id, 0);
507 		if (rte_mp == NULL)
508 			goto err;
509 
510 		if (rte_mempool_populate_anon(rte_mp) == 0) {
511 			rte_mempool_free(rte_mp);
512 			rte_mp = NULL;
513 			goto err;
514 		}
515 		rte_pktmbuf_pool_init(rte_mp, NULL);
516 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
517 	} else {
518 		/* wrapper to rte_mempool_create() */
519 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
520 				rte_mbuf_best_mempool_ops());
521 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
522 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
523 	}
524 
525 err:
526 	if (rte_mp == NULL) {
527 		rte_exit(EXIT_FAILURE,
528 			"Creation of mbuf pool for socket %u failed: %s\n",
529 			socket_id, rte_strerror(rte_errno));
530 	} else if (verbose_level > 0) {
531 		rte_mempool_dump(stdout, rte_mp);
532 	}
533 }
534 
535 /*
536  * Check whether the given socket ID is valid in NUMA mode.
537  * If valid, return 0; otherwise return -1.
538  */
539 static int
540 check_socket_id(const unsigned int socket_id)
541 {
542 	static int warning_once = 0;
543 
544 	if (new_socket_id(socket_id)) {
545 		if (!warning_once && numa_support)
546 			printf("Warning: NUMA should be configured manually by"
547 			       " using --port-numa-config and"
548 			       " --ring-numa-config parameters along with"
549 			       " --numa.\n");
550 		warning_once = 1;
551 		return -1;
552 	}
553 	return 0;
554 }
555 
556 /*
557  * Get the allowed maximum number of RX queues.
558  * *pid returns the port ID that has the minimal value of
559  * max_rx_queues among all ports.
560  */
561 queueid_t
562 get_allowed_max_nb_rxq(portid_t *pid)
563 {
564 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
565 	portid_t pi;
566 	struct rte_eth_dev_info dev_info;
567 
568 	RTE_ETH_FOREACH_DEV(pi) {
569 		rte_eth_dev_info_get(pi, &dev_info);
570 		if (dev_info.max_rx_queues < allowed_max_rxq) {
571 			allowed_max_rxq = dev_info.max_rx_queues;
572 			*pid = pi;
573 		}
574 	}
575 	return allowed_max_rxq;
576 }
577 
578 /*
579  * Check whether the input rxq is valid.
580  * The input rxq is valid if it is not greater than the maximum number
581  * of RX queues of any port.
582  * If valid, return 0; otherwise return -1.
583  */
584 int
585 check_nb_rxq(queueid_t rxq)
586 {
587 	queueid_t allowed_max_rxq;
588 	portid_t pid = 0;
589 
590 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
591 	if (rxq > allowed_max_rxq) {
592 		printf("Fail: input rxq (%u) can't be greater "
593 		       "than max_rx_queues (%u) of port %u\n",
594 		       rxq,
595 		       allowed_max_rxq,
596 		       pid);
597 		return -1;
598 	}
599 	return 0;
600 }
601 
602 /*
603  * Get the allowed maximum number of TX queues.
604  * *pid returns the port ID that has the minimal value of
605  * max_tx_queues among all ports.
606  */
607 queueid_t
608 get_allowed_max_nb_txq(portid_t *pid)
609 {
610 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
611 	portid_t pi;
612 	struct rte_eth_dev_info dev_info;
613 
614 	RTE_ETH_FOREACH_DEV(pi) {
615 		rte_eth_dev_info_get(pi, &dev_info);
616 		if (dev_info.max_tx_queues < allowed_max_txq) {
617 			allowed_max_txq = dev_info.max_tx_queues;
618 			*pid = pi;
619 		}
620 	}
621 	return allowed_max_txq;
622 }
623 
624 /*
625  * Check whether the input txq is valid.
626  * The input txq is valid if it is not greater than the maximum number
627  * of TX queues of any port.
628  * If valid, return 0; otherwise return -1.
629  */
630 int
631 check_nb_txq(queueid_t txq)
632 {
633 	queueid_t allowed_max_txq;
634 	portid_t pid = 0;
635 
636 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
637 	if (txq > allowed_max_txq) {
638 		printf("Fail: input txq (%u) can't be greater "
639 		       "than max_tx_queues (%u) of port %u\n",
640 		       txq,
641 		       allowed_max_txq,
642 		       pid);
643 		return -1;
644 	}
645 	return 0;
646 }
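
/*
 * Illustrative usage fragment (not compiled): the two validators above are
 * meant to guard queue-count changes, roughly as the option parser does:
 */
#if 0
	if (check_nb_rxq(nb_rxq) != 0 || check_nb_txq(nb_txq) != 0)
		rte_exit(EXIT_FAILURE, "invalid rxq/txq configuration\n");
#endif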
647 
648 static void
649 init_config(void)
650 {
651 	portid_t pid;
652 	struct rte_port *port;
653 	struct rte_mempool *mbp;
654 	unsigned int nb_mbuf_per_pool;
655 	lcoreid_t  lc_id;
656 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
657 	struct rte_gro_param gro_param;
658 	uint32_t gso_types;
659 
660 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
661 
662 	if (numa_support) {
663 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
664 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
665 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
666 	}
667 
668 	/* Configuration of logical cores. */
669 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
670 				sizeof(struct fwd_lcore *) * nb_lcores,
671 				RTE_CACHE_LINE_SIZE);
672 	if (fwd_lcores == NULL) {
673 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
674 							"failed\n", nb_lcores);
675 	}
676 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
677 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
678 					       sizeof(struct fwd_lcore),
679 					       RTE_CACHE_LINE_SIZE);
680 		if (fwd_lcores[lc_id] == NULL) {
681 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
682 								"failed\n");
683 		}
684 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
685 	}
686 
687 	RTE_ETH_FOREACH_DEV(pid) {
688 		port = &ports[pid];
689 		/* Apply default TxRx configuration for all ports */
690 		port->dev_conf.txmode = tx_mode;
691 		port->dev_conf.rxmode = rx_mode;
692 		rte_eth_dev_info_get(pid, &port->dev_info);
693 		if (!(port->dev_info.tx_offload_capa &
694 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
695 			port->dev_conf.txmode.offloads &=
696 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
697 		if (numa_support) {
698 			if (port_numa[pid] != NUMA_NO_CONFIG)
699 				port_per_socket[port_numa[pid]]++;
700 			else {
701 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
702 
703 				/* if socket_id is invalid, set to 0 */
704 				if (check_socket_id(socket_id) < 0)
705 					socket_id = 0;
706 				port_per_socket[socket_id]++;
707 			}
708 		}
709 
710 		/* set flag to initialize port/queue */
711 		port->need_reconfig = 1;
712 		port->need_reconfig_queues = 1;
713 	}
714 
715 	/*
716 	 * Create mbuf pools.
717 	 * If NUMA support is disabled, create a single mbuf pool in
718 	 * socket 0 memory by default.
719 	 * Otherwise, create an mbuf pool in the memory of each detected socket.
720 	 *
721 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
722 	 * nb_txd can be reconfigured at run time.
723 	 */
724 	if (param_total_num_mbufs)
725 		nb_mbuf_per_pool = param_total_num_mbufs;
726 	else {
727 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
728 			(nb_lcores * mb_mempool_cache) +
729 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
730 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
731 	}
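	/*
	 * Worked example (illustrative; the constants are defined in testpmd.h
	 * and the values used here are assumptions): with
	 * RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, a 250-entry mempool cache and 4 lcores, one
	 * pool holds (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS mbufs.
	 */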
732 
733 	if (numa_support) {
734 		uint8_t i;
735 
736 		for (i = 0; i < num_sockets; i++)
737 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
738 					 socket_ids[i]);
739 	} else {
740 		if (socket_num == UMA_NO_CONFIG)
741 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
742 		else
743 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
744 						 socket_num);
745 	}
746 
747 	init_port_config();
748 
749 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
750 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
751 	/*
752 	 * Record which mbuf pool each logical core uses, if needed.
753 	 */
754 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
755 		mbp = mbuf_pool_find(
756 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
757 
758 		if (mbp == NULL)
759 			mbp = mbuf_pool_find(0);
760 		fwd_lcores[lc_id]->mbp = mbp;
761 		/* initialize GSO context */
762 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
763 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
764 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
765 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
766 			ETHER_CRC_LEN;
767 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
768 	}
769 
770 	/* Configuration of packet forwarding streams. */
771 	if (init_fwd_streams() < 0)
772 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
773 
774 	fwd_config_setup();
775 
776 	/* create a gro context for each lcore */
777 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
778 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
779 	gro_param.max_item_per_flow = MAX_PKT_BURST;
780 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
781 		gro_param.socket_id = rte_lcore_to_socket_id(
782 				fwd_lcores_cpuids[lc_id]);
783 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
784 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
785 			rte_exit(EXIT_FAILURE,
786 					"rte_gro_ctx_create() failed\n");
787 		}
788 	}
789 }
790 
791 
792 void
793 reconfig(portid_t new_port_id, unsigned socket_id)
794 {
795 	struct rte_port *port;
796 
797 	/* Reconfiguration of Ethernet ports. */
798 	port = &ports[new_port_id];
799 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
800 
801 	/* set flag to initialize port/queue */
802 	port->need_reconfig = 1;
803 	port->need_reconfig_queues = 1;
804 	port->socket_id = socket_id;
805 
806 	init_port_config();
807 }
808 
809 
810 int
811 init_fwd_streams(void)
812 {
813 	portid_t pid;
814 	struct rte_port *port;
815 	streamid_t sm_id, nb_fwd_streams_new;
816 	queueid_t q;
817 
818 	/* set the socket ID according to whether NUMA is used */
819 	RTE_ETH_FOREACH_DEV(pid) {
820 		port = &ports[pid];
821 		if (nb_rxq > port->dev_info.max_rx_queues) {
822 			printf("Fail: nb_rxq(%d) is greater than "
823 				"max_rx_queues(%d)\n", nb_rxq,
824 				port->dev_info.max_rx_queues);
825 			return -1;
826 		}
827 		if (nb_txq > port->dev_info.max_tx_queues) {
828 			printf("Fail: nb_txq(%d) is greater than "
829 				"max_tx_queues(%d)\n", nb_txq,
830 				port->dev_info.max_tx_queues);
831 			return -1;
832 		}
833 		if (numa_support) {
834 			if (port_numa[pid] != NUMA_NO_CONFIG)
835 				port->socket_id = port_numa[pid];
836 			else {
837 				port->socket_id = rte_eth_dev_socket_id(pid);
838 
839 				/* if socket_id is invalid, set to 0 */
840 				if (check_socket_id(port->socket_id) < 0)
841 					port->socket_id = 0;
842 			}
843 		}
844 		else {
845 			if (socket_num == UMA_NO_CONFIG)
846 				port->socket_id = 0;
847 			else
848 				port->socket_id = socket_num;
849 		}
850 	}
851 
852 	q = RTE_MAX(nb_rxq, nb_txq);
853 	if (q == 0) {
854 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
855 		return -1;
856 	}
857 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
858 	if (nb_fwd_streams_new == nb_fwd_streams)
859 		return 0;
860 	/* clear the old */
861 	if (fwd_streams != NULL) {
862 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
863 			if (fwd_streams[sm_id] == NULL)
864 				continue;
865 			rte_free(fwd_streams[sm_id]);
866 			fwd_streams[sm_id] = NULL;
867 		}
868 		rte_free(fwd_streams);
869 		fwd_streams = NULL;
870 	}
871 
872 	/* init new */
873 	nb_fwd_streams = nb_fwd_streams_new;
874 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
875 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
876 	if (fwd_streams == NULL)
877 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
878 						"failed\n", nb_fwd_streams);
879 
880 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
881 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
882 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
883 		if (fwd_streams[sm_id] == NULL)
884 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
885 								" failed\n");
886 	}
887 
888 	return 0;
889 }
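
/*
 * Example (illustrative): nb_fwd_streams is nb_ports * max(nb_rxq, nb_txq);
 * with 2 ports, 4 RX queues and 2 TX queues per port, init_fwd_streams()
 * allocates 2 * max(4, 2) = 8 forwarding streams.
 */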
890 
891 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
892 static void
893 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
894 {
895 	unsigned int total_burst;
896 	unsigned int nb_burst;
897 	unsigned int burst_stats[3];
898 	uint16_t pktnb_stats[3];
899 	uint16_t nb_pkt;
900 	int burst_percent[3];
901 
902 	/*
903 	 * First compute the total number of packet bursts and the
904 	 * two highest numbers of bursts of the same number of packets.
905 	 */
906 	total_burst = 0;
907 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
908 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
909 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
910 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
911 		if (nb_burst == 0)
912 			continue;
913 		total_burst += nb_burst;
914 		if (nb_burst > burst_stats[0]) {
915 			burst_stats[1] = burst_stats[0];
916 			pktnb_stats[1] = pktnb_stats[0];
917 			burst_stats[0] = nb_burst;
918 			pktnb_stats[0] = nb_pkt;
919 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
920 	}
921 	if (total_burst == 0)
922 		return;
923 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
924 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
925 	       burst_percent[0], (int) pktnb_stats[0]);
926 	if (burst_stats[0] == total_burst) {
927 		printf("]\n");
928 		return;
929 	}
930 	if (burst_stats[0] + burst_stats[1] == total_burst) {
931 		printf(" + %d%% of %d pkts]\n",
932 		       100 - burst_percent[0], pktnb_stats[1]);
933 		return;
934 	}
935 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
936 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
937 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
938 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
939 		return;
940 	}
941 	printf(" + %d%% of %d pkts + %d%% of others]\n",
942 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
943 }
944 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
945 
946 static void
947 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
948 {
949 	struct rte_port *port;
950 	uint8_t i;
951 
952 	static const char *fwd_stats_border = "----------------------";
953 
954 	port = &ports[port_id];
955 	printf("\n  %s Forward statistics for port %-2d %s\n",
956 	       fwd_stats_border, port_id, fwd_stats_border);
957 
958 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
959 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
960 		       "%-"PRIu64"\n",
961 		       stats->ipackets, stats->imissed,
962 		       (uint64_t) (stats->ipackets + stats->imissed));
963 
964 		if (cur_fwd_eng == &csum_fwd_engine)
965 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
966 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
967 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
968 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
969 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
970 		}
971 
972 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
973 		       "%-"PRIu64"\n",
974 		       stats->opackets, port->tx_dropped,
975 		       (uint64_t) (stats->opackets + port->tx_dropped));
976 	}
977 	else {
978 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
979 		       "%14"PRIu64"\n",
980 		       stats->ipackets, stats->imissed,
981 		       (uint64_t) (stats->ipackets + stats->imissed));
982 
983 		if (cur_fwd_eng == &csum_fwd_engine)
984 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
985 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
986 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
987 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
988 			printf("  RX-nombufs:             %14"PRIu64"\n",
989 			       stats->rx_nombuf);
990 		}
991 
992 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
993 		       "%14"PRIu64"\n",
994 		       stats->opackets, port->tx_dropped,
995 		       (uint64_t) (stats->opackets + port->tx_dropped));
996 	}
997 
998 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
999 	if (port->rx_stream)
1000 		pkt_burst_stats_display("RX",
1001 			&port->rx_stream->rx_burst_stats);
1002 	if (port->tx_stream)
1003 		pkt_burst_stats_display("TX",
1004 			&port->tx_stream->tx_burst_stats);
1005 #endif
1006 
1007 	if (port->rx_queue_stats_mapping_enabled) {
1008 		printf("\n");
1009 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1010 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1011 			       "     RX-errors:%14"PRIu64
1012 			       "    RX-bytes:%14"PRIu64"\n",
1013 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1014 		}
1015 		printf("\n");
1016 	}
1017 	if (port->tx_queue_stats_mapping_enabled) {
1018 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1019 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1020 			       "                                 TX-bytes:%14"PRIu64"\n",
1021 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1022 		}
1023 	}
1024 
1025 	printf("  %s--------------------------------%s\n",
1026 	       fwd_stats_border, fwd_stats_border);
1027 }
1028 
1029 static void
1030 fwd_stream_stats_display(streamid_t stream_id)
1031 {
1032 	struct fwd_stream *fs;
1033 	static const char *fwd_top_stats_border = "-------";
1034 
1035 	fs = fwd_streams[stream_id];
1036 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1037 	    (fs->fwd_dropped == 0))
1038 		return;
1039 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1040 	       "TX Port=%2d/Queue=%2d %s\n",
1041 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1042 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1043 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1044 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1045 
1046 	/* if in checksum forwarding mode */
1047 	if (cur_fwd_eng == &csum_fwd_engine) {
1048 		printf("  RX bad IP checksum: %-14u  RX bad L4 checksum: "
1049 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1050 	}
1051 
1052 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1053 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1054 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1055 #endif
1056 }
1057 
1058 static void
1059 flush_fwd_rx_queues(void)
1060 {
1061 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1062 	portid_t  rxp;
1063 	portid_t port_id;
1064 	queueid_t rxq;
1065 	uint16_t  nb_rx;
1066 	uint16_t  i;
1067 	uint8_t   j;
1068 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1069 	uint64_t timer_period;
1070 
1071 	/* convert to number of cycles */
1072 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1073 
1074 	for (j = 0; j < 2; j++) {
1075 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1076 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1077 				port_id = fwd_ports_ids[rxp];
1078 				/*
1079 				 * testpmd can get stuck in the do-while loop
1080 				 * below if rte_eth_rx_burst() always returns
1081 				 * nonzero packets, so a timer is added to
1082 				 * exit the loop after a 1-second timeout.
1083 				 */
1084 				prev_tsc = rte_rdtsc();
1085 				do {
1086 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1087 						pkts_burst, MAX_PKT_BURST);
1088 					for (i = 0; i < nb_rx; i++)
1089 						rte_pktmbuf_free(pkts_burst[i]);
1090 
1091 					cur_tsc = rte_rdtsc();
1092 					diff_tsc = cur_tsc - prev_tsc;
1093 					timer_tsc += diff_tsc;
1094 				} while ((nb_rx > 0) &&
1095 					(timer_tsc < timer_period));
1096 				timer_tsc = 0;
1097 			}
1098 		}
1099 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1100 	}
1101 }
1102 
1103 static void
1104 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1105 {
1106 	struct fwd_stream **fsm;
1107 	streamid_t nb_fs;
1108 	streamid_t sm_id;
1109 #ifdef RTE_LIBRTE_BITRATE
1110 	uint64_t tics_per_1sec;
1111 	uint64_t tics_datum;
1112 	uint64_t tics_current;
1113 	uint8_t idx_port, cnt_ports;
1114 
1115 	cnt_ports = rte_eth_dev_count();
1116 	tics_datum = rte_rdtsc();
1117 	tics_per_1sec = rte_get_timer_hz();
1118 #endif
1119 	fsm = &fwd_streams[fc->stream_idx];
1120 	nb_fs = fc->stream_nb;
1121 	do {
1122 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1123 			(*pkt_fwd)(fsm[sm_id]);
1124 #ifdef RTE_LIBRTE_BITRATE
1125 		if (bitrate_enabled != 0 &&
1126 				bitrate_lcore_id == rte_lcore_id()) {
1127 			tics_current = rte_rdtsc();
1128 			if (tics_current - tics_datum >= tics_per_1sec) {
1129 				/* Periodic bitrate calculation */
1130 				for (idx_port = 0;
1131 						idx_port < cnt_ports;
1132 						idx_port++)
1133 					rte_stats_bitrate_calc(bitrate_data,
1134 						idx_port);
1135 				tics_datum = tics_current;
1136 			}
1137 		}
1138 #endif
1139 #ifdef RTE_LIBRTE_LATENCY_STATS
1140 		if (latencystats_enabled != 0 &&
1141 				latencystats_lcore_id == rte_lcore_id())
1142 			rte_latencystats_update();
1143 #endif
1144 
1145 	} while (! fc->stopped);
1146 }
1147 
1148 static int
1149 start_pkt_forward_on_core(void *fwd_arg)
1150 {
1151 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1152 			     cur_fwd_config.fwd_eng->packet_fwd);
1153 	return 0;
1154 }
1155 
1156 /*
1157  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1158  * Used to start communication flows in network loopback test configurations.
1159  */
1160 static int
1161 run_one_txonly_burst_on_core(void *fwd_arg)
1162 {
1163 	struct fwd_lcore *fwd_lc;
1164 	struct fwd_lcore tmp_lcore;
1165 
1166 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1167 	tmp_lcore = *fwd_lc;
1168 	tmp_lcore.stopped = 1;
1169 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1170 	return 0;
1171 }
1172 
1173 /*
1174  * Launch packet forwarding:
1175  *     - Setup per-port forwarding context.
1176  *     - launch logical cores with their forwarding configuration.
1177  */
1178 static void
1179 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1180 {
1181 	port_fwd_begin_t port_fwd_begin;
1182 	unsigned int i;
1183 	unsigned int lc_id;
1184 	int diag;
1185 
1186 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1187 	if (port_fwd_begin != NULL) {
1188 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1189 			(*port_fwd_begin)(fwd_ports_ids[i]);
1190 	}
1191 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1192 		lc_id = fwd_lcores_cpuids[i];
1193 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1194 			fwd_lcores[i]->stopped = 0;
1195 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1196 						     fwd_lcores[i], lc_id);
1197 			if (diag != 0)
1198 				printf("launch lcore %u failed - diag=%d\n",
1199 				       lc_id, diag);
1200 		}
1201 	}
1202 }
1203 
1204 /*
1205  * Launch packet forwarding configuration.
1206  */
1207 void
1208 start_packet_forwarding(int with_tx_first)
1209 {
1210 	port_fwd_begin_t port_fwd_begin;
1211 	port_fwd_end_t  port_fwd_end;
1212 	struct rte_port *port;
1213 	unsigned int i;
1214 	portid_t   pt_id;
1215 	streamid_t sm_id;
1216 
1217 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1218 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1219 
1220 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1221 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1222 
1223 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1224 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1225 		(!nb_rxq || !nb_txq))
1226 		rte_exit(EXIT_FAILURE,
1227 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1228 			cur_fwd_eng->fwd_mode_name);
1229 
1230 	if (all_ports_started() == 0) {
1231 		printf("Not all ports were started\n");
1232 		return;
1233 	}
1234 	if (test_done == 0) {
1235 		printf("Packet forwarding already started\n");
1236 		return;
1237 	}
1238 
1239 	if (init_fwd_streams() < 0) {
1240 		printf("Fail from init_fwd_streams()\n");
1241 		return;
1242 	}
1243 
1244 	if (dcb_test) {
1245 		for (i = 0; i < nb_fwd_ports; i++) {
1246 			pt_id = fwd_ports_ids[i];
1247 			port = &ports[pt_id];
1248 			if (!port->dcb_flag) {
1249 				printf("In DCB mode, all forwarding ports must "
1250                                        "be configured in this mode.\n");
1251 				return;
1252 			}
1253 		}
1254 		if (nb_fwd_lcores == 1) {
1255 			printf("In DCB mode, the number of forwarding cores "
1256 			       "should be larger than 1.\n");
1257 			return;
1258 		}
1259 	}
1260 	test_done = 0;
1261 
1262 	if (!no_flush_rx)
1263 		flush_fwd_rx_queues();
1264 
1265 	fwd_config_setup();
1266 	pkt_fwd_config_display(&cur_fwd_config);
1267 	rxtx_config_display();
1268 
1269 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1270 		pt_id = fwd_ports_ids[i];
1271 		port = &ports[pt_id];
1272 		rte_eth_stats_get(pt_id, &port->stats);
1273 		port->tx_dropped = 0;
1274 
1275 		map_port_queue_stats_mapping_registers(pt_id, port);
1276 	}
1277 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1278 		fwd_streams[sm_id]->rx_packets = 0;
1279 		fwd_streams[sm_id]->tx_packets = 0;
1280 		fwd_streams[sm_id]->fwd_dropped = 0;
1281 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1282 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1283 
1284 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1285 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1286 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1287 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1288 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1289 #endif
1290 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1291 		fwd_streams[sm_id]->core_cycles = 0;
1292 #endif
1293 	}
1294 	if (with_tx_first) {
1295 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1296 		if (port_fwd_begin != NULL) {
1297 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1298 				(*port_fwd_begin)(fwd_ports_ids[i]);
1299 		}
1300 		while (with_tx_first--) {
1301 			launch_packet_forwarding(
1302 					run_one_txonly_burst_on_core);
1303 			rte_eal_mp_wait_lcore();
1304 		}
1305 		port_fwd_end = tx_only_engine.port_fwd_end;
1306 		if (port_fwd_end != NULL) {
1307 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1308 				(*port_fwd_end)(fwd_ports_ids[i]);
1309 		}
1310 	}
1311 	launch_packet_forwarding(start_pkt_forward_on_core);
1312 }
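
/*
 * Usage note (illustrative): with_tx_first comes from the --tx-first option
 * or the interactive "start tx_first [n]" command; start_packet_forwarding(3)
 * would send three TXONLY bursts on every forwarding port before launching
 * the configured forwarding engine.
 */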
1313 
1314 void
1315 stop_packet_forwarding(void)
1316 {
1317 	struct rte_eth_stats stats;
1318 	struct rte_port *port;
1319 	port_fwd_end_t  port_fwd_end;
1320 	int i;
1321 	portid_t   pt_id;
1322 	streamid_t sm_id;
1323 	lcoreid_t  lc_id;
1324 	uint64_t total_recv;
1325 	uint64_t total_xmit;
1326 	uint64_t total_rx_dropped;
1327 	uint64_t total_tx_dropped;
1328 	uint64_t total_rx_nombuf;
1329 	uint64_t tx_dropped;
1330 	uint64_t rx_bad_ip_csum;
1331 	uint64_t rx_bad_l4_csum;
1332 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1333 	uint64_t fwd_cycles;
1334 #endif
1335 
1336 	static const char *acc_stats_border = "+++++++++++++++";
1337 
1338 	if (test_done) {
1339 		printf("Packet forwarding not started\n");
1340 		return;
1341 	}
1342 	printf("Telling cores to stop...");
1343 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1344 		fwd_lcores[lc_id]->stopped = 1;
1345 	printf("\nWaiting for lcores to finish...\n");
1346 	rte_eal_mp_wait_lcore();
1347 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1348 	if (port_fwd_end != NULL) {
1349 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1350 			pt_id = fwd_ports_ids[i];
1351 			(*port_fwd_end)(pt_id);
1352 		}
1353 	}
1354 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1355 	fwd_cycles = 0;
1356 #endif
1357 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1358 		if (cur_fwd_config.nb_fwd_streams >
1359 		    cur_fwd_config.nb_fwd_ports) {
1360 			fwd_stream_stats_display(sm_id);
1361 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1362 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1363 		} else {
1364 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1365 				fwd_streams[sm_id];
1366 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1367 				fwd_streams[sm_id];
1368 		}
1369 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1370 		tx_dropped = (uint64_t) (tx_dropped +
1371 					 fwd_streams[sm_id]->fwd_dropped);
1372 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1373 
1374 		rx_bad_ip_csum =
1375 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1376 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1377 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1378 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1379 							rx_bad_ip_csum;
1380 
1381 		rx_bad_l4_csum =
1382 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1383 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1384 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1385 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1386 							rx_bad_l4_csum;
1387 
1388 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1389 		fwd_cycles = (uint64_t) (fwd_cycles +
1390 					 fwd_streams[sm_id]->core_cycles);
1391 #endif
1392 	}
1393 	total_recv = 0;
1394 	total_xmit = 0;
1395 	total_rx_dropped = 0;
1396 	total_tx_dropped = 0;
1397 	total_rx_nombuf  = 0;
1398 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1399 		pt_id = fwd_ports_ids[i];
1400 
1401 		port = &ports[pt_id];
1402 		rte_eth_stats_get(pt_id, &stats);
1403 		stats.ipackets -= port->stats.ipackets;
1404 		port->stats.ipackets = 0;
1405 		stats.opackets -= port->stats.opackets;
1406 		port->stats.opackets = 0;
1407 		stats.ibytes   -= port->stats.ibytes;
1408 		port->stats.ibytes = 0;
1409 		stats.obytes   -= port->stats.obytes;
1410 		port->stats.obytes = 0;
1411 		stats.imissed  -= port->stats.imissed;
1412 		port->stats.imissed = 0;
1413 		stats.oerrors  -= port->stats.oerrors;
1414 		port->stats.oerrors = 0;
1415 		stats.rx_nombuf -= port->stats.rx_nombuf;
1416 		port->stats.rx_nombuf = 0;
1417 
1418 		total_recv += stats.ipackets;
1419 		total_xmit += stats.opackets;
1420 		total_rx_dropped += stats.imissed;
1421 		total_tx_dropped += port->tx_dropped;
1422 		total_rx_nombuf  += stats.rx_nombuf;
1423 
1424 		fwd_port_stats_display(pt_id, &stats);
1425 	}
1426 
1427 	printf("\n  %s Accumulated forward statistics for all ports"
1428 	       "%s\n",
1429 	       acc_stats_border, acc_stats_border);
1430 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1431 	       "%-"PRIu64"\n"
1432 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1433 	       "%-"PRIu64"\n",
1434 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1435 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1436 	if (total_rx_nombuf > 0)
1437 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1438 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1439 	       "%s\n",
1440 	       acc_stats_border, acc_stats_border);
1441 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1442 	if (total_recv > 0)
1443 		printf("\n  CPU cycles/packet=%u (total cycles="
1444 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1445 		       (unsigned int)(fwd_cycles / total_recv),
1446 		       fwd_cycles, total_recv);
1447 #endif
1448 	printf("\nDone.\n");
1449 	test_done = 1;
1450 }
1451 
1452 void
1453 dev_set_link_up(portid_t pid)
1454 {
1455 	if (rte_eth_dev_set_link_up(pid) < 0)
1456 		printf("\nSet link up failed.\n");
1457 }
1458 
1459 void
1460 dev_set_link_down(portid_t pid)
1461 {
1462 	if (rte_eth_dev_set_link_down(pid) < 0)
1463 		printf("\nSet link down failed.\n");
1464 }
1465 
1466 static int
1467 all_ports_started(void)
1468 {
1469 	portid_t pi;
1470 	struct rte_port *port;
1471 
1472 	RTE_ETH_FOREACH_DEV(pi) {
1473 		port = &ports[pi];
1474 		/* Check if there is a port which is not started */
1475 		if ((port->port_status != RTE_PORT_STARTED) &&
1476 			(port->slave_flag == 0))
1477 			return 0;
1478 	}
1479 
1480 	/* All ports are started */
1481 	return 1;
1482 }
1483 
1484 int
1485 port_is_stopped(portid_t port_id)
1486 {
1487 	struct rte_port *port = &ports[port_id];
1488 
1489 	if ((port->port_status != RTE_PORT_STOPPED) &&
1490 	    (port->slave_flag == 0))
1491 		return 0;
1492 	return 1;
1493 }
1494 
1495 int
1496 all_ports_stopped(void)
1497 {
1498 	portid_t pi;
1499 
1500 	RTE_ETH_FOREACH_DEV(pi) {
1501 		if (!port_is_stopped(pi))
1502 			return 0;
1503 	}
1504 
1505 	return 1;
1506 }
1507 
1508 int
1509 port_is_started(portid_t port_id)
1510 {
1511 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1512 		return 0;
1513 
1514 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1515 		return 0;
1516 
1517 	return 1;
1518 }
1519 
1520 static int
1521 port_is_closed(portid_t port_id)
1522 {
1523 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1524 		return 0;
1525 
1526 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1527 		return 0;
1528 
1529 	return 1;
1530 }
1531 
1532 int
1533 start_port(portid_t pid)
1534 {
1535 	int diag, need_check_link_status = -1;
1536 	portid_t pi;
1537 	queueid_t qi;
1538 	struct rte_port *port;
1539 	struct ether_addr mac_addr;
1540 	enum rte_eth_event_type event_type;
1541 
1542 	if (port_id_is_invalid(pid, ENABLED_WARN))
1543 		return 0;
1544 
1545 	if (dcb_config)
1546 		dcb_test = 1;
1547 	RTE_ETH_FOREACH_DEV(pi) {
1548 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1549 			continue;
1550 
1551 		need_check_link_status = 0;
1552 		port = &ports[pi];
1553 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1554 						 RTE_PORT_HANDLING) == 0) {
1555 			printf("Port %d is not stopped\n", pi);
1556 			continue;
1557 		}
1558 
1559 		if (port->need_reconfig > 0) {
1560 			port->need_reconfig = 0;
1561 
1562 			if (flow_isolate_all) {
1563 				int ret = port_flow_isolate(pi, 1);
1564 				if (ret) {
1565 					printf("Failed to apply isolated"
1566 					       " mode on port %d\n", pi);
1567 					return -1;
1568 				}
1569 			}
1570 
1571 			printf("Configuring Port %d (socket %u)\n", pi,
1572 					port->socket_id);
1573 			/* configure port */
1574 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1575 						&(port->dev_conf));
1576 			if (diag != 0) {
1577 				if (rte_atomic16_cmpset(&(port->port_status),
1578 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1579 					printf("Port %d cannot be set back "
1580 							"to stopped\n", pi);
1581 				printf("Failed to configure port %d\n", pi);
1582 				/* try to reconfigure port next time */
1583 				port->need_reconfig = 1;
1584 				return -1;
1585 			}
1586 		}
1587 		if (port->need_reconfig_queues > 0) {
1588 			port->need_reconfig_queues = 0;
1589 			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1590 			/* Apply Tx offloads configuration */
1591 			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1592 			/* setup tx queues */
1593 			for (qi = 0; qi < nb_txq; qi++) {
1594 				if ((numa_support) &&
1595 					(txring_numa[pi] != NUMA_NO_CONFIG))
1596 					diag = rte_eth_tx_queue_setup(pi, qi,
1597 						nb_txd,txring_numa[pi],
1598 						&(port->tx_conf));
1599 				else
1600 					diag = rte_eth_tx_queue_setup(pi, qi,
1601 						nb_txd,port->socket_id,
1602 						&(port->tx_conf));
1603 
1604 				if (diag == 0)
1605 					continue;
1606 
1607 				/* Failed to set up a TX queue; return */
1608 				if (rte_atomic16_cmpset(&(port->port_status),
1609 							RTE_PORT_HANDLING,
1610 							RTE_PORT_STOPPED) == 0)
1611 					printf("Port %d cannot be set back "
1612 							"to stopped\n", pi);
1613 				printf("Failed to configure port %d TX queues\n", pi);
1614 				/* try to reconfigure queues next time */
1615 				port->need_reconfig_queues = 1;
1616 				return -1;
1617 			}
1618 			/* Apply Rx offloads configuration */
1619 			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1620 			/* setup rx queues */
1621 			for (qi = 0; qi < nb_rxq; qi++) {
1622 				if ((numa_support) &&
1623 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1624 					struct rte_mempool * mp =
1625 						mbuf_pool_find(rxring_numa[pi]);
1626 					if (mp == NULL) {
1627 						printf("Failed to setup RX queue:"
1628 							"No mempool allocation"
1629 							" on the socket %d\n",
1630 							rxring_numa[pi]);
1631 						return -1;
1632 					}
1633 
1634 					diag = rte_eth_rx_queue_setup(pi, qi,
1635 					     nb_rxd,rxring_numa[pi],
1636 					     &(port->rx_conf),mp);
1637 				} else {
1638 					struct rte_mempool *mp =
1639 						mbuf_pool_find(port->socket_id);
1640 					if (mp == NULL) {
1641 						printf("Failed to setup RX queue:"
1642 							"No mempool allocation"
1643 							" on the socket %d\n",
1644 							port->socket_id);
1645 						return -1;
1646 					}
1647 					diag = rte_eth_rx_queue_setup(pi, qi,
1648 					     nb_rxd,port->socket_id,
1649 					     &(port->rx_conf), mp);
1650 				}
1651 				if (diag == 0)
1652 					continue;
1653 
1654 				/* Failed to set up an RX queue; return */
1655 				if (rte_atomic16_cmpset(&(port->port_status),
1656 							RTE_PORT_HANDLING,
1657 							RTE_PORT_STOPPED) == 0)
1658 					printf("Port %d cannot be set back "
1659 							"to stopped\n", pi);
1660 				printf("Failed to configure port %d RX queues\n", pi);
1661 				/* try to reconfigure queues next time */
1662 				port->need_reconfig_queues = 1;
1663 				return -1;
1664 			}
1665 		}
1666 
1667 		/* start port */
1668 		if (rte_eth_dev_start(pi) < 0) {
1669 			printf("Failed to start port %d\n", pi);
1670 
1671 			/* Failed to start the port; set it back to stopped */
1672 			if (rte_atomic16_cmpset(&(port->port_status),
1673 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1674 				printf("Port %d cannot be set back to "
1675 							"stopped\n", pi);
1676 			continue;
1677 		}
1678 
1679 		if (rte_atomic16_cmpset(&(port->port_status),
1680 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1681 			printf("Port %d cannot be set to started\n", pi);
1682 
1683 		rte_eth_macaddr_get(pi, &mac_addr);
1684 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1685 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1686 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1687 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1688 
1689 		/* at least one port started, need to check link status */
1690 		need_check_link_status = 1;
1691 	}
1692 
1693 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1694 	     event_type < RTE_ETH_EVENT_MAX;
1695 	     event_type++) {
1696 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1697 						event_type,
1698 						eth_event_callback,
1699 						NULL);
1700 		if (diag) {
1701 			printf("Failed to set up event callback for event %d\n",
1702 				event_type);
1703 			return -1;
1704 		}
1705 	}
1706 
1707 	if (need_check_link_status == 1 && !no_link_check)
1708 		check_all_ports_link_status(RTE_PORT_ALL);
1709 	else if (need_check_link_status == 0)
1710 		printf("Please stop the ports first\n");
1711 
1712 	printf("Done\n");
1713 	return 0;
1714 }
1715 
1716 void
1717 stop_port(portid_t pid)
1718 {
1719 	portid_t pi;
1720 	struct rte_port *port;
1721 	int need_check_link_status = 0;
1722 
1723 	if (dcb_test) {
1724 		dcb_test = 0;
1725 		dcb_config = 0;
1726 	}
1727 
1728 	if (port_id_is_invalid(pid, ENABLED_WARN))
1729 		return;
1730 
1731 	printf("Stopping ports...\n");
1732 
1733 	RTE_ETH_FOREACH_DEV(pi) {
1734 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1735 			continue;
1736 
1737 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1738 			printf("Please remove port %d from forwarding configuration.\n", pi);
1739 			continue;
1740 		}
1741 
1742 		if (port_is_bonding_slave(pi)) {
1743 			printf("Please remove port %d from bonded device.\n", pi);
1744 			continue;
1745 		}
1746 
1747 		port = &ports[pi];
1748 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1749 						RTE_PORT_HANDLING) == 0)
1750 			continue;
1751 
1752 		rte_eth_dev_stop(pi);
1753 
1754 		if (rte_atomic16_cmpset(&(port->port_status),
1755 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1756 			printf("Port %d cannot be set to stopped\n", pi);
1757 		need_check_link_status = 1;
1758 	}
1759 	if (need_check_link_status && !no_link_check)
1760 		check_all_ports_link_status(RTE_PORT_ALL);
1761 
1762 	printf("Done\n");
1763 }
1764 
1765 void
1766 close_port(portid_t pid)
1767 {
1768 	portid_t pi;
1769 	struct rte_port *port;
1770 
1771 	if (port_id_is_invalid(pid, ENABLED_WARN))
1772 		return;
1773 
1774 	printf("Closing ports...\n");
1775 
1776 	RTE_ETH_FOREACH_DEV(pi) {
1777 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1778 			continue;
1779 
1780 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1781 			printf("Please remove port %d from forwarding configuration.\n", pi);
1782 			continue;
1783 		}
1784 
1785 		if (port_is_bonding_slave(pi)) {
1786 			printf("Please remove port %d from bonded device.\n", pi);
1787 			continue;
1788 		}
1789 
1790 		port = &ports[pi];
1791 		if (rte_atomic16_cmpset(&(port->port_status),
1792 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1793 			printf("Port %d is already closed\n", pi);
1794 			continue;
1795 		}
1796 
1797 		if (rte_atomic16_cmpset(&(port->port_status),
1798 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1799 			printf("Port %d is not stopped\n", pi);
1800 			continue;
1801 		}
1802 
1803 		if (port->flow_list)
1804 			port_flow_flush(pi);
1805 		rte_eth_dev_close(pi);
1806 
1807 		if (rte_atomic16_cmpset(&(port->port_status),
1808 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1809 			printf("Port %d cannot be set to closed\n", pi);
1810 	}
1811 
1812 	printf("Done\n");
1813 }
1814 
1815 void
1816 reset_port(portid_t pid)
1817 {
1818 	int diag;
1819 	portid_t pi;
1820 	struct rte_port *port;
1821 
1822 	if (port_id_is_invalid(pid, ENABLED_WARN))
1823 		return;
1824 
1825 	printf("Resetting ports...\n");
1826 
1827 	RTE_ETH_FOREACH_DEV(pi) {
1828 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1829 			continue;
1830 
1831 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1832 			printf("Please remove port %d from forwarding "
1833 			       "configuration.\n", pi);
1834 			continue;
1835 		}
1836 
1837 		if (port_is_bonding_slave(pi)) {
1838 			printf("Please remove port %d from bonded device.\n",
1839 			       pi);
1840 			continue;
1841 		}
1842 
1843 		diag = rte_eth_dev_reset(pi);
1844 		if (diag == 0) {
1845 			port = &ports[pi];
1846 			port->need_reconfig = 1;
1847 			port->need_reconfig_queues = 1;
1848 		} else {
1849 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1850 		}
1851 	}
1852 
1853 	printf("Done\n");
1854 }
1855 
1856 void
1857 attach_port(char *identifier)
1858 {
1859 	portid_t pi = 0;
1860 	unsigned int socket_id;
1861 
1862 	printf("Attaching a new port...\n");
1863 
1864 	if (identifier == NULL) {
1865 		printf("Invalid parameters are specified\n");
1866 		return;
1867 	}
1868 
1869 	if (rte_eth_dev_attach(identifier, &pi))
1870 		return;
1871 
1872 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1873 	/* if socket_id is invalid, set to 0 */
1874 	if (check_socket_id(socket_id) < 0)
1875 		socket_id = 0;
1876 	reconfig(pi, socket_id);
1877 	rte_eth_promiscuous_enable(pi);
1878 
1879 	nb_ports = rte_eth_dev_count();
1880 
1881 	ports[pi].port_status = RTE_PORT_STOPPED;
1882 
1883 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1884 	printf("Done\n");
1885 }
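
/*
 * Typical interactive usage; the identifiers are illustrative and the
 * accepted syntax depends on the bus/driver (a PCI address, a virtual
 * device string, ...):
 *
 *   testpmd> port attach 0000:02:00.0
 *   testpmd> port attach net_pcap0,iface=eth0
 */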
1886 
1887 void
1888 detach_port(portid_t port_id)
1889 {
1890 	char name[RTE_ETH_NAME_MAX_LEN];
1891 
1892 	printf("Detaching a port...\n");
1893 
1894 	if (!port_is_closed(port_id)) {
1895 		printf("Please close port first\n");
1896 		return;
1897 	}
1898 
1899 	if (ports[port_id].flow_list)
1900 		port_flow_flush(port_id);
1901 
1902 	if (rte_eth_dev_detach(port_id, name)) {
1903 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1904 		return;
1905 	}
1906 
1907 	nb_ports = rte_eth_dev_count();
1908 
1909 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1910 			name, nb_ports);
1911 	printf("Done\n");
1912 	return;
1913 }
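
/*
 * A port must be stopped and closed before it can be detached, so a
 * full removal sequence at the prompt is (port 0 as an illustrative
 * example):
 *
 *   testpmd> port stop 0
 *   testpmd> port close 0
 *   testpmd> port detach 0
 */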
1914 
1915 void
1916 pmd_test_exit(void)
1917 {
1918 	portid_t pt_id;
1919 
1920 	if (test_done == 0)
1921 		stop_packet_forwarding();
1922 
1923 	if (ports != NULL) {
1924 		no_link_check = 1;
1925 		RTE_ETH_FOREACH_DEV(pt_id) {
1926 			printf("\nShutting down port %d...\n", pt_id);
1927 			fflush(stdout);
1928 			stop_port(pt_id);
1929 			close_port(pt_id);
1930 		}
1931 	}
1932 	printf("\nBye...\n");
1933 }
1934 
1935 typedef void (*cmd_func_t)(void);
1936 struct pmd_test_command {
1937 	const char *cmd_name;
1938 	cmd_func_t cmd_func;
1939 };
1940 
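/* Element count of a pmd_test_command menu table (sizeof(a)/sizeof(a[0]) idiom). */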
1941 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1942 
1943 /* Check the link status of all ports in up to 9 s, and finally print the status of each port */
1944 static void
1945 check_all_ports_link_status(uint32_t port_mask)
1946 {
1947 #define CHECK_INTERVAL 100 /* 100ms */
1948 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
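	/*
	 * Poll every CHECK_INTERVAL ms. Once every masked port reports
	 * link up, or the ~9 s budget runs out, make one final pass with
	 * print_flag set to report each port's status.
	 */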
1949 	portid_t portid;
1950 	uint8_t count, all_ports_up, print_flag = 0;
1951 	struct rte_eth_link link;
1952 
1953 	printf("Checking link statuses...\n");
1954 	fflush(stdout);
1955 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1956 		all_ports_up = 1;
1957 		RTE_ETH_FOREACH_DEV(portid) {
1958 			if ((port_mask & (1 << portid)) == 0)
1959 				continue;
1960 			memset(&link, 0, sizeof(link));
1961 			rte_eth_link_get_nowait(portid, &link);
1962 			/* print link status if flag set */
1963 			if (print_flag == 1) {
1964 				if (link.link_status)
1965 					printf(
1966 					"Port %d Link Up. speed %u Mbps - %s\n",
1967 					portid, link.link_speed,
1968 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1969 					("full-duplex") : ("half-duplex"));
1970 				else
1971 					printf("Port %d Link Down\n", portid);
1972 				continue;
1973 			}
1974 			/* clear all_ports_up flag if any link down */
1975 			if (link.link_status == ETH_LINK_DOWN) {
1976 				all_ports_up = 0;
1977 				break;
1978 			}
1979 		}
1980 		/* after finally printing all link status, get out */
1981 		if (print_flag == 1)
1982 			break;
1983 
1984 		if (all_ports_up == 0) {
1985 			fflush(stdout);
1986 			rte_delay_ms(CHECK_INTERVAL);
1987 		}
1988 
1989 		/* set the print_flag if all ports up or timeout */
1990 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1991 			print_flag = 1;
1992 		}
1993 
1994 		if (lsc_interrupt)
1995 			break;
1996 	}
1997 }
1998 
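/*
 * Deferred handler for device-removal (RMV) interrupts: stop and close
 * the removed port, then detach its underlying device.
 */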
1999 static void
2000 rmv_event_callback(void *arg)
2001 {
2002 	struct rte_eth_dev *dev;
2003 	portid_t port_id = (intptr_t)arg;
2004 
2005 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2006 	dev = &rte_eth_devices[port_id];
2007 
2008 	stop_port(port_id);
2009 	close_port(port_id);
2010 	printf("removing device %s\n", dev->device->name);
2011 	if (rte_eal_dev_detach(dev->device))
2012 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2013 			dev->device->name);
2014 }
2015 
2016 /* This function is used by the interrupt thread */
2017 static int
2018 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2019 		  void *ret_param)
2020 {
2021 	static const char * const event_desc[] = {
2022 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2023 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2024 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2025 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2026 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2027 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2028 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2029 		[RTE_ETH_EVENT_NEW] = "device probed",
2030 		[RTE_ETH_EVENT_DESTROY] = "device released",
2031 		[RTE_ETH_EVENT_MAX] = NULL,
2032 	};
2033 
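	/*
	 * event_print_mask is a bitmask with one bit per event type, set
	 * from the command line, selecting which events are logged below.
	 */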
2034 	RTE_SET_USED(param);
2035 	RTE_SET_USED(ret_param);
2036 
2037 	if (type >= RTE_ETH_EVENT_MAX) {
2038 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2039 			port_id, __func__, type);
2040 		fflush(stderr);
2041 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2042 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2043 			event_desc[type]);
2044 		fflush(stdout);
2045 	}
2046 
2047 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2048 		return 0;
2049 
2050 	switch (type) {
2051 	case RTE_ETH_EVENT_INTR_RMV:
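		/*
		 * rte_eal_alarm_set() takes its period in microseconds, so
		 * the stop/close/detach work is deferred by 100 ms and runs
		 * from the alarm context instead of directly in this
		 * callback.
		 */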
2052 		if (rte_eal_alarm_set(100000,
2053 				rmv_event_callback, (void *)(intptr_t)port_id))
2054 			fprintf(stderr, "Could not set up deferred device removal\n");
2055 		break;
2056 	default:
2057 		break;
2058 	}
2059 	return 0;
2060 }
2061 
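/*
 * Program the user-supplied TX queue to stats-counter mappings matching
 * this port into the device, and flag the port once at least one
 * mapping has been applied.
 */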
2062 static int
2063 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2064 {
2065 	uint16_t i;
2066 	int diag;
2067 	uint8_t mapping_found = 0;
2068 
2069 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2070 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2071 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2072 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2073 					tx_queue_stats_mappings[i].queue_id,
2074 					tx_queue_stats_mappings[i].stats_counter_id);
2075 			if (diag != 0)
2076 				return diag;
2077 			mapping_found = 1;
2078 		}
2079 	}
2080 	if (mapping_found)
2081 		port->tx_queue_stats_mapping_enabled = 1;
2082 	return 0;
2083 }
2084 
2085 static int
2086 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2087 {
2088 	uint16_t i;
2089 	int diag;
2090 	uint8_t mapping_found = 0;
2091 
2092 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2093 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2094 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2095 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2096 					rx_queue_stats_mappings[i].queue_id,
2097 					rx_queue_stats_mappings[i].stats_counter_id);
2098 			if (diag != 0)
2099 				return diag;
2100 			mapping_found = 1;
2101 		}
2102 	}
2103 	if (mapping_found)
2104 		port->rx_queue_stats_mapping_enabled = 1;
2105 	return 0;
2106 }
2107 
2108 static void
2109 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2110 {
2111 	int diag = 0;
2112 
2113 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2114 	if (diag != 0) {
2115 		if (diag == -ENOTSUP) {
2116 			port->tx_queue_stats_mapping_enabled = 0;
2117 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2118 		}
2119 		else
2120 			rte_exit(EXIT_FAILURE,
2121 					"set_tx_queue_stats_mapping_registers "
2122 					"failed for port id=%d diag=%d\n",
2123 					pi, diag);
2124 	}
2125 
2126 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2127 	if (diag != 0) {
2128 		if (diag == -ENOTSUP) {
2129 			port->rx_queue_stats_mapping_enabled = 0;
2130 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2131 		}
2132 		else
2133 			rte_exit(EXIT_FAILURE,
2134 					"set_rx_queue_stats_mapping_registers "
2135 					"failed for port id=%d diag=%d\n",
2136 					pi, diag);
2137 	}
2138 }
2139 
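/*
 * Start from the PMD defaults reported in dev_info and override only the
 * parameters the user explicitly set on the command line;
 * RTE_PMD_PARAM_UNSET marks the untouched ones.
 */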
2140 static void
2141 rxtx_port_config(struct rte_port *port)
2142 {
2143 	port->rx_conf = port->dev_info.default_rxconf;
2144 	port->tx_conf = port->dev_info.default_txconf;
2145 
2146 	/* Check if any RX/TX parameters have been passed */
2147 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2148 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2149 
2150 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2151 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2152 
2153 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2154 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2155 
2156 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2157 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2158 
2159 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2160 		port->rx_conf.rx_drop_en = rx_drop_en;
2161 
2162 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2163 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2164 
2165 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2166 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2167 
2168 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2169 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2170 
2171 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2172 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2173 
2174 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2175 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2176 }
2177 
2178 void
2179 init_port_config(void)
2180 {
2181 	portid_t pid;
2182 	struct rte_port *port;
2183 
2184 	RTE_ETH_FOREACH_DEV(pid) {
2185 		port = &ports[pid];
2186 		port->dev_conf.fdir_conf = fdir_conf;
2187 		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2188 		port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2189 			(nb_rxq > 1) ? rss_hf : 0;
2194 
2195 		if (port->dcb_flag == 0) {
2196 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2197 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2198 			else
2199 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2200 		}
2201 
2202 		rxtx_port_config(port);
2203 
2204 		rte_eth_macaddr_get(pid, &port->eth_addr);
2205 
2206 		map_port_queue_stats_mapping_registers(pid, port);
2207 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2208 		rte_pmd_ixgbe_bypass_init(pid);
2209 #endif
2210 
2211 		if (lsc_interrupt &&
2212 		    (rte_eth_devices[pid].data->dev_flags &
2213 		     RTE_ETH_DEV_INTR_LSC))
2214 			port->dev_conf.intr_conf.lsc = 1;
2215 		if (rmv_interrupt &&
2216 		    (rte_eth_devices[pid].data->dev_flags &
2217 		     RTE_ETH_DEV_INTR_RMV))
2218 			port->dev_conf.intr_conf.rmv = 1;
2219 
2220 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2221 		/* Detect softnic port */
2222 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2223 			port->softnic_enable = 1;
2224 			memset(&port->softport, 0, sizeof(struct softnic_port));
2225 
2226 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2227 				port->softport.tm_flag = 1;
2228 		}
2229 #endif
2230 	}
2231 }
2232 
2233 void set_port_slave_flag(portid_t slave_pid)
2234 {
2235 	struct rte_port *port;
2236 
2237 	port = &ports[slave_pid];
2238 	port->slave_flag = 1;
2239 }
2240 
2241 void clear_port_slave_flag(portid_t slave_pid)
2242 {
2243 	struct rte_port *port;
2244 
2245 	port = &ports[slave_pid];
2246 	port->slave_flag = 0;
2247 }
2248 
2249 uint8_t port_is_bonding_slave(portid_t slave_pid)
2250 {
2251 	struct rte_port *port;
2252 
2253 	port = &ports[slave_pid];
2254 	return port->slave_flag;
2255 }
2256 
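/*
 * VLAN IDs mapped onto VMDq pools by get_eth_dcb_conf() below and
 * programmed into the RX VLAN filter by init_port_dcb_config().
 */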
2257 const uint16_t vlan_tags[] = {
2258 		0,  1,  2,  3,  4,  5,  6,  7,
2259 		8,  9, 10, 11,  12, 13, 14, 15,
2260 		16, 17, 18, 19, 20, 21, 22, 23,
2261 		24, 25, 26, 27, 28, 29, 30, 31
2262 };
2263 
2264 static int
2265 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2266 		 enum dcb_mode_enable dcb_mode,
2267 		 enum rte_eth_nb_tcs num_tcs,
2268 		 uint8_t pfc_en)
2269 {
2270 	uint8_t i;
2271 
2272 	/*
2273 	 * Build the configuration for DCB+VT based on the vlan_tags array
2274 	 * given above and on the number of traffic classes available for use.
2275 	 */
2276 	if (dcb_mode == DCB_VT_ENABLED) {
2277 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2278 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2279 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2280 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2281 
2282 		/* VMDQ+DCB RX and TX configurations */
2283 		vmdq_rx_conf->enable_default_pool = 0;
2284 		vmdq_rx_conf->default_pool = 0;
2285 		vmdq_rx_conf->nb_queue_pools =
2286 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2287 		vmdq_tx_conf->nb_queue_pools =
2288 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2289 
2290 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2291 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2292 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2293 			vmdq_rx_conf->pool_map[i].pools =
2294 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2295 		}
2296 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2297 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2298 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2299 		}
2300 
2301 		/* set DCB mode of RX and TX of multiple queues */
2302 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2303 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2304 	} else {
2305 		struct rte_eth_dcb_rx_conf *rx_conf =
2306 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2307 		struct rte_eth_dcb_tx_conf *tx_conf =
2308 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2309 
2310 		rx_conf->nb_tcs = num_tcs;
2311 		tx_conf->nb_tcs = num_tcs;
2312 
2313 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2314 			rx_conf->dcb_tc[i] = i % num_tcs;
2315 			tx_conf->dcb_tc[i] = i % num_tcs;
2316 		}
2317 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2318 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2319 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2320 	}
2321 
2322 	if (pfc_en)
2323 		eth_conf->dcb_capability_en =
2324 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2325 	else
2326 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2327 
2328 	return 0;
2329 }
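
/*
 * Worked example: with num_tcs == ETH_4_TCS in VT mode, 32 pools are
 * configured, pool_map[i] maps VLAN ID vlan_tags[i] to pool bit
 * (i % 32), and each of the 8 user priorities i is assigned to traffic
 * class (i % 4).
 */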
2330 
2331 int
2332 init_port_dcb_config(portid_t pid,
2333 		     enum dcb_mode_enable dcb_mode,
2334 		     enum rte_eth_nb_tcs num_tcs,
2335 		     uint8_t pfc_en)
2336 {
2337 	struct rte_eth_conf port_conf;
2338 	struct rte_port *rte_port;
2339 	int retval;
2340 	uint16_t i;
2341 
2342 	rte_port = &ports[pid];
2343 
2344 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2345 	/* Enter DCB configuration status */
2346 	dcb_config = 1;
2347 
2348 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2349 	port_conf.txmode = rte_port->dev_conf.txmode;
2350 
2351 	/* Set the DCB configuration for both VT and non-VT mode */
2352 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2353 	if (retval < 0)
2354 		return retval;
2355 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2356 
2357 	/*
2358 	 * Write the configuration into the device.
2359 	 * Set the numbers of RX & TX queues to 0, so
2360 	 * the RX & TX queues will not be set up.
2361 	 */
2362 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2363 
2364 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2365 
2366 	/* If dev_info.vmdq_pool_base is greater than 0,
2367 	 * the queue IDs of the VMDq pools start after the PF queues.
2368 	 */
2369 	if (dcb_mode == DCB_VT_ENABLED &&
2370 	    rte_port->dev_info.vmdq_pool_base > 0) {
2371 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2372 			" for port %d.\n", pid);
2373 		return -1;
2374 	}
2375 
2376 	/* Assume the ports in testpmd have the same DCB capability
2377 	 * and the same number of rxq and txq in DCB mode.
2378 	 */
2379 	if (dcb_mode == DCB_VT_ENABLED) {
2380 		if (rte_port->dev_info.max_vfs > 0) {
2381 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2382 			nb_txq = rte_port->dev_info.nb_tx_queues;
2383 		} else {
2384 			nb_rxq = rte_port->dev_info.max_rx_queues;
2385 			nb_txq = rte_port->dev_info.max_tx_queues;
2386 		}
2387 	} else {
2388 		/* If VT is disabled, use all PF queues */
2389 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2390 			nb_rxq = rte_port->dev_info.max_rx_queues;
2391 			nb_txq = rte_port->dev_info.max_tx_queues;
2392 		} else {
2393 			nb_rxq = (queueid_t)num_tcs;
2394 			nb_txq = (queueid_t)num_tcs;
2396 		}
2397 	}
2398 	rx_free_thresh = 64;
2399 
2400 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2401 
2402 	rxtx_port_config(rte_port);
2403 	/* VLAN filter */
2404 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2405 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2406 		rx_vft_set(pid, vlan_tags[i], 1);
2407 
2408 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2409 	map_port_queue_stats_mapping_registers(pid, rte_port);
2410 
2411 	rte_port->dcb_flag = 1;
2412 
2413 	return 0;
2414 }
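
/*
 * Reached from the interactive prompt, e.g. (illustrative syntax):
 *
 *   testpmd> port stop 0
 *   testpmd> port config 0 dcb vt off 4 pfc on
 *   testpmd> port start 0
 */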
2415 
2416 static void
2417 init_port(void)
2418 {
2419 	/* Configuration of Ethernet ports. */
2420 	ports = rte_zmalloc("testpmd: ports",
2421 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2422 			    RTE_CACHE_LINE_SIZE);
2423 	if (ports == NULL) {
2424 		rte_exit(EXIT_FAILURE,
2425 				"rte_zmalloc(%d struct rte_port) failed\n",
2426 				RTE_MAX_ETHPORTS);
2427 	}
2428 }
2429 
2430 static void
2431 force_quit(void)
2432 {
2433 	pmd_test_exit();
2434 	prompt_exit();
2435 }
2436 
2437 static void
2438 print_stats(void)
2439 {
2440 	uint8_t i;
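	/* ANSI escape sequences: ESC "[2J" clears the screen and
	 * ESC "[1;1H" moves the cursor to the top-left corner.
	 */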
2441 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2442 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2443 
2444 	/* Clear screen and move to top left */
2445 	printf("%s%s", clr, top_left);
2446 
2447 	printf("\nPort statistics ====================================");
2448 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2449 		nic_stats_display(fwd_ports_ids[i]);
2450 }
2451 
2452 static void
2453 signal_handler(int signum)
2454 {
2455 	if (signum == SIGINT || signum == SIGTERM) {
2456 		printf("\nSignal %d received, preparing to exit...\n",
2457 				signum);
2458 #ifdef RTE_LIBRTE_PDUMP
2459 		/* uninitialize packet capture framework */
2460 		rte_pdump_uninit();
2461 #endif
2462 #ifdef RTE_LIBRTE_LATENCY_STATS
2463 		rte_latencystats_uninit();
2464 #endif
2465 		force_quit();
2466 		/* Set flag to indicate forced termination. */
2467 		f_quit = 1;
2468 		/* exit with the expected status */
2469 		signal(signum, SIG_DFL);
2470 		kill(getpid(), signum);
2471 	}
2472 }
2473 
2474 int
2475 main(int argc, char** argv)
2476 {
2477 	int  diag;
2478 	portid_t port_id;
2479 
2480 	signal(SIGINT, signal_handler);
2481 	signal(SIGTERM, signal_handler);
2482 
2483 	diag = rte_eal_init(argc, argv);
2484 	if (diag < 0)
2485 		rte_panic("Cannot init EAL\n");
2486 
2487 	testpmd_logtype = rte_log_register("testpmd");
2488 	if (testpmd_logtype < 0)
2489 		rte_panic("Cannot register log type");
2490 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2491 
2492 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2493 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2494 			strerror(errno));
2495 	}
2496 
2497 #ifdef RTE_LIBRTE_PDUMP
2498 	/* initialize packet capture framework */
2499 	rte_pdump_init(NULL);
2500 #endif
2501 
2502 	nb_ports = (portid_t) rte_eth_dev_count();
2503 	if (nb_ports == 0)
2504 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2505 
2506 	/* allocate port structures, and init them */
2507 	init_port();
2508 
2509 	set_def_fwd_config();
2510 	if (nb_lcores == 0)
2511 		rte_panic("Empty set of forwarding logical cores - check the "
2512 			  "core mask supplied in the command parameters\n");
2513 
2514 	/* Bitrate/latency stats disabled by default */
2515 #ifdef RTE_LIBRTE_BITRATE
2516 	bitrate_enabled = 0;
2517 #endif
2518 #ifdef RTE_LIBRTE_LATENCY_STATS
2519 	latencystats_enabled = 0;
2520 #endif
2521 
2522 	argc -= diag;
2523 	argv += diag;
2524 	if (argc > 1)
2525 		launch_args_parse(argc, argv);
2526 
2527 	if (tx_first && interactive)
2528 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2529 				"interactive mode.\n");
2530 
2531 	if (tx_first && lsc_interrupt) {
2532 		printf("Warning: lsc_interrupt needs to be off when "
2533 				"using tx_first. Disabling.\n");
2534 		lsc_interrupt = 0;
2535 	}
2536 
2537 	if (!nb_rxq && !nb_txq)
2538 		printf("Warning: Either rx or tx queues should be non-zero\n");
2539 
2540 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2541 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2542 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2543 		       nb_rxq, nb_txq);
2544 
2545 	init_config();
2546 	if (start_port(RTE_PORT_ALL) != 0)
2547 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2548 
2549 	/* set all ports to promiscuous mode by default */
2550 	RTE_ETH_FOREACH_DEV(port_id)
2551 		rte_eth_promiscuous_enable(port_id);
2552 
2553 	/* Init metrics library */
2554 	rte_metrics_init(rte_socket_id());
2555 
2556 #ifdef RTE_LIBRTE_LATENCY_STATS
2557 	if (latencystats_enabled != 0) {
2558 		int ret = rte_latencystats_init(1, NULL);
2559 		if (ret)
2560 			printf("Warning: latencystats init()"
2561 				" returned error %d\n",	ret);
2562 		printf("Latencystats running on lcore %d\n",
2563 			latencystats_lcore_id);
2564 	}
2565 #endif
2566 
2567 	/* Setup bitrate stats */
2568 #ifdef RTE_LIBRTE_BITRATE
2569 	if (bitrate_enabled != 0) {
2570 		bitrate_data = rte_stats_bitrate_create();
2571 		if (bitrate_data == NULL)
2572 			rte_exit(EXIT_FAILURE,
2573 				"Could not allocate bitrate data.\n");
2574 		rte_stats_bitrate_reg(bitrate_data);
2575 	}
2576 #endif
2577 
2578 #ifdef RTE_LIBRTE_CMDLINE
2579 	if (strlen(cmdline_filename) != 0)
2580 		cmdline_read_from_file(cmdline_filename);
2581 
2582 	if (interactive == 1) {
2583 		if (auto_start) {
2584 			printf("Start automatic packet forwarding\n");
2585 			start_packet_forwarding(0);
2586 		}
2587 		prompt();
2588 		pmd_test_exit();
2589 	} else
2590 #endif
2591 	{
2592 		char c;
2593 		int rc;
2594 
2595 		f_quit = 0;
2596 
2597 		printf("No command-line prompt requested, starting packet forwarding\n");
2598 		start_packet_forwarding(tx_first);
2599 		if (stats_period != 0) {
2600 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2601 			uint64_t timer_period;
2602 
2603 			/* Convert to number of cycles */
2604 			timer_period = stats_period * rte_get_timer_hz();
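			/*
			 * Illustrative figures: with stats_period = 5 and a
			 * 2 GHz timer, timer_period is 10e9 cycles between
			 * two statistics refreshes.
			 */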
2605 
2606 			while (f_quit == 0) {
2607 				cur_time = rte_get_timer_cycles();
2608 				diff_time += cur_time - prev_time;
2609 
2610 				if (diff_time >= timer_period) {
2611 					print_stats();
2612 					/* Reset the timer */
2613 					diff_time = 0;
2614 				}
2615 				/* Sleep to avoid unnecessary checks */
2616 				prev_time = cur_time;
2617 				sleep(1);
2618 			}
2619 		}
2620 
2621 		printf("Press enter to exit\n");
2622 		rc = read(0, &c, 1);
2623 		pmd_test_exit();
2624 		if (rc < 0)
2625 			return 1;
2626 	}
2627 
2628 	return 0;
2629 }
2630