xref: /dpdk/app/test-pmd/testpmd.c (revision bd8f10f6d69f997f088a72499c4bb298b70f76fd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18 
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22 
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
43 #include <rte_pci.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
46 #include <rte_dev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
50 #endif
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
53 #endif
54 #include <rte_flow.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
58 #endif
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
61 #endif
62 
63 #include "testpmd.h"
64 
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
67 
68 /* Use the master core for the command line? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
71 uint8_t tx_first;
72 char cmdline_filename[PATH_MAX] = {0};
73 
74 /*
75  * NUMA support configuration.
76  * When set, the NUMA support attempts to dispatch the allocation of the
77  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78  * probed ports among the CPU sockets 0 and 1.
79  * Otherwise, all memory is allocated from CPU socket 0.
80  */
81 uint8_t numa_support = 1; /**< numa enabled by default */
82 
83 /*
84  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
85  * not configured.
86  */
87 uint8_t socket_num = UMA_NO_CONFIG;
88 
89 /*
90  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
91  */
92 uint8_t mp_anon = 0;
93 
94 /*
95  * Store the specified sockets on which the memory pools used by the ports
96  * are allocated.
97  */
98 uint8_t port_numa[RTE_MAX_ETHPORTS];
99 
100 /*
101  * Store the specified sockets on which the RX rings used by the ports
102  * are allocated.
103  */
104 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
105 
106 /*
107  * Store the specified sockets on which the TX rings used by the ports
108  * are allocated.
109  */
110 uint8_t txring_numa[RTE_MAX_ETHPORTS];
111 
112 /*
113  * Record the Ethernet address of peer target ports to which packets are
114  * forwarded.
115  * Must be instantiated with the Ethernet addresses of peer traffic generator
116  * ports.
117  */
118 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
119 portid_t nb_peer_eth_addrs = 0;
120 
121 /*
122  * Probed Target Environment.
123  */
124 struct rte_port *ports;	       /**< For all probed ethernet ports. */
125 portid_t nb_ports;             /**< Number of probed ethernet ports. */
126 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
127 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
128 
129 /*
130  * Test Forwarding Configuration.
131  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
132  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
133  */
134 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
135 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
136 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
137 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
138 
139 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
140 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
141 
142 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
143 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
144 
145 /*
146  * Forwarding engines.
147  */
148 struct fwd_engine * fwd_engines[] = {
149 	&io_fwd_engine,
150 	&mac_fwd_engine,
151 	&mac_swap_engine,
152 	&flow_gen_engine,
153 	&rx_only_engine,
154 	&tx_only_engine,
155 	&csum_fwd_engine,
156 	&icmp_echo_engine,
157 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
158 	&softnic_tm_engine,
159 	&softnic_tm_bypass_engine,
160 #endif
161 #ifdef RTE_LIBRTE_IEEE1588
162 	&ieee1588_fwd_engine,
163 #endif
164 	NULL,
165 };
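/*
 * The forwarding engine is selectable at runtime: for example, the
 * interactive command "set fwd mac" switches cur_fwd_eng to
 * &mac_fwd_engine before the next "start".
 */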
166 
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
172 
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
175                                       * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
177 
178 /*
179  * In a container, a process running with the 'stats-period' option cannot be
180  * terminated. Set this flag to exit the stats-period loop on SIGINT/SIGTERM.
181  */
182 uint8_t f_quit;
183 
184 /*
185  * Configuration of packet segments used by the "txonly" processing engine.
186  */
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 	TXONLY_DEF_PACKET_LEN,
190 };
191 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
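/*
 * For example, the interactive command "set txpkts 64,64,64" would make
 * TXONLY build 3-segment packets: tx_pkt_seg_lengths = {64, 64, 64},
 * tx_pkt_nb_segs = 3 and tx_pkt_length = 192.
 */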
192 
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
195 
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198 
199 /* Whether the current configuration is in DCB mode; 0 means it is not. */
200 uint8_t dcb_config = 0;
201 
202 /* Whether DCB is in testing status */
203 uint8_t dcb_test = 0;
204 
205 /*
206  * Configurable number of RX/TX queues.
207  */
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
210 
211 /*
212  * Configurable number of RX/TX ring descriptors.
213  */
214 #define RTE_TEST_RX_DESC_DEFAULT 1024
215 #define RTE_TEST_TX_DESC_DEFAULT 1024
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
218 
219 #define RTE_PMD_PARAM_UNSET -1
220 /*
221  * Configurable values of RX and TX ring threshold registers.
222  */
223 
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227 
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
231 
232 /*
233  * Configurable value of RX free threshold.
234  */
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
236 
237 /*
238  * Configurable value of RX drop enable.
239  */
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
241 
242 /*
243  * Configurable value of TX free threshold.
244  */
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
246 
247 /*
248  * Configurable value of TX RS bit threshold.
249  */
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
251 
252 /*
253  * Receive Side Scaling (RSS) configuration.
254  */
255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
256 
257 /*
258  * Port topology configuration
259  */
260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
261 
262 /*
263  * Avoid flushing all the RX streams before starting forwarding.
264  */
265 uint8_t no_flush_rx = 0; /* flush by default */
266 
267 /*
268  * Flow API isolated mode.
269  */
270 uint8_t flow_isolate_all;
271 
272 /*
273  * Avoid checking the link status when starting/stopping a port.
274  */
275 uint8_t no_link_check = 0; /* check by default */
276 
277 /*
278  * Enable link status change notification
279  */
280 uint8_t lsc_interrupt = 1; /* enabled by default */
281 
282 /*
283  * Enable device removal notification.
284  */
285 uint8_t rmv_interrupt = 1; /* enabled by default */
286 
287 /*
288  * Display or mask ether events
289  * Default to all events except VF_MBOX
290  */
291 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
292 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
293 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
294 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
295 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
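/*
 * A sketch of how such a mask is typically consumed: a handler would test
 * the bit for the received event type, e.g.
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("Port %u: %s event\n", port_id, event_desc[type]);
 *
 * (event_desc here is a hypothetical name for an event-name table.)
 */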
297 
298 /*
299  * NIC bypass mode configuration options.
300  */
301 
302 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
303 /* The NIC bypass watchdog timeout. */
304 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
305 #endif
306 
307 
308 #ifdef RTE_LIBRTE_LATENCY_STATS
309 
310 /*
311  * Set when latency stats are enabled on the command line.
312  */
313 uint8_t latencystats_enabled;
314 
315 /*
316  * Lcore ID to serve latency statistics.
317  */
318 lcoreid_t latencystats_lcore_id = -1;
319 
320 #endif
321 
322 /*
323  * Ethernet device configuration.
324  */
325 struct rte_eth_rxmode rx_mode = {
326 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
328 	.ignore_offload_bitfield = 1,
329 };
330 
331 struct rte_eth_txmode tx_mode = {
332 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
333 };
334 
335 struct rte_fdir_conf fdir_conf = {
336 	.mode = RTE_FDIR_MODE_NONE,
337 	.pballoc = RTE_FDIR_PBALLOC_64K,
338 	.status = RTE_FDIR_REPORT_STATUS,
339 	.mask = {
340 		.vlan_tci_mask = 0x0,
341 		.ipv4_mask     = {
342 			.src_ip = 0xFFFFFFFF,
343 			.dst_ip = 0xFFFFFFFF,
344 		},
345 		.ipv6_mask     = {
346 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
348 		},
349 		.src_port_mask = 0xFFFF,
350 		.dst_port_mask = 0xFFFF,
351 		.mac_addr_byte_mask = 0xFF,
352 		.tunnel_type_mask = 1,
353 		.tunnel_id_mask = 0xFFFFFFFF,
354 	},
355 	.drop_queue = 127,
356 };
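/*
 * All-ones masks above mean that every bit of the corresponding field
 * participates in flow director matching; e.g. src_ip = 0xFFFFFFFF matches
 * the full 32-bit IPv4 source address, while vlan_tci_mask = 0x0 ignores
 * the VLAN TCI entirely.
 */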
357 
358 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
359 
360 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
361 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
362 
363 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
364 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
365 
366 uint16_t nb_tx_queue_stats_mappings = 0;
367 uint16_t nb_rx_queue_stats_mappings = 0;
368 
369 /*
370  * Display zero values by default for xstats
371  */
372 uint8_t xstats_hide_zero;
373 
374 unsigned int num_sockets = 0;
375 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
376 
377 #ifdef RTE_LIBRTE_BITRATE
378 /* Bitrate statistics */
379 struct rte_stats_bitrates *bitrate_data;
380 lcoreid_t bitrate_lcore_id;
381 uint8_t bitrate_enabled;
382 #endif
383 
384 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
385 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
386 
387 /* Forward function declarations */
388 static void map_port_queue_stats_mapping_registers(portid_t pi,
389 						   struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 			      enum rte_eth_event_type type,
393 			      void *param, void *ret_param);
394 
395 /*
396  * Check if all the ports are started.
397  * If yes, return positive value. If not, return zero.
398  */
399 static int all_ports_started(void);
400 
401 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
402 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
403 
404 /*
405  * Helper function to check whether a socket id has already been discovered.
406  * Return a positive value if the socket id is new, zero if already known.
407  */
408 int
409 new_socket_id(unsigned int socket_id)
410 {
411 	unsigned int i;
412 
413 	for (i = 0; i < num_sockets; i++) {
414 		if (socket_ids[i] == socket_id)
415 			return 0;
416 	}
417 	return 1;
418 }
419 
420 /*
421  * Setup default configuration.
422  */
423 static void
424 set_default_fwd_lcores_config(void)
425 {
426 	unsigned int i;
427 	unsigned int nb_lc;
428 	unsigned int sock_num;
429 
430 	nb_lc = 0;
431 	for (i = 0; i < RTE_MAX_LCORE; i++) {
432 		sock_num = rte_lcore_to_socket_id(i);
433 		if (new_socket_id(sock_num)) {
434 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
435 				rte_exit(EXIT_FAILURE,
436 					 "Total sockets greater than %u\n",
437 					 RTE_MAX_NUMA_NODES);
438 			}
439 			socket_ids[num_sockets++] = sock_num;
440 		}
441 		if (!rte_lcore_is_enabled(i))
442 			continue;
443 		if (i == rte_get_master_lcore())
444 			continue;
445 		fwd_lcores_cpuids[nb_lc++] = i;
446 	}
447 	nb_lcores = (lcoreid_t) nb_lc;
448 	nb_cfg_lcores = nb_lcores;
449 	nb_fwd_lcores = 1;
450 }
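/*
 * For example, when testpmd is started with "-l 0-3" (EAL core list) and
 * lcore 0 is the master, the loop above yields fwd_lcores_cpuids = {1, 2, 3},
 * nb_lcores = 3 and nb_fwd_lcores = 1: the master core is reserved for the
 * command line, and only one core forwards until configured otherwise.
 */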
451 
452 static void
453 set_def_peer_eth_addrs(void)
454 {
455 	portid_t i;
456 
457 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459 		peer_eth_addrs[i].addr_bytes[5] = i;
460 	}
461 }
462 
463 static void
464 set_default_fwd_ports_config(void)
465 {
466 	portid_t pt_id;
467 	int i = 0;
468 
469 	RTE_ETH_FOREACH_DEV(pt_id)
470 		fwd_ports_ids[i++] = pt_id;
471 
472 	nb_cfg_ports = nb_ports;
473 	nb_fwd_ports = nb_ports;
474 }
475 
476 void
477 set_def_fwd_config(void)
478 {
479 	set_default_fwd_lcores_config();
480 	set_def_peer_eth_addrs();
481 	set_default_fwd_ports_config();
482 }
483 
484 /*
485  * Configuration initialisation done once at init time.
486  */
487 static void
488 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
489 		 unsigned int socket_id)
490 {
491 	char pool_name[RTE_MEMPOOL_NAMESIZE];
492 	struct rte_mempool *rte_mp = NULL;
493 	uint32_t mb_size;
494 
495 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
496 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
497 
498 	TESTPMD_LOG(INFO,
499 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
500 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
501 
502 	if (mp_anon != 0) {
503 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504 			mb_size, (unsigned) mb_mempool_cache,
505 			sizeof(struct rte_pktmbuf_pool_private),
506 			socket_id, 0);
507 		if (rte_mp == NULL)
508 			goto err;
509 
510 		if (rte_mempool_populate_anon(rte_mp) == 0) {
511 			rte_mempool_free(rte_mp);
512 			rte_mp = NULL;
513 			goto err;
514 		}
515 		rte_pktmbuf_pool_init(rte_mp, NULL);
516 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
517 	} else {
518 		/* rte_pktmbuf_pool_create() is a wrapper around rte_mempool_create() */
519 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
520 				rte_mbuf_best_mempool_ops());
521 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
522 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
523 	}
524 
525 err:
526 	if (rte_mp == NULL) {
527 		rte_exit(EXIT_FAILURE,
528 			"Creation of mbuf pool for socket %u failed: %s\n",
529 			socket_id, rte_strerror(rte_errno));
530 	} else if (verbose_level > 0) {
531 		rte_mempool_dump(stdout, rte_mp);
532 	}
533 }
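/*
 * Worked example of the sizing above, assuming a typical 64-bit build where
 * sizeof(struct rte_mbuf) is 128 bytes and mbuf_seg_size keeps its default
 * of 2176 bytes (2048-byte data room + 128-byte headroom): each element is
 * created with mb_size = 128 + 2176 = 2304 bytes.
 */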
534 
535 /*
536  * Check whether a given socket id is valid in NUMA mode.
537  * If valid, return 0; otherwise return -1.
538  */
539 static int
540 check_socket_id(const unsigned int socket_id)
541 {
542 	static int warning_once = 0;
543 
544 	if (new_socket_id(socket_id)) {
545 		if (!warning_once && numa_support)
546 			printf("Warning: NUMA should be configured manually by"
547 			       " using --port-numa-config and"
548 			       " --ring-numa-config parameters along with"
549 			       " --numa.\n");
550 		warning_once = 1;
551 		return -1;
552 	}
553 	return 0;
554 }
555 
556 /*
557  * Get the allowed maximum number of RX queues.
558  * *pid returns the port id which has the minimal value of
559  * max_rx_queues among all ports.
560  */
561 queueid_t
562 get_allowed_max_nb_rxq(portid_t *pid)
563 {
564 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
565 	portid_t pi;
566 	struct rte_eth_dev_info dev_info;
567 
568 	RTE_ETH_FOREACH_DEV(pi) {
569 		rte_eth_dev_info_get(pi, &dev_info);
570 		if (dev_info.max_rx_queues < allowed_max_rxq) {
571 			allowed_max_rxq = dev_info.max_rx_queues;
572 			*pid = pi;
573 		}
574 	}
575 	return allowed_max_rxq;
576 }
577 
578 /*
579  * Check whether the input rxq is valid.
580  * The input rxq is valid if it is not greater than the maximum number
581  * of RX queues of any port.
582  * If valid, return 0; otherwise return -1.
583  */
584 int
585 check_nb_rxq(queueid_t rxq)
586 {
587 	queueid_t allowed_max_rxq;
588 	portid_t pid = 0;
589 
590 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
591 	if (rxq > allowed_max_rxq) {
592 		printf("Fail: input rxq (%u) can't be greater "
593 		       "than max_rx_queues (%u) of port %u\n",
594 		       rxq,
595 		       allowed_max_rxq,
596 		       pid);
597 		return -1;
598 	}
599 	return 0;
600 }
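/*
 * A sketch of the intended usage (e.g. from option parsing); nb_queues here
 * is simply whatever queue count the user supplied:
 *
 *	if (check_nb_rxq(nb_queues) != 0)
 *		rte_exit(EXIT_FAILURE, "rxq %u exceeds a port maximum\n",
 *			 nb_queues);
 */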
601 
602 /*
603  * Get the allowed maximum number of TX queues.
604  * *pid returns the port id which has the minimal value of
605  * max_tx_queues among all ports.
606  */
607 queueid_t
608 get_allowed_max_nb_txq(portid_t *pid)
609 {
610 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
611 	portid_t pi;
612 	struct rte_eth_dev_info dev_info;
613 
614 	RTE_ETH_FOREACH_DEV(pi) {
615 		rte_eth_dev_info_get(pi, &dev_info);
616 		if (dev_info.max_tx_queues < allowed_max_txq) {
617 			allowed_max_txq = dev_info.max_tx_queues;
618 			*pid = pi;
619 		}
620 	}
621 	return allowed_max_txq;
622 }
623 
624 /*
625  * Check whether the input txq is valid.
626  * The input txq is valid if it is not greater than the maximum number
627  * of TX queues of any port.
628  * If valid, return 0; otherwise return -1.
629  */
630 int
631 check_nb_txq(queueid_t txq)
632 {
633 	queueid_t allowed_max_txq;
634 	portid_t pid = 0;
635 
636 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
637 	if (txq > allowed_max_txq) {
638 		printf("Fail: input txq (%u) can't be greater "
639 		       "than max_tx_queues (%u) of port %u\n",
640 		       txq,
641 		       allowed_max_txq,
642 		       pid);
643 		return -1;
644 	}
645 	return 0;
646 }
647 
648 static void
649 init_config(void)
650 {
651 	portid_t pid;
652 	struct rte_port *port;
653 	struct rte_mempool *mbp;
654 	unsigned int nb_mbuf_per_pool;
655 	lcoreid_t  lc_id;
656 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
657 	struct rte_gro_param gro_param;
658 	uint32_t gso_types;
659 
660 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
661 
662 	if (numa_support) {
663 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
664 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
665 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
666 	}
667 
668 	/* Configuration of logical cores. */
669 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
670 				sizeof(struct fwd_lcore *) * nb_lcores,
671 				RTE_CACHE_LINE_SIZE);
672 	if (fwd_lcores == NULL) {
673 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
674 							"failed\n", nb_lcores);
675 	}
676 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
677 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
678 					       sizeof(struct fwd_lcore),
679 					       RTE_CACHE_LINE_SIZE);
680 		if (fwd_lcores[lc_id] == NULL) {
681 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
682 								"failed\n");
683 		}
684 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
685 	}
686 
687 	RTE_ETH_FOREACH_DEV(pid) {
688 		port = &ports[pid];
689 		/* Apply default TxRx configuration for all ports */
690 		port->dev_conf.txmode = tx_mode;
691 		port->dev_conf.rxmode = rx_mode;
692 		rte_eth_dev_info_get(pid, &port->dev_info);
693 		if (!(port->dev_info.tx_offload_capa &
694 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
695 			port->dev_conf.txmode.offloads &=
696 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
697 		if (!(port->dev_info.rx_offload_capa &
698 		      DEV_RX_OFFLOAD_CRC_STRIP))
699 			port->dev_conf.rxmode.offloads &=
700 				~DEV_RX_OFFLOAD_CRC_STRIP;
701 		if (numa_support) {
702 			if (port_numa[pid] != NUMA_NO_CONFIG)
703 				port_per_socket[port_numa[pid]]++;
704 			else {
705 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
706 
707 				/* if socket_id is invalid, set to 0 */
708 				if (check_socket_id(socket_id) < 0)
709 					socket_id = 0;
710 				port_per_socket[socket_id]++;
711 			}
712 		}
713 
714 		/* set flag to initialize port/queue */
715 		port->need_reconfig = 1;
716 		port->need_reconfig_queues = 1;
717 	}
718 
719 	/*
720 	 * Create mbuf pools.
721 	 * If NUMA support is disabled, create a single mbuf pool in
722 	 * socket 0 memory by default.
723 	 * Otherwise, create an mbuf pool in the memory of each detected socket.
724 	 *
725 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
726 	 * nb_txd can be configured at run time.
727 	 */
728 	if (param_total_num_mbufs)
729 		nb_mbuf_per_pool = param_total_num_mbufs;
730 	else {
731 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
732 			(nb_lcores * mb_mempool_cache) +
733 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
734 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
735 	}
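	/*
	 * Illustration of the default sizing above, assuming
	 * RTE_TEST_RX_DESC_MAX == RTE_TEST_TX_DESC_MAX == 2048 (an assumption;
	 * see testpmd.h), 4 lcores, the default cache of 250 and a
	 * MAX_PKT_BURST of 512:
	 * nb_mbuf_per_pool = (2048 + 4*250 + 2048 + 512) * RTE_MAX_ETHPORTS
	 *                  = 5608 * 32 = 179456 mbufs when RTE_MAX_ETHPORTS
	 * is 32.
	 */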
736 
737 	if (numa_support) {
738 		uint8_t i;
739 
740 		for (i = 0; i < num_sockets; i++)
741 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
742 					 socket_ids[i]);
743 	} else {
744 		if (socket_num == UMA_NO_CONFIG)
745 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
746 		else
747 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
748 						 socket_num);
749 	}
750 
751 	init_port_config();
752 
753 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
754 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
755 	/*
756 	 * Record which mbuf pool each logical core uses, if needed.
757 	 */
758 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
759 		mbp = mbuf_pool_find(
760 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
761 
762 		if (mbp == NULL)
763 			mbp = mbuf_pool_find(0);
764 		fwd_lcores[lc_id]->mbp = mbp;
765 		/* initialize GSO context */
766 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
767 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
768 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
769 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
770 			ETHER_CRC_LEN;
771 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
772 	}
773 
774 	/* Configuration of packet forwarding streams. */
775 	if (init_fwd_streams() < 0)
776 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
777 
778 	fwd_config_setup();
779 
780 	/* create a gro context for each lcore */
781 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
782 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
783 	gro_param.max_item_per_flow = MAX_PKT_BURST;
784 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
785 		gro_param.socket_id = rte_lcore_to_socket_id(
786 				fwd_lcores_cpuids[lc_id]);
787 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
788 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
789 			rte_exit(EXIT_FAILURE,
790 					"rte_gro_ctx_create() failed\n");
791 		}
792 	}
793 }
794 
795 
796 void
797 reconfig(portid_t new_port_id, unsigned socket_id)
798 {
799 	struct rte_port *port;
800 
801 	/* Reconfiguration of Ethernet ports. */
802 	port = &ports[new_port_id];
803 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
804 
805 	/* set flag to initialize port/queue */
806 	port->need_reconfig = 1;
807 	port->need_reconfig_queues = 1;
808 	port->socket_id = socket_id;
809 
810 	init_port_config();
811 }
812 
813 
814 int
815 init_fwd_streams(void)
816 {
817 	portid_t pid;
818 	struct rte_port *port;
819 	streamid_t sm_id, nb_fwd_streams_new;
820 	queueid_t q;
821 
822 	/* set socket id according to numa or not */
823 	RTE_ETH_FOREACH_DEV(pid) {
824 		port = &ports[pid];
825 		if (nb_rxq > port->dev_info.max_rx_queues) {
826 			printf("Fail: nb_rxq(%d) is greater than "
827 				"max_rx_queues(%d)\n", nb_rxq,
828 				port->dev_info.max_rx_queues);
829 			return -1;
830 		}
831 		if (nb_txq > port->dev_info.max_tx_queues) {
832 			printf("Fail: nb_txq(%d) is greater than "
833 				"max_tx_queues(%d)\n", nb_txq,
834 				port->dev_info.max_tx_queues);
835 			return -1;
836 		}
837 		if (numa_support) {
838 			if (port_numa[pid] != NUMA_NO_CONFIG)
839 				port->socket_id = port_numa[pid];
840 			else {
841 				port->socket_id = rte_eth_dev_socket_id(pid);
842 
843 				/* if socket_id is invalid, set to 0 */
844 				if (check_socket_id(port->socket_id) < 0)
845 					port->socket_id = 0;
846 			}
847 		}
848 		else {
849 			if (socket_num == UMA_NO_CONFIG)
850 				port->socket_id = 0;
851 			else
852 				port->socket_id = socket_num;
853 		}
854 	}
855 
856 	q = RTE_MAX(nb_rxq, nb_txq);
857 	if (q == 0) {
858 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
859 		return -1;
860 	}
861 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
862 	if (nb_fwd_streams_new == nb_fwd_streams)
863 		return 0;
864 	/* clear the old */
865 	if (fwd_streams != NULL) {
866 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
867 			if (fwd_streams[sm_id] == NULL)
868 				continue;
869 			rte_free(fwd_streams[sm_id]);
870 			fwd_streams[sm_id] = NULL;
871 		}
872 		rte_free(fwd_streams);
873 		fwd_streams = NULL;
874 	}
875 
876 	/* init new */
877 	nb_fwd_streams = nb_fwd_streams_new;
878 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
879 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
880 	if (fwd_streams == NULL)
881 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
882 						"failed\n", nb_fwd_streams);
883 
884 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
885 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
886 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
887 		if (fwd_streams[sm_id] == NULL)
888 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
889 								" failed\n");
890 	}
891 
892 	return 0;
893 }
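/*
 * Example of the stream accounting above: with nb_ports = 2, nb_rxq = 4 and
 * nb_txq = 2, q = RTE_MAX(4, 2) = 4, so nb_fwd_streams = 2 * 4 = 8 stream
 * structures are (re)allocated.
 */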
894 
895 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
896 static void
897 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
898 {
899 	unsigned int total_burst;
900 	unsigned int nb_burst;
901 	unsigned int burst_stats[3];
902 	uint16_t pktnb_stats[3];
903 	uint16_t nb_pkt;
904 	int burst_percent[3];
905 
906 	/*
907 	 * First compute the total number of packet bursts and the
908 	 * two highest numbers of bursts of the same number of packets.
909 	 */
910 	total_burst = 0;
911 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
912 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
913 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
914 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
915 		if (nb_burst == 0)
916 			continue;
917 		total_burst += nb_burst;
918 		if (nb_burst > burst_stats[0]) {
919 			burst_stats[1] = burst_stats[0];
920 			pktnb_stats[1] = pktnb_stats[0];
921 			burst_stats[0] = nb_burst;
922 			pktnb_stats[0] = nb_pkt;
923 		}
924 	}
925 	if (total_burst == 0)
926 		return;
927 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
928 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
929 	       burst_percent[0], (int) pktnb_stats[0]);
930 	if (burst_stats[0] == total_burst) {
931 		printf("]\n");
932 		return;
933 	}
934 	if (burst_stats[0] + burst_stats[1] == total_burst) {
935 		printf(" + %d%% of %d pkts]\n",
936 		       100 - burst_percent[0], pktnb_stats[1]);
937 		return;
938 	}
939 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
940 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
941 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
942 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
943 		return;
944 	}
945 	printf(" + %d%% of %d pkts + %d%% of others]\n",
946 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
947 }
948 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
949 
950 static void
951 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
952 {
953 	struct rte_port *port;
954 	uint8_t i;
955 
956 	static const char *fwd_stats_border = "----------------------";
957 
958 	port = &ports[port_id];
959 	printf("\n  %s Forward statistics for port %-2d %s\n",
960 	       fwd_stats_border, port_id, fwd_stats_border);
961 
962 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
963 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
964 		       "%-"PRIu64"\n",
965 		       stats->ipackets, stats->imissed,
966 		       (uint64_t) (stats->ipackets + stats->imissed));
967 
968 		if (cur_fwd_eng == &csum_fwd_engine)
969 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
970 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
971 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
972 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
973 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
974 		}
975 
976 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
977 		       "%-"PRIu64"\n",
978 		       stats->opackets, port->tx_dropped,
979 		       (uint64_t) (stats->opackets + port->tx_dropped));
980 	}
981 	else {
982 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
983 		       "%14"PRIu64"\n",
984 		       stats->ipackets, stats->imissed,
985 		       (uint64_t) (stats->ipackets + stats->imissed));
986 
987 		if (cur_fwd_eng == &csum_fwd_engine)
988 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
989 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
990 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
991 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
992 			printf("  RX-nombufs:             %14"PRIu64"\n",
993 			       stats->rx_nombuf);
994 		}
995 
996 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
997 		       "%14"PRIu64"\n",
998 		       stats->opackets, port->tx_dropped,
999 		       (uint64_t) (stats->opackets + port->tx_dropped));
1000 	}
1001 
1002 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1003 	if (port->rx_stream)
1004 		pkt_burst_stats_display("RX",
1005 			&port->rx_stream->rx_burst_stats);
1006 	if (port->tx_stream)
1007 		pkt_burst_stats_display("TX",
1008 			&port->tx_stream->tx_burst_stats);
1009 #endif
1010 
1011 	if (port->rx_queue_stats_mapping_enabled) {
1012 		printf("\n");
1013 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1014 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1015 			       "     RX-errors:%14"PRIu64
1016 			       "    RX-bytes:%14"PRIu64"\n",
1017 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1018 		}
1019 		printf("\n");
1020 	}
1021 	if (port->tx_queue_stats_mapping_enabled) {
1022 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1023 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1024 			       "                                 TX-bytes:%14"PRIu64"\n",
1025 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1026 		}
1027 	}
1028 
1029 	printf("  %s--------------------------------%s\n",
1030 	       fwd_stats_border, fwd_stats_border);
1031 }
1032 
1033 static void
1034 fwd_stream_stats_display(streamid_t stream_id)
1035 {
1036 	struct fwd_stream *fs;
1037 	static const char *fwd_top_stats_border = "-------";
1038 
1039 	fs = fwd_streams[stream_id];
1040 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1041 	    (fs->fwd_dropped == 0))
1042 		return;
1043 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1044 	       "TX Port=%2d/Queue=%2d %s\n",
1045 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1046 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1047 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1048 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1049 
1050 	/* if checksum mode */
1051 	if (cur_fwd_eng == &csum_fwd_engine) {
1052 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
1053 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1054 	}
1055 
1056 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1057 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1058 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1059 #endif
1060 }
1061 
1062 static void
1063 flush_fwd_rx_queues(void)
1064 {
1065 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1066 	portid_t  rxp;
1067 	portid_t port_id;
1068 	queueid_t rxq;
1069 	uint16_t  nb_rx;
1070 	uint16_t  i;
1071 	uint8_t   j;
1072 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1073 	uint64_t timer_period;
1074 
1075 	/* convert to number of cycles */
1076 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1077 
1078 	for (j = 0; j < 2; j++) {
1079 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1080 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1081 				port_id = fwd_ports_ids[rxp];
1082 				/**
1083 				* testpmd can get stuck in the do-while loop below
1084 				* if rte_eth_rx_burst() always returns a nonzero
1085 				* number of packets. So a timer is added to exit
1086 				* this loop after the 1-second timer expires.
1087 				*/
1088 				prev_tsc = rte_rdtsc();
1089 				do {
1090 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1091 						pkts_burst, MAX_PKT_BURST);
1092 					for (i = 0; i < nb_rx; i++)
1093 						rte_pktmbuf_free(pkts_burst[i]);
1094 
1095 					cur_tsc = rte_rdtsc();
1096 					diff_tsc = cur_tsc - prev_tsc;
1097 					timer_tsc += diff_tsc;
1098 				} while ((nb_rx > 0) &&
1099 					(timer_tsc < timer_period));
1100 				timer_tsc = 0;
1101 			}
1102 		}
1103 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1104 	}
1105 }
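/*
 * The outer loop above runs twice, with a 10 ms pause in between, so that
 * packets still in flight during the first pass are drained by the second.
 */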
1106 
1107 static void
1108 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1109 {
1110 	struct fwd_stream **fsm;
1111 	streamid_t nb_fs;
1112 	streamid_t sm_id;
1113 #ifdef RTE_LIBRTE_BITRATE
1114 	uint64_t tics_per_1sec;
1115 	uint64_t tics_datum;
1116 	uint64_t tics_current;
1117 	uint8_t idx_port, cnt_ports;
1118 
1119 	cnt_ports = rte_eth_dev_count();
1120 	tics_datum = rte_rdtsc();
1121 	tics_per_1sec = rte_get_timer_hz();
1122 #endif
1123 	fsm = &fwd_streams[fc->stream_idx];
1124 	nb_fs = fc->stream_nb;
1125 	do {
1126 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1127 			(*pkt_fwd)(fsm[sm_id]);
1128 #ifdef RTE_LIBRTE_BITRATE
1129 		if (bitrate_enabled != 0 &&
1130 				bitrate_lcore_id == rte_lcore_id()) {
1131 			tics_current = rte_rdtsc();
1132 			if (tics_current - tics_datum >= tics_per_1sec) {
1133 				/* Periodic bitrate calculation */
1134 				for (idx_port = 0;
1135 						idx_port < cnt_ports;
1136 						idx_port++)
1137 					rte_stats_bitrate_calc(bitrate_data,
1138 						idx_port);
1139 				tics_datum = tics_current;
1140 			}
1141 		}
1142 #endif
1143 #ifdef RTE_LIBRTE_LATENCY_STATS
1144 		if (latencystats_enabled != 0 &&
1145 				latencystats_lcore_id == rte_lcore_id())
1146 			rte_latencystats_update();
1147 #endif
1148 
1149 	} while (! fc->stopped);
1150 }
1151 
1152 static int
1153 start_pkt_forward_on_core(void *fwd_arg)
1154 {
1155 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1156 			     cur_fwd_config.fwd_eng->packet_fwd);
1157 	return 0;
1158 }
1159 
1160 /*
1161  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1162  * Used to start communication flows in network loopback test configurations.
1163  */
1164 static int
1165 run_one_txonly_burst_on_core(void *fwd_arg)
1166 {
1167 	struct fwd_lcore *fwd_lc;
1168 	struct fwd_lcore tmp_lcore;
1169 
1170 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1171 	tmp_lcore = *fwd_lc;
1172 	tmp_lcore.stopped = 1;
1173 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1174 	return 0;
1175 }
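/*
 * Note on the copy above: tmp_lcore.stopped = 1 makes the do-while loop in
 * run_pkt_fwd_on_lcore() exit after a single pass, so each stream transmits
 * exactly one burst; the real fwd_lcore state is left untouched.
 */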
1176 
1177 /*
1178  * Launch packet forwarding:
1179  *     - Setup per-port forwarding context.
1180  *     - Launch logical cores with their forwarding configuration.
1181  */
1182 static void
1183 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1184 {
1185 	port_fwd_begin_t port_fwd_begin;
1186 	unsigned int i;
1187 	unsigned int lc_id;
1188 	int diag;
1189 
1190 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1191 	if (port_fwd_begin != NULL) {
1192 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1193 			(*port_fwd_begin)(fwd_ports_ids[i]);
1194 	}
1195 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1196 		lc_id = fwd_lcores_cpuids[i];
1197 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1198 			fwd_lcores[i]->stopped = 0;
1199 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1200 						     fwd_lcores[i], lc_id);
1201 			if (diag != 0)
1202 				printf("launch lcore %u failed - diag=%d\n",
1203 				       lc_id, diag);
1204 		}
1205 	}
1206 }
1207 
1208 /*
1209  * Launch packet forwarding configuration.
1210  */
1211 void
1212 start_packet_forwarding(int with_tx_first)
1213 {
1214 	port_fwd_begin_t port_fwd_begin;
1215 	port_fwd_end_t  port_fwd_end;
1216 	struct rte_port *port;
1217 	unsigned int i;
1218 	portid_t   pt_id;
1219 	streamid_t sm_id;
1220 
1221 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1222 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1223 
1224 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1225 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1226 
1227 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1228 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1229 		(!nb_rxq || !nb_txq))
1230 		rte_exit(EXIT_FAILURE,
1231 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1232 			cur_fwd_eng->fwd_mode_name);
1233 
1234 	if (all_ports_started() == 0) {
1235 		printf("Not all ports were started\n");
1236 		return;
1237 	}
1238 	if (test_done == 0) {
1239 		printf("Packet forwarding already started\n");
1240 		return;
1241 	}
1242 
1243 	if (init_fwd_streams() < 0) {
1244 		printf("Fail from init_fwd_streams()\n");
1245 		return;
1246 	}
1247 
1248 	if (dcb_test) {
1249 		for (i = 0; i < nb_fwd_ports; i++) {
1250 			pt_id = fwd_ports_ids[i];
1251 			port = &ports[pt_id];
1252 			if (!port->dcb_flag) {
1253 				printf("In DCB mode, all forwarding ports must "
1254 				       "be configured in this mode.\n");
1255 				return;
1256 			}
1257 		}
1258 		if (nb_fwd_lcores == 1) {
1259 			printf("In DCB mode, the number of forwarding cores "
1260 			       "should be larger than 1.\n");
1261 			return;
1262 		}
1263 	}
1264 	test_done = 0;
1265 
1266 	if (!no_flush_rx)
1267 		flush_fwd_rx_queues();
1268 
1269 	fwd_config_setup();
1270 	pkt_fwd_config_display(&cur_fwd_config);
1271 	rxtx_config_display();
1272 
1273 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1274 		pt_id = fwd_ports_ids[i];
1275 		port = &ports[pt_id];
1276 		rte_eth_stats_get(pt_id, &port->stats);
1277 		port->tx_dropped = 0;
1278 
1279 		map_port_queue_stats_mapping_registers(pt_id, port);
1280 	}
1281 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1282 		fwd_streams[sm_id]->rx_packets = 0;
1283 		fwd_streams[sm_id]->tx_packets = 0;
1284 		fwd_streams[sm_id]->fwd_dropped = 0;
1285 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1286 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1287 
1288 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1289 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1290 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1291 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1292 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1293 #endif
1294 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1295 		fwd_streams[sm_id]->core_cycles = 0;
1296 #endif
1297 	}
1298 	if (with_tx_first) {
1299 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1300 		if (port_fwd_begin != NULL) {
1301 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1302 				(*port_fwd_begin)(fwd_ports_ids[i]);
1303 		}
1304 		while (with_tx_first--) {
1305 			launch_packet_forwarding(
1306 					run_one_txonly_burst_on_core);
1307 			rte_eal_mp_wait_lcore();
1308 		}
1309 		port_fwd_end = tx_only_engine.port_fwd_end;
1310 		if (port_fwd_end != NULL) {
1311 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1312 				(*port_fwd_end)(fwd_ports_ids[i]);
1313 		}
1314 	}
1315 	launch_packet_forwarding(start_pkt_forward_on_core);
1316 }
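/*
 * Typical entry points into this function (interactive commands): "start"
 * calls it with with_tx_first = 0, while "start tx_first [n]" passes n
 * (default 1) so that n TXONLY bursts prime loopback topologies before
 * normal forwarding begins.
 */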
1317 
1318 void
1319 stop_packet_forwarding(void)
1320 {
1321 	struct rte_eth_stats stats;
1322 	struct rte_port *port;
1323 	port_fwd_end_t  port_fwd_end;
1324 	int i;
1325 	portid_t   pt_id;
1326 	streamid_t sm_id;
1327 	lcoreid_t  lc_id;
1328 	uint64_t total_recv;
1329 	uint64_t total_xmit;
1330 	uint64_t total_rx_dropped;
1331 	uint64_t total_tx_dropped;
1332 	uint64_t total_rx_nombuf;
1333 	uint64_t tx_dropped;
1334 	uint64_t rx_bad_ip_csum;
1335 	uint64_t rx_bad_l4_csum;
1336 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1337 	uint64_t fwd_cycles;
1338 #endif
1339 
1340 	static const char *acc_stats_border = "+++++++++++++++";
1341 
1342 	if (test_done) {
1343 		printf("Packet forwarding not started\n");
1344 		return;
1345 	}
1346 	printf("Telling cores to stop...");
1347 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1348 		fwd_lcores[lc_id]->stopped = 1;
1349 	printf("\nWaiting for lcores to finish...\n");
1350 	rte_eal_mp_wait_lcore();
1351 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1352 	if (port_fwd_end != NULL) {
1353 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1354 			pt_id = fwd_ports_ids[i];
1355 			(*port_fwd_end)(pt_id);
1356 		}
1357 	}
1358 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1359 	fwd_cycles = 0;
1360 #endif
1361 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1362 		if (cur_fwd_config.nb_fwd_streams >
1363 		    cur_fwd_config.nb_fwd_ports) {
1364 			fwd_stream_stats_display(sm_id);
1365 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1366 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1367 		} else {
1368 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1369 				fwd_streams[sm_id];
1370 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1371 				fwd_streams[sm_id];
1372 		}
1373 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1374 		tx_dropped = (uint64_t) (tx_dropped +
1375 					 fwd_streams[sm_id]->fwd_dropped);
1376 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1377 
1378 		rx_bad_ip_csum =
1379 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1380 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1381 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1382 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1383 							rx_bad_ip_csum;
1384 
1385 		rx_bad_l4_csum =
1386 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1387 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1388 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1389 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1390 							rx_bad_l4_csum;
1391 
1392 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1393 		fwd_cycles = (uint64_t) (fwd_cycles +
1394 					 fwd_streams[sm_id]->core_cycles);
1395 #endif
1396 	}
1397 	total_recv = 0;
1398 	total_xmit = 0;
1399 	total_rx_dropped = 0;
1400 	total_tx_dropped = 0;
1401 	total_rx_nombuf  = 0;
1402 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1403 		pt_id = fwd_ports_ids[i];
1404 
1405 		port = &ports[pt_id];
1406 		rte_eth_stats_get(pt_id, &stats);
1407 		stats.ipackets -= port->stats.ipackets;
1408 		port->stats.ipackets = 0;
1409 		stats.opackets -= port->stats.opackets;
1410 		port->stats.opackets = 0;
1411 		stats.ibytes   -= port->stats.ibytes;
1412 		port->stats.ibytes = 0;
1413 		stats.obytes   -= port->stats.obytes;
1414 		port->stats.obytes = 0;
1415 		stats.imissed  -= port->stats.imissed;
1416 		port->stats.imissed = 0;
1417 		stats.oerrors  -= port->stats.oerrors;
1418 		port->stats.oerrors = 0;
1419 		stats.rx_nombuf -= port->stats.rx_nombuf;
1420 		port->stats.rx_nombuf = 0;
1421 
1422 		total_recv += stats.ipackets;
1423 		total_xmit += stats.opackets;
1424 		total_rx_dropped += stats.imissed;
1425 		total_tx_dropped += port->tx_dropped;
1426 		total_rx_nombuf  += stats.rx_nombuf;
1427 
1428 		fwd_port_stats_display(pt_id, &stats);
1429 	}
1430 
1431 	printf("\n  %s Accumulated forward statistics for all ports"
1432 	       "%s\n",
1433 	       acc_stats_border, acc_stats_border);
1434 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1435 	       "%-"PRIu64"\n"
1436 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1437 	       "%-"PRIu64"\n",
1438 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1439 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1440 	if (total_rx_nombuf > 0)
1441 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1442 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1443 	       "%s\n",
1444 	       acc_stats_border, acc_stats_border);
1445 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1446 	if (total_recv > 0)
1447 		printf("\n  CPU cycles/packet=%u (total cycles="
1448 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1449 		       (unsigned int)(fwd_cycles / total_recv),
1450 		       fwd_cycles, total_recv);
1451 #endif
1452 	printf("\nDone.\n");
1453 	test_done = 1;
1454 }
1455 
1456 void
1457 dev_set_link_up(portid_t pid)
1458 {
1459 	if (rte_eth_dev_set_link_up(pid) < 0)
1460 		printf("\nSet link up failed.\n");
1461 }
1462 
1463 void
1464 dev_set_link_down(portid_t pid)
1465 {
1466 	if (rte_eth_dev_set_link_down(pid) < 0)
1467 		printf("\nSet link down failed.\n");
1468 }
1469 
1470 static int
1471 all_ports_started(void)
1472 {
1473 	portid_t pi;
1474 	struct rte_port *port;
1475 
1476 	RTE_ETH_FOREACH_DEV(pi) {
1477 		port = &ports[pi];
1478 		/* Check if there is a port which is not started */
1479 		if ((port->port_status != RTE_PORT_STARTED) &&
1480 			(port->slave_flag == 0))
1481 			return 0;
1482 	}
1483 
1484 	/* All ports are started */
1485 	return 1;
1486 }
1487 
1488 int
1489 port_is_stopped(portid_t port_id)
1490 {
1491 	struct rte_port *port = &ports[port_id];
1492 
1493 	if ((port->port_status != RTE_PORT_STOPPED) &&
1494 	    (port->slave_flag == 0))
1495 		return 0;
1496 	return 1;
1497 }
1498 
1499 int
1500 all_ports_stopped(void)
1501 {
1502 	portid_t pi;
1503 
1504 	RTE_ETH_FOREACH_DEV(pi) {
1505 		if (!port_is_stopped(pi))
1506 			return 0;
1507 	}
1508 
1509 	return 1;
1510 }
1511 
1512 int
1513 port_is_started(portid_t port_id)
1514 {
1515 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1516 		return 0;
1517 
1518 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1519 		return 0;
1520 
1521 	return 1;
1522 }
1523 
1524 static int
1525 port_is_closed(portid_t port_id)
1526 {
1527 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1528 		return 0;
1529 
1530 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1531 		return 0;
1532 
1533 	return 1;
1534 }
1535 
1536 int
1537 start_port(portid_t pid)
1538 {
1539 	int diag, need_check_link_status = -1;
1540 	portid_t pi;
1541 	queueid_t qi;
1542 	struct rte_port *port;
1543 	struct ether_addr mac_addr;
1544 	enum rte_eth_event_type event_type;
1545 
1546 	if (port_id_is_invalid(pid, ENABLED_WARN))
1547 		return 0;
1548 
1549 	if (dcb_config)
1550 		dcb_test = 1;
1551 	RTE_ETH_FOREACH_DEV(pi) {
1552 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1553 			continue;
1554 
1555 		need_check_link_status = 0;
1556 		port = &ports[pi];
1557 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1558 						 RTE_PORT_HANDLING) == 0) {
1559 			printf("Port %d is now not stopped\n", pi);
1560 			continue;
1561 		}
1562 
1563 		if (port->need_reconfig > 0) {
1564 			port->need_reconfig = 0;
1565 
1566 			if (flow_isolate_all) {
1567 				int ret = port_flow_isolate(pi, 1);
1568 				if (ret) {
1569 					printf("Failed to apply isolated"
1570 					       " mode on port %d\n", pi);
1571 					return -1;
1572 				}
1573 			}
1574 
1575 			printf("Configuring Port %d (socket %u)\n", pi,
1576 					port->socket_id);
1577 			/* configure port */
1578 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1579 						&(port->dev_conf));
1580 			if (diag != 0) {
1581 				if (rte_atomic16_cmpset(&(port->port_status),
1582 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1583 					printf("Port %d can not be set back "
1584 							"to stopped\n", pi);
1585 				printf("Fail to configure port %d\n", pi);
1586 				/* try to reconfigure port next time */
1587 				port->need_reconfig = 1;
1588 				return -1;
1589 			}
1590 		}
1591 		if (port->need_reconfig_queues > 0) {
1592 			port->need_reconfig_queues = 0;
1593 			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1594 			/* Apply Tx offloads configuration */
1595 			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1596 			/* setup tx queues */
1597 			for (qi = 0; qi < nb_txq; qi++) {
1598 				if ((numa_support) &&
1599 					(txring_numa[pi] != NUMA_NO_CONFIG))
1600 					diag = rte_eth_tx_queue_setup(pi, qi,
1601 						nb_txd, txring_numa[pi],
1602 						&(port->tx_conf));
1603 				else
1604 					diag = rte_eth_tx_queue_setup(pi, qi,
1605 						nb_txd,port->socket_id,
1606 						&(port->tx_conf));
1607 
1608 				if (diag == 0)
1609 					continue;
1610 
1611 				/* Failed to set up a tx queue; roll back and return */
1612 				if (rte_atomic16_cmpset(&(port->port_status),
1613 							RTE_PORT_HANDLING,
1614 							RTE_PORT_STOPPED) == 0)
1615 					printf("Port %d can not be set back "
1616 							"to stopped\n", pi);
1617 				printf("Fail to configure port %d tx queues\n", pi);
1618 				/* try to reconfigure queues next time */
1619 				port->need_reconfig_queues = 1;
1620 				return -1;
1621 			}
1622 			/* Apply Rx offloads configuration */
1623 			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1624 			/* setup rx queues */
1625 			for (qi = 0; qi < nb_rxq; qi++) {
1626 				if ((numa_support) &&
1627 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1628 					struct rte_mempool * mp =
1629 						mbuf_pool_find(rxring_numa[pi]);
1630 					if (mp == NULL) {
1631 						printf("Failed to setup RX queue:"
1632 							"No mempool allocation"
1633 							" on the socket %d\n",
1634 							rxring_numa[pi]);
1635 						return -1;
1636 					}
1637 
1638 					diag = rte_eth_rx_queue_setup(pi, qi,
1639 					     nb_rxd, rxring_numa[pi],
1640 					     &(port->rx_conf), mp);
1641 				} else {
1642 					struct rte_mempool *mp =
1643 						mbuf_pool_find(port->socket_id);
1644 					if (mp == NULL) {
1645 						printf("Failed to setup RX queue:"
1646 							"No mempool allocation"
1647 							" on the socket %d\n",
1648 							port->socket_id);
1649 						return -1;
1650 					}
1651 					diag = rte_eth_rx_queue_setup(pi, qi,
1652 					     nb_rxd,port->socket_id,
1653 					     &(port->rx_conf), mp);
1654 				}
1655 				if (diag == 0)
1656 					continue;
1657 
1658 				/* Failed to set up an rx queue; roll back and return */
1659 				if (rte_atomic16_cmpset(&(port->port_status),
1660 							RTE_PORT_HANDLING,
1661 							RTE_PORT_STOPPED) == 0)
1662 					printf("Port %d can not be set back "
1663 							"to stopped\n", pi);
1664 				printf("Fail to configure port %d rx queues\n", pi);
1665 				/* try to reconfigure queues next time */
1666 				port->need_reconfig_queues = 1;
1667 				return -1;
1668 			}
1669 		}
1670 
1671 		/* start port */
1672 		if (rte_eth_dev_start(pi) < 0) {
1673 			printf("Fail to start port %d\n", pi);
1674 
1675 			/* Failed to start the port; restore the stopped state */
1676 			if (rte_atomic16_cmpset(&(port->port_status),
1677 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1678 				printf("Port %d can not be set back to "
1679 							"stopped\n", pi);
1680 			continue;
1681 		}
1682 
1683 		if (rte_atomic16_cmpset(&(port->port_status),
1684 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1685 			printf("Port %d can not be set into started\n", pi);
1686 
1687 		rte_eth_macaddr_get(pi, &mac_addr);
1688 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1689 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1690 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1691 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1692 
1693 		/* at least one port started, need to check link status */
1694 		need_check_link_status = 1;
1695 	}
1696 
1697 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1698 	     event_type < RTE_ETH_EVENT_MAX;
1699 	     event_type++) {
1700 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1701 						event_type,
1702 						eth_event_callback,
1703 						NULL);
1704 		if (diag) {
1705 			printf("Failed to setup even callback for event %d\n",
1706 				event_type);
1707 			return -1;
1708 		}
1709 	}
1710 
1711 	if (need_check_link_status == 1 && !no_link_check)
1712 		check_all_ports_link_status(RTE_PORT_ALL);
1713 	else if (need_check_link_status == 0)
1714 		printf("Please stop the ports first\n");
1715 
1716 	printf("Done\n");
1717 	return 0;
1718 }
1719 
1720 void
1721 stop_port(portid_t pid)
1722 {
1723 	portid_t pi;
1724 	struct rte_port *port;
1725 	int need_check_link_status = 0;
1726 
1727 	if (dcb_test) {
1728 		dcb_test = 0;
1729 		dcb_config = 0;
1730 	}
1731 
1732 	if (port_id_is_invalid(pid, ENABLED_WARN))
1733 		return;
1734 
1735 	printf("Stopping ports...\n");
1736 
1737 	RTE_ETH_FOREACH_DEV(pi) {
1738 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1739 			continue;
1740 
1741 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1742 			printf("Please remove port %d from forwarding configuration.\n", pi);
1743 			continue;
1744 		}
1745 
1746 		if (port_is_bonding_slave(pi)) {
1747 			printf("Please remove port %d from bonded device.\n", pi);
1748 			continue;
1749 		}
1750 
1751 		port = &ports[pi];
1752 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1753 						RTE_PORT_HANDLING) == 0)
1754 			continue;
1755 
1756 		rte_eth_dev_stop(pi);
1757 
1758 		if (rte_atomic16_cmpset(&(port->port_status),
1759 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1760 			printf("Port %d can not be set into stopped\n", pi);
1761 		need_check_link_status = 1;
1762 	}
1763 	if (need_check_link_status && !no_link_check)
1764 		check_all_ports_link_status(RTE_PORT_ALL);
1765 
1766 	printf("Done\n");
1767 }
1768 
1769 void
1770 close_port(portid_t pid)
1771 {
1772 	portid_t pi;
1773 	struct rte_port *port;
1774 
1775 	if (port_id_is_invalid(pid, ENABLED_WARN))
1776 		return;
1777 
1778 	printf("Closing ports...\n");
1779 
1780 	RTE_ETH_FOREACH_DEV(pi) {
1781 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1782 			continue;
1783 
1784 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1785 			printf("Please remove port %d from forwarding configuration.\n", pi);
1786 			continue;
1787 		}
1788 
1789 		if (port_is_bonding_slave(pi)) {
1790 			printf("Please remove port %d from bonded device.\n", pi);
1791 			continue;
1792 		}
1793 
1794 		port = &ports[pi];
1795 		if (rte_atomic16_cmpset(&(port->port_status),
1796 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1797 			printf("Port %d is already closed\n", pi);
1798 			continue;
1799 		}
1800 
1801 		if (rte_atomic16_cmpset(&(port->port_status),
1802 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1803 			printf("Port %d is now not stopped\n", pi);
1804 			continue;
1805 		}
1806 
1807 		if (port->flow_list)
1808 			port_flow_flush(pi);
1809 		rte_eth_dev_close(pi);
1810 
1811 		if (rte_atomic16_cmpset(&(port->port_status),
1812 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1813 			printf("Port %d cannot be set to closed\n", pi);
1814 	}
1815 
1816 	printf("Done\n");
1817 }
1818 
1819 void
1820 reset_port(portid_t pid)
1821 {
1822 	int diag;
1823 	portid_t pi;
1824 	struct rte_port *port;
1825 
1826 	if (port_id_is_invalid(pid, ENABLED_WARN))
1827 		return;
1828 
1829 	printf("Resetting ports...\n");
1830 
1831 	RTE_ETH_FOREACH_DEV(pi) {
1832 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1833 			continue;
1834 
1835 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1836 			printf("Please remove port %d from forwarding "
1837 			       "configuration.\n", pi);
1838 			continue;
1839 		}
1840 
1841 		if (port_is_bonding_slave(pi)) {
1842 			printf("Please remove port %d from bonded device.\n",
1843 			       pi);
1844 			continue;
1845 		}
1846 
1847 		diag = rte_eth_dev_reset(pi);
1848 		if (diag == 0) {
1849 			port = &ports[pi];
1850 			port->need_reconfig = 1;
1851 			port->need_reconfig_queues = 1;
1852 		} else {
1853 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1854 		}
1855 	}
1856 
1857 	printf("Done\n");
1858 }
1859 
1860 void
1861 attach_port(char *identifier)
1862 {
1863 	portid_t pi = 0;
1864 	unsigned int socket_id;
1865 
1866 	printf("Attaching a new port...\n");
1867 
1868 	if (identifier == NULL) {
1869 		printf("Invalid port identifier specified\n");
1870 		return;
1871 	}
1872 
1873 	if (rte_eth_dev_attach(identifier, &pi))
1874 		return;
1875 
1876 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1877 	/* if socket_id is invalid, set to 0 */
1878 	if (check_socket_id(socket_id) < 0)
1879 		socket_id = 0;
1880 	reconfig(pi, socket_id);
1881 	rte_eth_promiscuous_enable(pi);
1882 
1883 	nb_ports = rte_eth_dev_count();
1884 
1885 	ports[pi].port_status = RTE_PORT_STOPPED;
1886 
1887 	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1888 	printf("Done\n");
1889 }
1890 
1891 void
1892 detach_port(portid_t port_id)
1893 {
1894 	char name[RTE_ETH_NAME_MAX_LEN];
1895 
1896 	printf("Detaching a port...\n");
1897 
1898 	if (!port_is_closed(port_id)) {
1899 		printf("Please close port first\n");
1900 		return;
1901 	}
1902 
1903 	if (ports[port_id].flow_list)
1904 		port_flow_flush(port_id);
1905 
1906 	if (rte_eth_dev_detach(port_id, name)) {
1907 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1908 		return;
1909 	}
1910 
1911 	nb_ports = rte_eth_dev_count();
1912 
1913 	printf("Port '%s' is detached. Total ports is now %d\n",
1914 			name, nb_ports);
1915 	printf("Done\n");
1917 }
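
/*
 * Hotplug round trip built from the helpers above. The devargs string is
 * a hypothetical example; any identifier the EAL can probe works the
 * same way, and the port must be stopped and closed before detaching:
 *
 *	attach_port("net_pcap0,iface=eth0");
 *	... forward traffic ...
 *	stop_port(pi);
 *	close_port(pi);
 *	detach_port(pi);
 */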
1918 
1919 void
1920 pmd_test_exit(void)
1921 {
1922 	portid_t pt_id;
1923 
1924 	if (test_done == 0)
1925 		stop_packet_forwarding();
1926 
1927 	if (ports != NULL) {
1928 		no_link_check = 1;
1929 		RTE_ETH_FOREACH_DEV(pt_id) {
1930 			printf("\nShutting down port %d...\n", pt_id);
1931 			fflush(stdout);
1932 			stop_port(pt_id);
1933 			close_port(pt_id);
1934 		}
1935 	}
1936 	printf("\nBye...\n");
1937 }
1938 
1939 typedef void (*cmd_func_t)(void);
1940 struct pmd_test_command {
1941 	const char *cmd_name;
1942 	cmd_func_t cmd_func;
1943 };
1944 
1945 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1946 
1947 /* Poll the link status of all ports for up to 9s, then print the final status of each */
1948 static void
1949 check_all_ports_link_status(uint32_t port_mask)
1950 {
1951 #define CHECK_INTERVAL 100 /* 100ms */
1952 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1953 	portid_t portid;
1954 	uint8_t count, all_ports_up, print_flag = 0;
1955 	struct rte_eth_link link;
1956 
1957 	printf("Checking link statuses...\n");
1958 	fflush(stdout);
1959 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1960 		all_ports_up = 1;
1961 		RTE_ETH_FOREACH_DEV(portid) {
1962 			if ((port_mask & (1 << portid)) == 0)
1963 				continue;
1964 			memset(&link, 0, sizeof(link));
1965 			rte_eth_link_get_nowait(portid, &link);
1966 			/* print link status if flag set */
1967 			if (print_flag == 1) {
1968 				if (link.link_status)
1969 					printf(
1970 					"Port %d Link Up. speed %u Mbps - %s\n",
1971 					portid, link.link_speed,
1972 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1973 					("full-duplex") : ("half-duplex"));
1974 				else
1975 					printf("Port %d Link Down\n", portid);
1976 				continue;
1977 			}
1978 			/* clear all_ports_up flag if any link down */
1979 			if (link.link_status == ETH_LINK_DOWN) {
1980 				all_ports_up = 0;
1981 				break;
1982 			}
1983 		}
1984 		/* after finally printing all link status, get out */
1985 		if (print_flag == 1)
1986 			break;
1987 
1988 		if (all_ports_up == 0) {
1989 			fflush(stdout);
1990 			rte_delay_ms(CHECK_INTERVAL);
1991 		}
1992 
1993 		/* set the print_flag if all ports up or timeout */
1994 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1995 			print_flag = 1;
1996 		}
1997 
1998 		if (lsc_interrupt)
1999 			break;
2000 	}
2001 }
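
/*
 * Worked example of the polling budget above: each pass samples every
 * selected port with rte_eth_link_get_nowait() and then sleeps for
 * CHECK_INTERVAL, so the budget is 90 * 100ms = 9s before the final
 * status is printed. Raising MAX_CHECK_TIME to 150 would stretch the
 * budget to 15s; with lsc_interrupt set the loop exits after one pass
 * and leaves further reporting to the LSC event callback.
 */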
2002 
2003 static void
2004 rmv_event_callback(void *arg)
2005 {
2006 	struct rte_eth_dev *dev;
2007 	portid_t port_id = (intptr_t)arg;
2008 
2009 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2010 	dev = &rte_eth_devices[port_id];
2011 
2012 	stop_port(port_id);
2013 	close_port(port_id);
2014 	printf("removing device %s\n", dev->device->name);
2015 	if (rte_eal_dev_detach(dev->device))
2016 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2017 			dev->device->name);
2018 }
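
/*
 * rmv_event_callback() is invoked through an EAL alarm (see the
 * RTE_ETH_EVENT_INTR_RMV case below) rather than directly from the
 * interrupt thread, since detaching a device from inside its own event
 * callback can tear down state the callback is still running on. A
 * minimal sketch of the deferral, 100ms from now:
 *
 *	rte_eal_alarm_set(100000, rmv_event_callback,
 *			(void *)(intptr_t)port_id);
 */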
2019 
2020 /* This function is used by the interrupt thread */
2021 static int
2022 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2023 		  void *ret_param)
2024 {
2025 	static const char * const event_desc[] = {
2026 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2027 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2028 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2029 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2030 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2031 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2032 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2033 		[RTE_ETH_EVENT_NEW] = "device probed",
2034 		[RTE_ETH_EVENT_DESTROY] = "device released",
2035 		[RTE_ETH_EVENT_MAX] = NULL,
2036 	};
2037 
2038 	RTE_SET_USED(param);
2039 	RTE_SET_USED(ret_param);
2040 
2041 	if (type >= RTE_ETH_EVENT_MAX) {
2042 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2043 			port_id, __func__, type);
2044 		fflush(stderr);
2045 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2046 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2047 			event_desc[type]);
2048 		fflush(stdout);
2049 	}
2050 
2051 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2052 		return 0;
2053 
2054 	switch (type) {
2055 	case RTE_ETH_EVENT_INTR_RMV:
2056 		if (rte_eal_alarm_set(100000,
2057 				rmv_event_callback, (void *)(intptr_t)port_id))
2058 			fprintf(stderr, "Could not set up deferred device removal\n");
2059 		break;
2060 	default:
2061 		break;
2062 	}
2063 	return 0;
2064 }
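
/*
 * Sketch of how an application hooks this handler up (testpmd performs
 * the equivalent registration during port setup, outside this excerpt);
 * one registration per event type of interest is assumed:
 *
 *	enum rte_eth_event_type ev;
 *
 *	for (ev = RTE_ETH_EVENT_UNKNOWN; ev < RTE_ETH_EVENT_MAX; ev++)
 *		rte_eth_dev_callback_register(port_id, ev,
 *				eth_event_callback, NULL);
 */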
2065 
2066 static int
2067 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2068 {
2069 	uint16_t i;
2070 	int diag;
2071 	uint8_t mapping_found = 0;
2072 
2073 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2074 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2075 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2076 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2077 					tx_queue_stats_mappings[i].queue_id,
2078 					tx_queue_stats_mappings[i].stats_counter_id);
2079 			if (diag != 0)
2080 				return diag;
2081 			mapping_found = 1;
2082 		}
2083 	}
2084 	if (mapping_found)
2085 		port->tx_queue_stats_mapping_enabled = 1;
2086 	return 0;
2087 }
2088 
2089 static int
2090 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2091 {
2092 	uint16_t i;
2093 	int diag;
2094 	uint8_t mapping_found = 0;
2095 
2096 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2097 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2098 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2099 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2100 					rx_queue_stats_mappings[i].queue_id,
2101 					rx_queue_stats_mappings[i].stats_counter_id);
2102 			if (diag != 0)
2103 				return diag;
2104 			mapping_found = 1;
2105 		}
2106 	}
2107 	if (mapping_found)
2108 		port->rx_queue_stats_mapping_enabled = 1;
2109 	return 0;
2110 }
2111 
2112 static void
2113 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2114 {
2115 	int diag = 0;
2116 
2117 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2118 	if (diag != 0) {
2119 		if (diag == -ENOTSUP) {
2120 			port->tx_queue_stats_mapping_enabled = 0;
2121 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2122 		}
2123 		else
2124 			rte_exit(EXIT_FAILURE,
2125 					"set_tx_queue_stats_mapping_registers "
2126 					"failed for port id=%d diag=%d\n",
2127 					pi, diag);
2128 	}
2129 
2130 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2131 	if (diag != 0) {
2132 		if (diag == -ENOTSUP) {
2133 			port->rx_queue_stats_mapping_enabled = 0;
2134 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2135 		}
2136 		else
2137 			rte_exit(EXIT_FAILURE,
2138 					"set_rx_queue_stats_mapping_registers "
2139 					"failed for port id=%d diag=%d\n",
2140 					pi, diag);
2141 	}
2142 }
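
/*
 * Example of what the three helpers above achieve, assuming the mapping
 * tables were filled from the command line: a hypothetical entry such as
 * { .port_id = 0, .queue_id = 2, .stats_counter_id = 5 } asks the PMD to
 * accumulate TX queue 2 of port 0 into per-queue stats counter 5, so the
 * totals appear as q_opackets[5] and q_obytes[5] in struct rte_eth_stats.
 * PMDs without such counters return -ENOTSUP, which is tolerated here by
 * disabling the mapping instead of aborting.
 */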
2143 
2144 static void
2145 rxtx_port_config(struct rte_port *port)
2146 {
2147 	port->rx_conf = port->dev_info.default_rxconf;
2148 	port->tx_conf = port->dev_info.default_txconf;
2149 
2150 	/* Check if any RX/TX parameters have been passed */
2151 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2152 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2153 
2154 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2155 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2156 
2157 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2158 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2159 
2160 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2161 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2162 
2163 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2164 		port->rx_conf.rx_drop_en = rx_drop_en;
2165 
2166 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2167 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2168 
2169 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2170 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2171 
2172 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2173 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2174 
2175 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2176 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2177 
2178 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2179 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2180 }
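
/*
 * The RTE_PMD_PARAM_UNSET guards above keep the PMD defaults from
 * dev_info authoritative: only values the user explicitly passed are
 * written over them. A hypothetical command line exercising two of the
 * thresholds (flag names follow testpmd's parameter list):
 *
 *	testpmd ... -- --txrst=32 --rxfreet=64
 *
 * would arrive here as tx_rs_thresh == 32 and rx_free_thresh == 64.
 */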
2181 
2182 void
2183 init_port_config(void)
2184 {
2185 	portid_t pid;
2186 	struct rte_port *port;
2187 
2188 	RTE_ETH_FOREACH_DEV(pid) {
2189 		port = &ports[pid];
2190 		port->dev_conf.fdir_conf = fdir_conf;
2191 		if (nb_rxq > 1) {
2192 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2193 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2194 		} else {
2195 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2196 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2197 		}
2198 
2199 		if (port->dcb_flag == 0) {
2200 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2201 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2202 			else
2203 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2204 		}
2205 
2206 		rxtx_port_config(port);
2207 
2208 		rte_eth_macaddr_get(pid, &port->eth_addr);
2209 
2210 		map_port_queue_stats_mapping_registers(pid, port);
2211 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2212 		rte_pmd_ixgbe_bypass_init(pid);
2213 #endif
2214 
2215 		if (lsc_interrupt &&
2216 		    (rte_eth_devices[pid].data->dev_flags &
2217 		     RTE_ETH_DEV_INTR_LSC))
2218 			port->dev_conf.intr_conf.lsc = 1;
2219 		if (rmv_interrupt &&
2220 		    (rte_eth_devices[pid].data->dev_flags &
2221 		     RTE_ETH_DEV_INTR_RMV))
2222 			port->dev_conf.intr_conf.rmv = 1;
2223 
2224 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2225 		/* Detect softnic port */
2226 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2227 			port->softnic_enable = 1;
2228 			memset(&port->softport, 0, sizeof(struct softnic_port));
2229 
2230 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2231 				port->softport.tm_flag = 1;
2232 		}
2233 #endif
2234 	}
2235 }
2236 
2237 void set_port_slave_flag(portid_t slave_pid)
2238 {
2239 	struct rte_port *port;
2240 
2241 	port = &ports[slave_pid];
2242 	port->slave_flag = 1;
2243 }
2244 
2245 void clear_port_slave_flag(portid_t slave_pid)
2246 {
2247 	struct rte_port *port;
2248 
2249 	port = &ports[slave_pid];
2250 	port->slave_flag = 0;
2251 }
2252 
2253 uint8_t port_is_bonding_slave(portid_t slave_pid)
2254 {
2255 	struct rte_port *port;
2256 
2257 	port = &ports[slave_pid];
2258 	return port->slave_flag;
2259 }
2260 
2261 const uint16_t vlan_tags[] = {
2262 		0,  1,  2,  3,  4,  5,  6,  7,
2263 		8,  9, 10, 11,  12, 13, 14, 15,
2264 		16, 17, 18, 19, 20, 21, 22, 23,
2265 		24, 25, 26, 27, 28, 29, 30, 31
2266 };
2267 
2268 static int
2269 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2270 		 enum dcb_mode_enable dcb_mode,
2271 		 enum rte_eth_nb_tcs num_tcs,
2272 		 uint8_t pfc_en)
2273 {
2274 	uint8_t i;
2275 
2276 	/*
2277 	 * Build the correct configuration for DCB+VT based on the VLAN tags array
2278 	 * given above and the number of traffic classes available for use.
2279 	 */
2280 	if (dcb_mode == DCB_VT_ENABLED) {
2281 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2282 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2283 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2284 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2285 
2286 		/* VMDQ+DCB RX and TX configurations */
2287 		vmdq_rx_conf->enable_default_pool = 0;
2288 		vmdq_rx_conf->default_pool = 0;
2289 		vmdq_rx_conf->nb_queue_pools =
2290 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2291 		vmdq_tx_conf->nb_queue_pools =
2292 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2293 
2294 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2295 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2296 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2297 			vmdq_rx_conf->pool_map[i].pools =
2298 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2299 		}
2300 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2301 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2302 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2303 		}
2304 
2305 		/* set DCB mode of RX and TX of multiple queues */
2306 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2307 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2308 	} else {
2309 		struct rte_eth_dcb_rx_conf *rx_conf =
2310 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2311 		struct rte_eth_dcb_tx_conf *tx_conf =
2312 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2313 
2314 		rx_conf->nb_tcs = num_tcs;
2315 		tx_conf->nb_tcs = num_tcs;
2316 
2317 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2318 			rx_conf->dcb_tc[i] = i % num_tcs;
2319 			tx_conf->dcb_tc[i] = i % num_tcs;
2320 		}
2321 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2322 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2323 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2324 	}
2325 
2326 	if (pfc_en)
2327 		eth_conf->dcb_capability_en =
2328 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2329 	else
2330 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2331 
2332 	return 0;
2333 }
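
/*
 * Worked example for the VT branch above: with num_tcs == ETH_4_TCS the
 * device is carved into ETH_32_POOLS pools of 4 TCs each (32 * 4 = 128
 * queues), and pool_map[i] steers vlan_tags[i] into pool i % 32; with
 * ETH_8_TCS it is 16 pools of 8 TCs instead. In both branches user
 * priority i lands on traffic class i % num_tcs, so with 4 TCs the eight
 * priorities fold as 0,1,2,3,0,1,2,3.
 */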
2334 
2335 int
2336 init_port_dcb_config(portid_t pid,
2337 		     enum dcb_mode_enable dcb_mode,
2338 		     enum rte_eth_nb_tcs num_tcs,
2339 		     uint8_t pfc_en)
2340 {
2341 	struct rte_eth_conf port_conf;
2342 	struct rte_port *rte_port;
2343 	int retval;
2344 	uint16_t i;
2345 
2346 	rte_port = &ports[pid];
2347 
2348 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2349 	/* Enter DCB configuration status */
2350 	dcb_config = 1;
2351 
2352 	/* Set the DCB configuration for VT mode or non-VT mode */
2353 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2354 	if (retval < 0)
2355 		return retval;
2356 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2357 
2358 	/**
2359 	 * Write the configuration into the device.
2360 	 * Set the numbers of RX & TX queues to 0, so
2361 	 * the RX & TX queues will not be setup.
2362 	 */
2363 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2364 
2365 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2366 
2367 	/* If dev_info.vmdq_pool_base is greater than 0,
2368 	 * the queue id of vmdq pools is started after pf queues.
2369 	 */
2370 	if (dcb_mode == DCB_VT_ENABLED &&
2371 	    rte_port->dev_info.vmdq_pool_base > 0) {
2372 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2373 			" for port %d.\n", pid);
2374 		return -1;
2375 	}
2376 
2377 	/* Assume the ports in testpmd have the same DCB capability
2378 	 * and the same number of rxq and txq in DCB mode
2379 	 */
2380 	if (dcb_mode == DCB_VT_ENABLED) {
2381 		if (rte_port->dev_info.max_vfs > 0) {
2382 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2383 			nb_txq = rte_port->dev_info.nb_tx_queues;
2384 		} else {
2385 			nb_rxq = rte_port->dev_info.max_rx_queues;
2386 			nb_txq = rte_port->dev_info.max_tx_queues;
2387 		}
2388 	} else {
2389 		/* If VT is disabled, use all PF queues */
2390 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2391 			nb_rxq = rte_port->dev_info.max_rx_queues;
2392 			nb_txq = rte_port->dev_info.max_tx_queues;
2393 		} else {
2394 			nb_rxq = (queueid_t)num_tcs;
2395 			nb_txq = (queueid_t)num_tcs;
2397 		}
2398 	}
2399 	rx_free_thresh = 64;
2400 
2401 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2402 
2403 	rxtx_port_config(rte_port);
2404 	/* VLAN filter */
2405 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2406 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2407 		rx_vft_set(pid, vlan_tags[i], 1);
2408 
2409 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2410 	map_port_queue_stats_mapping_registers(pid, rte_port);
2411 
2412 	rte_port->dcb_flag = 1;
2413 
2414 	return 0;
2415 }
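
/*
 * init_port_dcb_config() is reached from the interactive command line; a
 * hypothetical session enabling 4 traffic classes with VT and PFC on
 * port 0 looks like:
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt on 4 pfc on
 *	testpmd> port start 0
 */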
2416 
2417 static void
2418 init_port(void)
2419 {
2420 	/* Configuration of Ethernet ports. */
2421 	ports = rte_zmalloc("testpmd: ports",
2422 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2423 			    RTE_CACHE_LINE_SIZE);
2424 	if (ports == NULL) {
2425 		rte_exit(EXIT_FAILURE,
2426 				"rte_zmalloc(%d struct rte_port) failed\n",
2427 				RTE_MAX_ETHPORTS);
2428 	}
2429 }
2430 
2431 static void
2432 force_quit(void)
2433 {
2434 	pmd_test_exit();
2435 	prompt_exit();
2436 }
2437 
2438 static void
2439 print_stats(void)
2440 {
2441 	uint8_t i;
2442 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2443 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2444 
2445 	/* Clear screen and move to top left */
2446 	printf("%s%s", clr, top_left);
2447 
2448 	printf("\nPort statistics ====================================");
2449 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2450 		nic_stats_display(fwd_ports_ids[i]);
2451 }
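
/*
 * The two byte arrays above spell out ordinary ANSI escape sequences:
 * clr is ESC [ 2 J (erase display) and top_left is ESC [ 1 ; 1 H (move
 * cursor to row 1, column 1). The pair of printf calls is equivalent to:
 *
 *	printf("\033[2J\033[1;1H");
 */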
2452 
2453 static void
2454 signal_handler(int signum)
2455 {
2456 	if (signum == SIGINT || signum == SIGTERM) {
2457 		printf("\nSignal %d received, preparing to exit...\n",
2458 				signum);
2459 #ifdef RTE_LIBRTE_PDUMP
2460 		/* uninitialize packet capture framework */
2461 		rte_pdump_uninit();
2462 #endif
2463 #ifdef RTE_LIBRTE_LATENCY_STATS
2464 		rte_latencystats_uninit();
2465 #endif
2466 		force_quit();
2467 		/* Set flag to indicate forced termination. */
2468 		f_quit = 1;
2469 		/* exit with the expected status */
2470 		signal(signum, SIG_DFL);
2471 		kill(getpid(), signum);
2472 	}
2473 }
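
/*
 * The tail of the handler above is the standard signal re-raise idiom:
 * after cleanup, restore the default disposition and deliver the same
 * signal again, so the parent sees the process as killed by SIGINT or
 * SIGTERM rather than exiting normally:
 *
 *	signal(signum, SIG_DFL);
 *	kill(getpid(), signum);
 */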
2474 
2475 int
2476 main(int argc, char** argv)
2477 {
2478 	int  diag;
2479 	portid_t port_id;
2480 
2481 	signal(SIGINT, signal_handler);
2482 	signal(SIGTERM, signal_handler);
2483 
2484 	diag = rte_eal_init(argc, argv);
2485 	if (diag < 0)
2486 		rte_panic("Cannot init EAL\n");
2487 
2488 	testpmd_logtype = rte_log_register("testpmd");
2489 	if (testpmd_logtype < 0)
2490 		rte_panic("Cannot register log type");
2491 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2492 
2493 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2494 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2495 			strerror(errno));
2496 	}
2497 
2498 #ifdef RTE_LIBRTE_PDUMP
2499 	/* initialize packet capture framework */
2500 	rte_pdump_init(NULL);
2501 #endif
2502 
2503 	nb_ports = (portid_t) rte_eth_dev_count();
2504 	if (nb_ports == 0)
2505 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2506 
2507 	/* allocate port structures, and init them */
2508 	init_port();
2509 
2510 	set_def_fwd_config();
2511 	if (nb_lcores == 0)
2512 		rte_panic("Empty set of forwarding logical cores - check the "
2513 			  "core mask supplied in the command parameters\n");
2514 
2515 	/* Bitrate/latency stats disabled by default */
2516 #ifdef RTE_LIBRTE_BITRATE
2517 	bitrate_enabled = 0;
2518 #endif
2519 #ifdef RTE_LIBRTE_LATENCY_STATS
2520 	latencystats_enabled = 0;
2521 #endif
2522 
2523 	argc -= diag;
2524 	argv += diag;
2525 	if (argc > 1)
2526 		launch_args_parse(argc, argv);
2527 
2528 	if (tx_first && interactive)
2529 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2530 				"interactive mode.\n");
2531 
2532 	if (tx_first && lsc_interrupt) {
2533 		printf("Warning: lsc_interrupt needs to be off when "
2534 				"using tx_first. Disabling.\n");
2535 		lsc_interrupt = 0;
2536 	}
2537 
2538 	if (!nb_rxq && !nb_txq)
2539 		printf("Warning: Either rx or tx queues should be non-zero\n");
2540 
2541 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2542 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2543 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2544 		       nb_rxq, nb_txq);
2545 
2546 	init_config();
2547 	if (start_port(RTE_PORT_ALL) != 0)
2548 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2549 
2550 	/* set all ports to promiscuous mode by default */
2551 	RTE_ETH_FOREACH_DEV(port_id)
2552 		rte_eth_promiscuous_enable(port_id);
2553 
2554 	/* Init metrics library */
2555 	rte_metrics_init(rte_socket_id());
2556 
2557 #ifdef RTE_LIBRTE_LATENCY_STATS
2558 	if (latencystats_enabled != 0) {
2559 		int ret = rte_latencystats_init(1, NULL);
2560 		if (ret)
2561 			printf("Warning: latencystats init() returned error %d\n",
2562 				ret);
2563 		printf("Latencystats running on lcore %d\n",
2564 			latencystats_lcore_id);
2565 	}
2566 #endif
2567 
2568 	/* Setup bitrate stats */
2569 #ifdef RTE_LIBRTE_BITRATE
2570 	if (bitrate_enabled != 0) {
2571 		bitrate_data = rte_stats_bitrate_create();
2572 		if (bitrate_data == NULL)
2573 			rte_exit(EXIT_FAILURE,
2574 				"Could not allocate bitrate data.\n");
2575 		rte_stats_bitrate_reg(bitrate_data);
2576 	}
2577 #endif
2578 
2579 #ifdef RTE_LIBRTE_CMDLINE
2580 	if (strlen(cmdline_filename) != 0)
2581 		cmdline_read_from_file(cmdline_filename);
2582 
2583 	if (interactive == 1) {
2584 		if (auto_start) {
2585 			printf("Start automatic packet forwarding\n");
2586 			start_packet_forwarding(0);
2587 		}
2588 		prompt();
2589 		pmd_test_exit();
2590 	} else
2591 #endif
2592 	{
2593 		char c;
2594 		int rc;
2595 
2596 		f_quit = 0;
2597 
2598 		printf("No interactive command line; starting packet forwarding\n");
2599 		start_packet_forwarding(tx_first);
2600 		if (stats_period != 0) {
2601 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2602 			uint64_t timer_period;
2603 
2604 			/* Convert to number of cycles */
2605 			timer_period = stats_period * rte_get_timer_hz();
2606 
2607 			while (f_quit == 0) {
2608 				cur_time = rte_get_timer_cycles();
2609 				diff_time += cur_time - prev_time;
2610 
2611 				if (diff_time >= timer_period) {
2612 					print_stats();
2613 					/* Reset the timer */
2614 					diff_time = 0;
2615 				}
2616 				/* Sleep to avoid unnecessary checks */
2617 				prev_time = cur_time;
2618 				sleep(1);
2619 			}
2620 		}
2621 
2622 		printf("Press enter to exit\n");
2623 		rc = read(0, &c, 1);
2624 		pmd_test_exit();
2625 		if (rc < 0)
2626 			return 1;
2627 	}
2628 
2629 	return 0;
2630 }
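
/*
 * Example invocations (illustrative; EAL core/memory flags depend on the
 * system, and everything after "--" is parsed by launch_args_parse()):
 *
 *	interactive session on lcores 0-3 with 4 memory channels:
 *		./testpmd -l 0-3 -n 4 -- -i
 *
 *	non-interactive run printing port stats every 5 seconds:
 *		./testpmd -l 0-3 -n 4 -- --stats-period=5
 */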
2631