1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
33 #include <rte_eal.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
41 #include <rte_mbuf.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_dev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
51 #endif
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
54 #endif
55 #include <rte_flow.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
59 #endif
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
62 #endif
63 
64 #include "testpmd.h"
65 
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
68 
/* use the master lcore for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
72 uint8_t tx_first;
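/* Optional file of CLI commands to run at startup (--cmdline-file). */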
73 char cmdline_filename[PATH_MAX] = {0};
74 
75 /*
76  * NUMA support configuration.
77  * When set, the NUMA support attempts to dispatch the allocation of the
78  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79  * probed ports among the CPU sockets 0 and 1.
80  * Otherwise, all memory is allocated from CPU socket 0.
81  */
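/* Related command-line options: --numa, --port-numa-config, --ring-numa-config. */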
82 uint8_t numa_support = 1; /**< numa enabled by default */
83 
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
88 uint8_t socket_num = UMA_NO_CONFIG;
89 
/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
93 uint8_t mp_anon = 0;
94 
/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
100 
/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
106 
/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
112 
/*
 * Record the Ethernet addresses of the peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
121 
122 /*
123  * Probed Target Environment.
124  */
125 struct rte_port *ports;	       /**< For all probed ethernet ports. */
126 portid_t nb_ports;             /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
129 
130 /*
131  * Test Forwarding Configuration.
132  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
134  */
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
138 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
139 
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
142 
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
145 
146 /*
147  * Forwarding engines.
148  */
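/* NULL-terminated list of all forwarding engines; io is the default. */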
struct fwd_engine *fwd_engines[] = {
150 	&io_fwd_engine,
151 	&mac_fwd_engine,
152 	&mac_swap_engine,
153 	&flow_gen_engine,
154 	&rx_only_engine,
155 	&tx_only_engine,
156 	&csum_fwd_engine,
157 	&icmp_echo_engine,
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 	&softnic_tm_engine,
160 	&softnic_tm_bypass_engine,
161 #endif
162 #ifdef RTE_LIBRTE_IEEE1588
163 	&ieee1588_fwd_engine,
164 #endif
165 	NULL,
166 };
167 
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
176                                       * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
178 
/*
 * In a container, it is not possible to terminate the process running with
 * the 'stats-period' option. Set the flag to exit the stats-period loop
 * after receiving SIGINT/SIGTERM.
 */
183 uint8_t f_quit;
184 
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
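/* These can be changed at run time, e.g. with the "set txpkts" CLI command. */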
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
/* whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/* whether DCB is in testing status */
uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  * Defaults are supplied by drivers via ethdev.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
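/* A value of zero lets ethdev fall back to the driver-recommended ring size. */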
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Flow API isolated mode.
271  */
272 uint8_t flow_isolate_all;
273 
/*
 * Avoid checking the link status when starting/stopping a port.
 */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
290 
/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
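/* A bit set in the mask means events of that type are displayed. */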
/*
 * Decide whether all memory is locked for performance.
 */
305 int do_mlockall = 0;
306 
307 /*
308  * NIC bypass mode configuration options.
309  */
310 
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
314 #endif
315 
316 
317 #ifdef RTE_LIBRTE_LATENCY_STATS
318 
/*
 * Set when latency stats are enabled on the command line.
 */
322 uint8_t latencystats_enabled;
323 
/*
 * Lcore ID to serve latency statistics.
 */
327 lcoreid_t latencystats_lcore_id = -1;
328 
329 #endif
330 
331 /*
332  * Ethernet device configuration.
333  */
334 struct rte_eth_rxmode rx_mode = {
335 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336 	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
337 	.ignore_offload_bitfield = 1,
338 };
339 
340 struct rte_eth_txmode tx_mode = {
341 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 };
343 
344 struct rte_fdir_conf fdir_conf = {
345 	.mode = RTE_FDIR_MODE_NONE,
346 	.pballoc = RTE_FDIR_PBALLOC_64K,
347 	.status = RTE_FDIR_REPORT_STATUS,
348 	.mask = {
349 		.vlan_tci_mask = 0x0,
350 		.ipv4_mask     = {
351 			.src_ip = 0xFFFFFFFF,
352 			.dst_ip = 0xFFFFFFFF,
353 		},
354 		.ipv6_mask     = {
355 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357 		},
358 		.src_port_mask = 0xFFFF,
359 		.dst_port_mask = 0xFFFF,
360 		.mac_addr_byte_mask = 0xFF,
361 		.tunnel_type_mask = 1,
362 		.tunnel_id_mask = 0xFFFFFFFF,
363 	},
364 	.drop_queue = 127,
365 };
366 
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368 
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371 
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374 
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
377 
378 /*
379  * Display zero values by default for xstats
380  */
381 uint8_t xstats_hide_zero;
382 
383 unsigned int num_sockets = 0;
384 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385 
386 #ifdef RTE_LIBRTE_BITRATE
387 /* Bitrate statistics */
388 struct rte_stats_bitrates *bitrate_data;
389 lcoreid_t bitrate_lcore_id;
390 uint8_t bitrate_enabled;
391 #endif
392 
393 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
394 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395 
396 /* Forward function declarations */
397 static void map_port_queue_stats_mapping_registers(portid_t pi,
398 						   struct rte_port *port);
399 static void check_all_ports_link_status(uint32_t port_mask);
400 static int eth_event_callback(portid_t port_id,
401 			      enum rte_eth_event_type type,
402 			      void *param, void *ret_param);
403 static void eth_dev_event_callback(char *device_name,
404 				enum rte_dev_event_type type,
405 				void *param);
406 static int eth_dev_event_callback_register(void);
407 static int eth_dev_event_callback_unregister(void);
408 
409 
410 /*
411  * Check if all the ports are started.
412  * If yes, return positive value. If not, return zero.
413  */
414 static int all_ports_started(void);
415 
416 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
417 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418 
/*
 * Helper function to check whether a socket is already discovered.
 * Return 0 if it is, a positive value if it is new.
 */
423 int
424 new_socket_id(unsigned int socket_id)
425 {
426 	unsigned int i;
427 
428 	for (i = 0; i < num_sockets; i++) {
429 		if (socket_ids[i] == socket_id)
430 			return 0;
431 	}
432 	return 1;
433 }
434 
435 /*
436  * Setup default configuration.
437  */
438 static void
439 set_default_fwd_lcores_config(void)
440 {
441 	unsigned int i;
442 	unsigned int nb_lc;
443 	unsigned int sock_num;
444 
445 	nb_lc = 0;
446 	for (i = 0; i < RTE_MAX_LCORE; i++) {
447 		sock_num = rte_lcore_to_socket_id(i);
448 		if (new_socket_id(sock_num)) {
449 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
450 				rte_exit(EXIT_FAILURE,
451 					 "Total sockets greater than %u\n",
452 					 RTE_MAX_NUMA_NODES);
453 			}
454 			socket_ids[num_sockets++] = sock_num;
455 		}
456 		if (!rte_lcore_is_enabled(i))
457 			continue;
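		/* The master lcore is reserved for the command line, not forwarding. */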
458 		if (i == rte_get_master_lcore())
459 			continue;
460 		fwd_lcores_cpuids[nb_lc++] = i;
461 	}
462 	nb_lcores = (lcoreid_t) nb_lc;
463 	nb_cfg_lcores = nb_lcores;
464 	nb_fwd_lcores = 1;
465 }
466 
467 static void
468 set_def_peer_eth_addrs(void)
469 {
470 	portid_t i;
471 
472 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
473 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
474 		peer_eth_addrs[i].addr_bytes[5] = i;
475 	}
476 }
477 
478 static void
479 set_default_fwd_ports_config(void)
480 {
481 	portid_t pt_id;
482 	int i = 0;
483 
484 	RTE_ETH_FOREACH_DEV(pt_id)
485 		fwd_ports_ids[i++] = pt_id;
486 
487 	nb_cfg_ports = nb_ports;
488 	nb_fwd_ports = nb_ports;
489 }
490 
491 void
492 set_def_fwd_config(void)
493 {
494 	set_default_fwd_lcores_config();
495 	set_def_peer_eth_addrs();
496 	set_default_fwd_ports_config();
497 }
498 
499 /*
500  * Configuration initialisation done once at init time.
501  */
502 static void
503 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
504 		 unsigned int socket_id)
505 {
506 	char pool_name[RTE_MEMPOOL_NAMESIZE];
507 	struct rte_mempool *rte_mp = NULL;
508 	uint32_t mb_size;
509 
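	/* Mempool element size: the mbuf header followed by its data buffer. */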
510 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
511 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 
513 	TESTPMD_LOG(INFO,
514 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
515 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
516 
517 	if (mp_anon != 0) {
518 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
519 			mb_size, (unsigned) mb_mempool_cache,
520 			sizeof(struct rte_pktmbuf_pool_private),
521 			socket_id, 0);
522 		if (rte_mp == NULL)
523 			goto err;
524 
525 		if (rte_mempool_populate_anon(rte_mp) == 0) {
526 			rte_mempool_free(rte_mp);
527 			rte_mp = NULL;
528 			goto err;
529 		}
530 		rte_pktmbuf_pool_init(rte_mp, NULL);
531 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532 	} else {
		/* rte_pktmbuf_pool_create() wraps rte_mempool_create() */
534 		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
535 				rte_mbuf_best_mempool_ops());
536 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538 	}
539 
540 err:
541 	if (rte_mp == NULL) {
542 		rte_exit(EXIT_FAILURE,
543 			"Creation of mbuf pool for socket %u failed: %s\n",
544 			socket_id, rte_strerror(rte_errno));
545 	} else if (verbose_level > 0) {
546 		rte_mempool_dump(stdout, rte_mp);
547 	}
548 }
549 
/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557 	static int warning_once = 0;
558 
559 	if (new_socket_id(socket_id)) {
560 		if (!warning_once && numa_support)
561 			printf("Warning: NUMA should be configured manually by"
562 			       " using --port-numa-config and"
563 			       " --ring-numa-config parameters along with"
564 			       " --numa.\n");
565 		warning_once = 1;
566 		return -1;
567 	}
568 	return 0;
569 }
570 
571 /*
572  * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580 	portid_t pi;
581 	struct rte_eth_dev_info dev_info;
582 
583 	RTE_ETH_FOREACH_DEV(pi) {
584 		rte_eth_dev_info_get(pi, &dev_info);
585 		if (dev_info.max_rx_queues < allowed_max_rxq) {
586 			allowed_max_rxq = dev_info.max_rx_queues;
587 			*pid = pi;
588 		}
589 	}
590 	return allowed_max_rxq;
591 }
592 
/*
 * Check whether the input rxq is valid.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602 	queueid_t allowed_max_rxq;
603 	portid_t pid = 0;
604 
605 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 	if (rxq > allowed_max_rxq) {
607 		printf("Fail: input rxq (%u) can't be greater "
608 		       "than max_rx_queues (%u) of port %u\n",
609 		       rxq,
610 		       allowed_max_rxq,
611 		       pid);
612 		return -1;
613 	}
614 	return 0;
615 }
616 
617 /*
618  * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
626 	portid_t pi;
627 	struct rte_eth_dev_info dev_info;
628 
629 	RTE_ETH_FOREACH_DEV(pi) {
630 		rte_eth_dev_info_get(pi, &dev_info);
631 		if (dev_info.max_tx_queues < allowed_max_txq) {
632 			allowed_max_txq = dev_info.max_tx_queues;
633 			*pid = pi;
634 		}
635 	}
636 	return allowed_max_txq;
637 }
638 
/*
 * Check whether the input txq is valid.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648 	queueid_t allowed_max_txq;
649 	portid_t pid = 0;
650 
651 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 	if (txq > allowed_max_txq) {
653 		printf("Fail: input txq (%u) can't be greater "
654 		       "than max_tx_queues (%u) of port %u\n",
655 		       txq,
656 		       allowed_max_txq,
657 		       pid);
658 		return -1;
659 	}
660 	return 0;
661 }
662 
663 static void
664 init_config(void)
665 {
666 	portid_t pid;
667 	struct rte_port *port;
668 	struct rte_mempool *mbp;
669 	unsigned int nb_mbuf_per_pool;
670 	lcoreid_t  lc_id;
671 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 	struct rte_gro_param gro_param;
673 	uint32_t gso_types;
674 
	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
676 
677 	if (numa_support) {
678 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
681 	}
682 
683 	/* Configuration of logical cores. */
684 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 				sizeof(struct fwd_lcore *) * nb_lcores,
686 				RTE_CACHE_LINE_SIZE);
687 	if (fwd_lcores == NULL) {
688 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 							"failed\n", nb_lcores);
690 	}
691 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 					       sizeof(struct fwd_lcore),
694 					       RTE_CACHE_LINE_SIZE);
695 		if (fwd_lcores[lc_id] == NULL) {
696 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
697 								"failed\n");
698 		}
699 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
700 	}
701 
702 	RTE_ETH_FOREACH_DEV(pid) {
703 		port = &ports[pid];
704 		/* Apply default TxRx configuration for all ports */
705 		port->dev_conf.txmode = tx_mode;
706 		port->dev_conf.rxmode = rx_mode;
707 		rte_eth_dev_info_get(pid, &port->dev_info);
708 		if (!(port->dev_info.tx_offload_capa &
709 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
710 			port->dev_conf.txmode.offloads &=
711 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
712 		if (numa_support) {
713 			if (port_numa[pid] != NUMA_NO_CONFIG)
714 				port_per_socket[port_numa[pid]]++;
715 			else {
716 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
717 
718 				/* if socket_id is invalid, set to 0 */
719 				if (check_socket_id(socket_id) < 0)
720 					socket_id = 0;
721 				port_per_socket[socket_id]++;
722 			}
723 		}
724 
725 		/* set flag to initialize port/queue */
726 		port->need_reconfig = 1;
727 		port->need_reconfig_queues = 1;
728 	}
729 
	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
	 * and nb_txd can be configured at run time.
	 */
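	/*
	 * Default sizing: worst case of full RX and TX rings plus the
	 * per-lcore mempool caches and one burst, for RTE_MAX_ETHPORTS ports.
	 */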
739 	if (param_total_num_mbufs)
740 		nb_mbuf_per_pool = param_total_num_mbufs;
741 	else {
742 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
743 			(nb_lcores * mb_mempool_cache) +
744 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
745 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
746 	}
747 
748 	if (numa_support) {
749 		uint8_t i;
750 
751 		for (i = 0; i < num_sockets; i++)
752 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
753 					 socket_ids[i]);
754 	} else {
755 		if (socket_num == UMA_NO_CONFIG)
756 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
757 		else
758 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
759 						 socket_num);
760 	}
761 
762 	init_port_config();
763 
764 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
765 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
769 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
770 		mbp = mbuf_pool_find(
771 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
772 
773 		if (mbp == NULL)
774 			mbp = mbuf_pool_find(0);
775 		fwd_lcores[lc_id]->mbp = mbp;
776 		/* initialize GSO context */
777 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
778 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
779 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
780 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
781 			ETHER_CRC_LEN;
782 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
783 	}
784 
785 	/* Configuration of packet forwarding streams. */
786 	if (init_fwd_streams() < 0)
787 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
788 
789 	fwd_config_setup();
790 
791 	/* create a gro context for each lcore */
792 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
793 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
794 	gro_param.max_item_per_flow = MAX_PKT_BURST;
795 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
796 		gro_param.socket_id = rte_lcore_to_socket_id(
797 				fwd_lcores_cpuids[lc_id]);
798 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
799 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
800 			rte_exit(EXIT_FAILURE,
801 					"rte_gro_ctx_create() failed\n");
802 		}
803 	}
804 }
805 
806 
807 void
808 reconfig(portid_t new_port_id, unsigned socket_id)
809 {
810 	struct rte_port *port;
811 
812 	/* Reconfiguration of Ethernet ports. */
813 	port = &ports[new_port_id];
814 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
815 
816 	/* set flag to initialize port/queue */
817 	port->need_reconfig = 1;
818 	port->need_reconfig_queues = 1;
819 	port->socket_id = socket_id;
820 
821 	init_port_config();
822 }
823 
824 
825 int
826 init_fwd_streams(void)
827 {
828 	portid_t pid;
829 	struct rte_port *port;
830 	streamid_t sm_id, nb_fwd_streams_new;
831 	queueid_t q;
832 
	/* set the socket id according to NUMA mode */
834 	RTE_ETH_FOREACH_DEV(pid) {
835 		port = &ports[pid];
836 		if (nb_rxq > port->dev_info.max_rx_queues) {
837 			printf("Fail: nb_rxq(%d) is greater than "
838 				"max_rx_queues(%d)\n", nb_rxq,
839 				port->dev_info.max_rx_queues);
840 			return -1;
841 		}
842 		if (nb_txq > port->dev_info.max_tx_queues) {
843 			printf("Fail: nb_txq(%d) is greater than "
844 				"max_tx_queues(%d)\n", nb_txq,
845 				port->dev_info.max_tx_queues);
846 			return -1;
847 		}
848 		if (numa_support) {
849 			if (port_numa[pid] != NUMA_NO_CONFIG)
850 				port->socket_id = port_numa[pid];
851 			else {
852 				port->socket_id = rte_eth_dev_socket_id(pid);
853 
854 				/* if socket_id is invalid, set to 0 */
855 				if (check_socket_id(port->socket_id) < 0)
856 					port->socket_id = 0;
857 			}
		} else {
860 			if (socket_num == UMA_NO_CONFIG)
861 				port->socket_id = 0;
862 			else
863 				port->socket_id = socket_num;
864 		}
865 	}
866 
867 	q = RTE_MAX(nb_rxq, nb_txq);
868 	if (q == 0) {
869 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
870 		return -1;
871 	}
872 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
873 	if (nb_fwd_streams_new == nb_fwd_streams)
874 		return 0;
875 	/* clear the old */
876 	if (fwd_streams != NULL) {
877 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
878 			if (fwd_streams[sm_id] == NULL)
879 				continue;
880 			rte_free(fwd_streams[sm_id]);
881 			fwd_streams[sm_id] = NULL;
882 		}
883 		rte_free(fwd_streams);
884 		fwd_streams = NULL;
885 	}
886 
887 	/* init new */
888 	nb_fwd_streams = nb_fwd_streams_new;
889 	if (nb_fwd_streams) {
890 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
891 			sizeof(struct fwd_stream *) * nb_fwd_streams,
892 			RTE_CACHE_LINE_SIZE);
893 		if (fwd_streams == NULL)
894 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
895 				 " (struct fwd_stream *)) failed\n",
896 				 nb_fwd_streams);
897 
898 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
899 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
900 				" struct fwd_stream", sizeof(struct fwd_stream),
901 				RTE_CACHE_LINE_SIZE);
902 			if (fwd_streams[sm_id] == NULL)
903 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
904 					 "(struct fwd_stream) failed\n");
905 		}
906 	}
907 
908 	return 0;
909 }
910 
911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
912 static void
913 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
914 {
915 	unsigned int total_burst;
916 	unsigned int nb_burst;
917 	unsigned int burst_stats[3];
918 	uint16_t pktnb_stats[3];
919 	uint16_t nb_pkt;
920 	int burst_percent[3];
921 
922 	/*
923 	 * First compute the total number of packet bursts and the
924 	 * two highest numbers of bursts of the same number of packets.
925 	 */
926 	total_burst = 0;
927 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
928 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
929 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
930 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
931 		if (nb_burst == 0)
932 			continue;
933 		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
940 	}
941 	if (total_burst == 0)
942 		return;
943 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
944 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
945 	       burst_percent[0], (int) pktnb_stats[0]);
946 	if (burst_stats[0] == total_burst) {
947 		printf("]\n");
948 		return;
949 	}
950 	if (burst_stats[0] + burst_stats[1] == total_burst) {
951 		printf(" + %d%% of %d pkts]\n",
952 		       100 - burst_percent[0], pktnb_stats[1]);
953 		return;
954 	}
955 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
956 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
957 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
958 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
959 		return;
960 	}
961 	printf(" + %d%% of %d pkts + %d%% of others]\n",
962 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
963 }
964 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
965 
966 static void
967 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
968 {
969 	struct rte_port *port;
970 	uint8_t i;
971 
972 	static const char *fwd_stats_border = "----------------------";
973 
974 	port = &ports[port_id];
975 	printf("\n  %s Forward statistics for port %-2d %s\n",
976 	       fwd_stats_border, port_id, fwd_stats_border);
977 
978 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
979 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
980 		       "%-"PRIu64"\n",
981 		       stats->ipackets, stats->imissed,
982 		       (uint64_t) (stats->ipackets + stats->imissed));
983 
984 		if (cur_fwd_eng == &csum_fwd_engine)
985 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
986 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
987 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
988 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
989 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
990 		}
991 
992 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
993 		       "%-"PRIu64"\n",
994 		       stats->opackets, port->tx_dropped,
995 		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
998 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
999 		       "%14"PRIu64"\n",
1000 		       stats->ipackets, stats->imissed,
1001 		       (uint64_t) (stats->ipackets + stats->imissed));
1002 
1003 		if (cur_fwd_eng == &csum_fwd_engine)
1004 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1005 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1006 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1007 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1008 			printf("  RX-nombufs:             %14"PRIu64"\n",
1009 			       stats->rx_nombuf);
1010 		}
1011 
1012 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1013 		       "%14"PRIu64"\n",
1014 		       stats->opackets, port->tx_dropped,
1015 		       (uint64_t) (stats->opackets + port->tx_dropped));
1016 	}
1017 
1018 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1019 	if (port->rx_stream)
1020 		pkt_burst_stats_display("RX",
1021 			&port->rx_stream->rx_burst_stats);
1022 	if (port->tx_stream)
1023 		pkt_burst_stats_display("TX",
1024 			&port->tx_stream->tx_burst_stats);
1025 #endif
1026 
1027 	if (port->rx_queue_stats_mapping_enabled) {
1028 		printf("\n");
1029 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1030 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1031 			       "     RX-errors:%14"PRIu64
1032 			       "    RX-bytes:%14"PRIu64"\n",
1033 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1034 		}
1035 		printf("\n");
1036 	}
1037 	if (port->tx_queue_stats_mapping_enabled) {
1038 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1039 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1040 			       "                                 TX-bytes:%14"PRIu64"\n",
1041 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1042 		}
1043 	}
1044 
1045 	printf("  %s--------------------------------%s\n",
1046 	       fwd_stats_border, fwd_stats_border);
1047 }
1048 
1049 static void
1050 fwd_stream_stats_display(streamid_t stream_id)
1051 {
1052 	struct fwd_stream *fs;
1053 	static const char *fwd_top_stats_border = "-------";
1054 
1055 	fs = fwd_streams[stream_id];
1056 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1057 	    (fs->fwd_dropped == 0))
1058 		return;
1059 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1060 	       "TX Port=%2d/Queue=%2d %s\n",
1061 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1062 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1063 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1064 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1065 
	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}
1071 
1072 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1073 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1074 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1075 #endif
1076 }
1077 
1078 static void
1079 flush_fwd_rx_queues(void)
1080 {
1081 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1082 	portid_t  rxp;
1083 	portid_t port_id;
1084 	queueid_t rxq;
1085 	uint16_t  nb_rx;
1086 	uint16_t  i;
1087 	uint8_t   j;
1088 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1089 	uint64_t timer_period;
1090 
1091 	/* convert to number of cycles */
1092 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1093 
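	/* Make two passes over all RX queues, pausing briefly after each one. */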
1094 	for (j = 0; j < 2; j++) {
1095 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1096 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1097 				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * the loop once the 1-second timeout expires.
				 */
1104 				prev_tsc = rte_rdtsc();
1105 				do {
1106 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1107 						pkts_burst, MAX_PKT_BURST);
1108 					for (i = 0; i < nb_rx; i++)
1109 						rte_pktmbuf_free(pkts_burst[i]);
1110 
1111 					cur_tsc = rte_rdtsc();
1112 					diff_tsc = cur_tsc - prev_tsc;
1113 					timer_tsc += diff_tsc;
1114 				} while ((nb_rx > 0) &&
1115 					(timer_tsc < timer_period));
1116 				timer_tsc = 0;
1117 			}
1118 		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1120 	}
1121 }
1122 
1123 static void
1124 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1125 {
1126 	struct fwd_stream **fsm;
1127 	streamid_t nb_fs;
1128 	streamid_t sm_id;
1129 #ifdef RTE_LIBRTE_BITRATE
1130 	uint64_t tics_per_1sec;
1131 	uint64_t tics_datum;
1132 	uint64_t tics_current;
1133 	uint16_t idx_port;
1134 
1135 	tics_datum = rte_rdtsc();
1136 	tics_per_1sec = rte_get_timer_hz();
1137 #endif
1138 	fsm = &fwd_streams[fc->stream_idx];
1139 	nb_fs = fc->stream_nb;
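	/* Service every stream assigned to this lcore until told to stop. */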
1140 	do {
1141 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1142 			(*pkt_fwd)(fsm[sm_id]);
1143 #ifdef RTE_LIBRTE_BITRATE
1144 		if (bitrate_enabled != 0 &&
1145 				bitrate_lcore_id == rte_lcore_id()) {
1146 			tics_current = rte_rdtsc();
1147 			if (tics_current - tics_datum >= tics_per_1sec) {
1148 				/* Periodic bitrate calculation */
1149 				RTE_ETH_FOREACH_DEV(idx_port)
1150 					rte_stats_bitrate_calc(bitrate_data,
1151 						idx_port);
1152 				tics_datum = tics_current;
1153 			}
1154 		}
1155 #endif
1156 #ifdef RTE_LIBRTE_LATENCY_STATS
1157 		if (latencystats_enabled != 0 &&
1158 				latencystats_lcore_id == rte_lcore_id())
1159 			rte_latencystats_update();
1160 #endif
1161 
1162 	} while (! fc->stopped);
1163 }
1164 
1165 static int
1166 start_pkt_forward_on_core(void *fwd_arg)
1167 {
1168 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1169 			     cur_fwd_config.fwd_eng->packet_fwd);
1170 	return 0;
1171 }
1172 
1173 /*
1174  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1175  * Used to start communication flows in network loopback test configurations.
1176  */
1177 static int
1178 run_one_txonly_burst_on_core(void *fwd_arg)
1179 {
1180 	struct fwd_lcore *fwd_lc;
1181 	struct fwd_lcore tmp_lcore;
1182 
1183 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1184 	tmp_lcore = *fwd_lc;
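	/* Mark the copy as stopped so the forwarding loop runs exactly once. */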
1185 	tmp_lcore.stopped = 1;
1186 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1187 	return 0;
1188 }
1189 
1190 /*
1191  * Launch packet forwarding:
1192  *     - Setup per-port forwarding context.
1193  *     - launch logical cores with their forwarding configuration.
1194  */
1195 static void
1196 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1197 {
1198 	port_fwd_begin_t port_fwd_begin;
1199 	unsigned int i;
1200 	unsigned int lc_id;
1201 	int diag;
1202 
1203 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1204 	if (port_fwd_begin != NULL) {
1205 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1206 			(*port_fwd_begin)(fwd_ports_ids[i]);
1207 	}
1208 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1209 		lc_id = fwd_lcores_cpuids[i];
1210 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1211 			fwd_lcores[i]->stopped = 0;
1212 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1213 						     fwd_lcores[i], lc_id);
1214 			if (diag != 0)
1215 				printf("launch lcore %u failed - diag=%d\n",
1216 				       lc_id, diag);
1217 		}
1218 	}
1219 }
1220 
1221 /*
1222  * Update the forward ports list.
1223  */
1224 void
1225 update_fwd_ports(portid_t new_pid)
1226 {
1227 	unsigned int i;
1228 	unsigned int new_nb_fwd_ports = 0;
1229 	int move = 0;
1230 
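	/* Compact the list in place, dropping port ids that are no longer valid. */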
1231 	for (i = 0; i < nb_fwd_ports; ++i) {
1232 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1233 			move = 1;
1234 		else if (move)
1235 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1236 		else
1237 			new_nb_fwd_ports++;
1238 	}
1239 	if (new_pid < RTE_MAX_ETHPORTS)
1240 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1241 
1242 	nb_fwd_ports = new_nb_fwd_ports;
1243 	nb_cfg_ports = new_nb_fwd_ports;
1244 }
1245 
1246 /*
1247  * Launch packet forwarding configuration.
1248  */
1249 void
1250 start_packet_forwarding(int with_tx_first)
1251 {
1252 	port_fwd_begin_t port_fwd_begin;
1253 	port_fwd_end_t  port_fwd_end;
1254 	struct rte_port *port;
1255 	unsigned int i;
1256 	portid_t   pt_id;
1257 	streamid_t sm_id;
1258 
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);
1271 
1272 	if (all_ports_started() == 0) {
1273 		printf("Not all ports were started\n");
1274 		return;
1275 	}
1276 	if (test_done == 0) {
1277 		printf("Packet forwarding already started\n");
1278 		return;
1279 	}
1280 
1281 
	if (dcb_test) {
1283 		for (i = 0; i < nb_fwd_ports; i++) {
1284 			pt_id = fwd_ports_ids[i];
1285 			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
1297 	}
1298 	test_done = 0;
1299 
1300 	fwd_config_setup();
1301 
	if (!no_flush_rx)
1303 		flush_fwd_rx_queues();
1304 
1305 	pkt_fwd_config_display(&cur_fwd_config);
1306 	rxtx_config_display();
1307 
1308 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1309 		pt_id = fwd_ports_ids[i];
1310 		port = &ports[pt_id];
1311 		rte_eth_stats_get(pt_id, &port->stats);
1312 		port->tx_dropped = 0;
1313 
1314 		map_port_queue_stats_mapping_registers(pt_id, port);
1315 	}
1316 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1317 		fwd_streams[sm_id]->rx_packets = 0;
1318 		fwd_streams[sm_id]->tx_packets = 0;
1319 		fwd_streams[sm_id]->fwd_dropped = 0;
1320 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1321 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1322 
1323 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1324 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1325 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1326 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1327 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1328 #endif
1329 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1330 		fwd_streams[sm_id]->core_cycles = 0;
1331 #endif
1332 	}
1333 	if (with_tx_first) {
1334 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1335 		if (port_fwd_begin != NULL) {
1336 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1337 				(*port_fwd_begin)(fwd_ports_ids[i]);
1338 		}
1339 		while (with_tx_first--) {
1340 			launch_packet_forwarding(
1341 					run_one_txonly_burst_on_core);
1342 			rte_eal_mp_wait_lcore();
1343 		}
1344 		port_fwd_end = tx_only_engine.port_fwd_end;
1345 		if (port_fwd_end != NULL) {
1346 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1347 				(*port_fwd_end)(fwd_ports_ids[i]);
1348 		}
1349 	}
1350 	launch_packet_forwarding(start_pkt_forward_on_core);
1351 }
1352 
1353 void
1354 stop_packet_forwarding(void)
1355 {
1356 	struct rte_eth_stats stats;
1357 	struct rte_port *port;
1358 	port_fwd_end_t  port_fwd_end;
1359 	int i;
1360 	portid_t   pt_id;
1361 	streamid_t sm_id;
1362 	lcoreid_t  lc_id;
1363 	uint64_t total_recv;
1364 	uint64_t total_xmit;
1365 	uint64_t total_rx_dropped;
1366 	uint64_t total_tx_dropped;
1367 	uint64_t total_rx_nombuf;
1368 	uint64_t tx_dropped;
1369 	uint64_t rx_bad_ip_csum;
1370 	uint64_t rx_bad_l4_csum;
1371 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1372 	uint64_t fwd_cycles;
1373 #endif
1374 
1375 	static const char *acc_stats_border = "+++++++++++++++";
1376 
1377 	if (test_done) {
1378 		printf("Packet forwarding not started\n");
1379 		return;
1380 	}
1381 	printf("Telling cores to stop...");
1382 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1383 		fwd_lcores[lc_id]->stopped = 1;
1384 	printf("\nWaiting for lcores to finish...\n");
1385 	rte_eal_mp_wait_lcore();
1386 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1387 	if (port_fwd_end != NULL) {
1388 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1389 			pt_id = fwd_ports_ids[i];
1390 			(*port_fwd_end)(pt_id);
1391 		}
1392 	}
1393 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1394 	fwd_cycles = 0;
1395 #endif
1396 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1397 		if (cur_fwd_config.nb_fwd_streams >
1398 		    cur_fwd_config.nb_fwd_ports) {
1399 			fwd_stream_stats_display(sm_id);
1400 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1401 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1402 		} else {
1403 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1404 				fwd_streams[sm_id];
1405 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1406 				fwd_streams[sm_id];
1407 		}
1408 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1409 		tx_dropped = (uint64_t) (tx_dropped +
1410 					 fwd_streams[sm_id]->fwd_dropped);
1411 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1412 
1413 		rx_bad_ip_csum =
1414 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1415 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1416 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1417 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1418 							rx_bad_ip_csum;
1419 
1420 		rx_bad_l4_csum =
1421 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1422 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1423 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1424 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1425 							rx_bad_l4_csum;
1426 
1427 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1428 		fwd_cycles = (uint64_t) (fwd_cycles +
1429 					 fwd_streams[sm_id]->core_cycles);
1430 #endif
1431 	}
1432 	total_recv = 0;
1433 	total_xmit = 0;
1434 	total_rx_dropped = 0;
1435 	total_tx_dropped = 0;
1436 	total_rx_nombuf  = 0;
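	/* Accumulate, for each port, the deltas since forwarding was started. */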
1437 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1438 		pt_id = fwd_ports_ids[i];
1439 
1440 		port = &ports[pt_id];
1441 		rte_eth_stats_get(pt_id, &stats);
1442 		stats.ipackets -= port->stats.ipackets;
1443 		port->stats.ipackets = 0;
1444 		stats.opackets -= port->stats.opackets;
1445 		port->stats.opackets = 0;
1446 		stats.ibytes   -= port->stats.ibytes;
1447 		port->stats.ibytes = 0;
1448 		stats.obytes   -= port->stats.obytes;
1449 		port->stats.obytes = 0;
1450 		stats.imissed  -= port->stats.imissed;
1451 		port->stats.imissed = 0;
1452 		stats.oerrors  -= port->stats.oerrors;
1453 		port->stats.oerrors = 0;
1454 		stats.rx_nombuf -= port->stats.rx_nombuf;
1455 		port->stats.rx_nombuf = 0;
1456 
1457 		total_recv += stats.ipackets;
1458 		total_xmit += stats.opackets;
1459 		total_rx_dropped += stats.imissed;
1460 		total_tx_dropped += port->tx_dropped;
1461 		total_rx_nombuf  += stats.rx_nombuf;
1462 
1463 		fwd_port_stats_display(pt_id, &stats);
1464 	}
1465 
1466 	printf("\n  %s Accumulated forward statistics for all ports"
1467 	       "%s\n",
1468 	       acc_stats_border, acc_stats_border);
1469 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1470 	       "%-"PRIu64"\n"
1471 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1472 	       "%-"PRIu64"\n",
1473 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1474 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1475 	if (total_rx_nombuf > 0)
1476 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1477 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1478 	       "%s\n",
1479 	       acc_stats_border, acc_stats_border);
1480 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1481 	if (total_recv > 0)
1482 		printf("\n  CPU cycles/packet=%u (total cycles="
1483 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1484 		       (unsigned int)(fwd_cycles / total_recv),
1485 		       fwd_cycles, total_recv);
1486 #endif
1487 	printf("\nDone.\n");
1488 	test_done = 1;
1489 }
1490 
1491 void
1492 dev_set_link_up(portid_t pid)
1493 {
1494 	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
1496 }
1497 
1498 void
1499 dev_set_link_down(portid_t pid)
1500 {
1501 	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
1503 }
1504 
1505 static int
1506 all_ports_started(void)
1507 {
1508 	portid_t pi;
1509 	struct rte_port *port;
1510 
1511 	RTE_ETH_FOREACH_DEV(pi) {
1512 		port = &ports[pi];
1513 		/* Check if there is a port which is not started */
1514 		if ((port->port_status != RTE_PORT_STARTED) &&
1515 			(port->slave_flag == 0))
1516 			return 0;
1517 	}
1518 
	/* All ports are started */
1520 	return 1;
1521 }
1522 
1523 int
1524 port_is_stopped(portid_t port_id)
1525 {
1526 	struct rte_port *port = &ports[port_id];
1527 
1528 	if ((port->port_status != RTE_PORT_STOPPED) &&
1529 	    (port->slave_flag == 0))
1530 		return 0;
1531 	return 1;
1532 }
1533 
1534 int
1535 all_ports_stopped(void)
1536 {
1537 	portid_t pi;
1538 
1539 	RTE_ETH_FOREACH_DEV(pi) {
1540 		if (!port_is_stopped(pi))
1541 			return 0;
1542 	}
1543 
1544 	return 1;
1545 }
1546 
1547 int
1548 port_is_started(portid_t port_id)
1549 {
1550 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1551 		return 0;
1552 
1553 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1554 		return 0;
1555 
1556 	return 1;
1557 }
1558 
1559 static int
1560 port_is_closed(portid_t port_id)
1561 {
1562 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1563 		return 0;
1564 
1565 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1566 		return 0;
1567 
1568 	return 1;
1569 }
1570 
1571 int
1572 start_port(portid_t pid)
1573 {
1574 	int diag, need_check_link_status = -1;
1575 	portid_t pi;
1576 	queueid_t qi;
1577 	struct rte_port *port;
1578 	struct ether_addr mac_addr;
1579 	enum rte_eth_event_type event_type;
1580 
1581 	if (port_id_is_invalid(pid, ENABLED_WARN))
1582 		return 0;
1583 
	if (dcb_config)
1585 		dcb_test = 1;
1586 	RTE_ETH_FOREACH_DEV(pi) {
1587 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1588 			continue;
1589 
1590 		need_check_link_status = 0;
1591 		port = &ports[pi];
1592 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1593 						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
1595 			continue;
1596 		}
1597 
1598 		if (port->need_reconfig > 0) {
1599 			port->need_reconfig = 0;
1600 
1601 			if (flow_isolate_all) {
1602 				int ret = port_flow_isolate(pi, 1);
1603 				if (ret) {
1604 					printf("Failed to apply isolated"
1605 					       " mode on port %d\n", pi);
1606 					return -1;
1607 				}
1608 			}
1609 
1610 			printf("Configuring Port %d (socket %u)\n", pi,
1611 					port->socket_id);
1612 			/* configure port */
1613 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1614 						&(port->dev_conf));
1615 			if (diag != 0) {
1616 				if (rte_atomic16_cmpset(&(port->port_status),
1617 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1618 					printf("Port %d can not be set back "
1619 							"to stopped\n", pi);
1620 				printf("Fail to configure port %d\n", pi);
1621 				/* try to reconfigure port next time */
1622 				port->need_reconfig = 1;
1623 				return -1;
1624 			}
1625 		}
1626 		if (port->need_reconfig_queues > 0) {
1627 			port->need_reconfig_queues = 0;
1628 			/* setup tx queues */
1629 			for (qi = 0; qi < nb_txq; qi++) {
1630 				port->tx_conf[qi].txq_flags =
1631 					ETH_TXQ_FLAGS_IGNORE;
1632 				/* Apply Tx offloads configuration */
1633 				port->tx_conf[qi].offloads =
1634 					port->dev_conf.txmode.offloads;
1635 				if ((numa_support) &&
1636 					(txring_numa[pi] != NUMA_NO_CONFIG))
1637 					diag = rte_eth_tx_queue_setup(pi, qi,
1638 						port->nb_tx_desc[qi],
1639 						txring_numa[pi],
1640 						&(port->tx_conf[qi]));
1641 				else
1642 					diag = rte_eth_tx_queue_setup(pi, qi,
1643 						port->nb_tx_desc[qi],
1644 						port->socket_id,
1645 						&(port->tx_conf[qi]));
1646 
1647 				if (diag == 0)
1648 					continue;
1649 
1650 				/* Fail to setup tx queue, return */
1651 				if (rte_atomic16_cmpset(&(port->port_status),
1652 							RTE_PORT_HANDLING,
1653 							RTE_PORT_STOPPED) == 0)
1654 					printf("Port %d can not be set back "
1655 							"to stopped\n", pi);
1656 				printf("Fail to configure port %d tx queues\n",
1657 				       pi);
1658 				/* try to reconfigure queues next time */
1659 				port->need_reconfig_queues = 1;
1660 				return -1;
1661 			}
1662 			for (qi = 0; qi < nb_rxq; qi++) {
1663 				/* Apply Rx offloads configuration */
1664 				port->rx_conf[qi].offloads =
1665 					port->dev_conf.rxmode.offloads;
1666 				/* setup rx queues */
1667 				if ((numa_support) &&
1668 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
1684 				} else {
1685 					struct rte_mempool *mp =
1686 						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
1696 					     port->socket_id,
1697 					     &(port->rx_conf[qi]),
1698 					     mp);
1699 				}
1700 				if (diag == 0)
1701 					continue;
1702 
1703 				/* Fail to setup rx queue, return */
1704 				if (rte_atomic16_cmpset(&(port->port_status),
1705 							RTE_PORT_HANDLING,
1706 							RTE_PORT_STOPPED) == 0)
1707 					printf("Port %d can not be set back "
1708 							"to stopped\n", pi);
1709 				printf("Fail to configure port %d rx queues\n",
1710 				       pi);
1711 				/* try to reconfigure queues next time */
1712 				port->need_reconfig_queues = 1;
1713 				return -1;
1714 			}
1715 		}
1716 
1717 		/* start port */
1718 		if (rte_eth_dev_start(pi) < 0) {
1719 			printf("Fail to start port %d\n", pi);
1720 
			/* Failed to start the port, set it back to stopped */
1722 			if (rte_atomic16_cmpset(&(port->port_status),
1723 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1724 				printf("Port %d can not be set back to "
1725 							"stopped\n", pi);
1726 			continue;
1727 		}
1728 
1729 		if (rte_atomic16_cmpset(&(port->port_status),
1730 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1731 			printf("Port %d can not be set into started\n", pi);
1732 
1733 		rte_eth_macaddr_get(pi, &mac_addr);
1734 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1735 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1736 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1737 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1738 
		/* at least one port started, need to check link status */
1740 		need_check_link_status = 1;
1741 	}
1742 
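	/* Register one callback for every ethdev event type, on all ports. */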
1743 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
1744 	     event_type < RTE_ETH_EVENT_MAX;
1745 	     event_type++) {
1746 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1747 						event_type,
1748 						eth_event_callback,
1749 						NULL);
1750 		if (diag) {
			printf("Failed to setup event callback for event %d\n",
1752 				event_type);
1753 			return -1;
1754 		}
1755 	}
1756 
1757 	if (need_check_link_status == 1 && !no_link_check)
1758 		check_all_ports_link_status(RTE_PORT_ALL);
1759 	else if (need_check_link_status == 0)
1760 		printf("Please stop the ports first\n");
1761 
1762 	printf("Done\n");
1763 	return 0;
1764 }
1765 
1766 void
1767 stop_port(portid_t pid)
1768 {
1769 	portid_t pi;
1770 	struct rte_port *port;
1771 	int need_check_link_status = 0;
1772 
1773 	if (dcb_test) {
1774 		dcb_test = 0;
1775 		dcb_config = 0;
1776 	}
1777 
1778 	if (port_id_is_invalid(pid, ENABLED_WARN))
1779 		return;
1780 
1781 	printf("Stopping ports...\n");
1782 
1783 	RTE_ETH_FOREACH_DEV(pi) {
1784 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1785 			continue;
1786 
1787 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1788 			printf("Please remove port %d from forwarding configuration.\n", pi);
1789 			continue;
1790 		}
1791 
1792 		if (port_is_bonding_slave(pi)) {
1793 			printf("Please remove port %d from bonded device.\n", pi);
1794 			continue;
1795 		}
1796 
1797 		port = &ports[pi];
1798 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1799 						RTE_PORT_HANDLING) == 0)
1800 			continue;
1801 
1802 		rte_eth_dev_stop(pi);
1803 
1804 		if (rte_atomic16_cmpset(&(port->port_status),
1805 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1806 			printf("Port %d can not be set into stopped\n", pi);
1807 		need_check_link_status = 1;
1808 	}
1809 	if (need_check_link_status && !no_link_check)
1810 		check_all_ports_link_status(RTE_PORT_ALL);
1811 
1812 	printf("Done\n");
1813 }
1814 
1815 void
1816 close_port(portid_t pid)
1817 {
1818 	portid_t pi;
1819 	struct rte_port *port;
1820 
1821 	if (port_id_is_invalid(pid, ENABLED_WARN))
1822 		return;
1823 
1824 	printf("Closing ports...\n");
1825 
1826 	RTE_ETH_FOREACH_DEV(pi) {
1827 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1828 			continue;
1829 
1830 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1831 			printf("Please remove port %d from forwarding configuration.\n", pi);
1832 			continue;
1833 		}
1834 
1835 		if (port_is_bonding_slave(pi)) {
1836 			printf("Please remove port %d from bonded device.\n", pi);
1837 			continue;
1838 		}
1839 
1840 		port = &ports[pi];
1841 		if (rte_atomic16_cmpset(&(port->port_status),
1842 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1843 			printf("Port %d is already closed\n", pi);
1844 			continue;
1845 		}
1846 
1847 		if (rte_atomic16_cmpset(&(port->port_status),
1848 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
1850 			continue;
1851 		}
1852 
1853 		if (port->flow_list)
1854 			port_flow_flush(pi);
1855 		rte_eth_dev_close(pi);
1856 
1857 		if (rte_atomic16_cmpset(&(port->port_status),
1858 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1859 			printf("Port %d cannot be set to closed\n", pi);
1860 	}
1861 
1862 	printf("Done\n");
1863 }
1864 
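/*
 * Reset the given port, or every port when pid is RTE_PORT_ALL, and
 * mark it so that its configuration and queues are rebuilt on the
 * next start.
 */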
1865 void
1866 reset_port(portid_t pid)
1867 {
1868 	int diag;
1869 	portid_t pi;
1870 	struct rte_port *port;
1871 
1872 	if (port_id_is_invalid(pid, ENABLED_WARN))
1873 		return;
1874 
1875 	printf("Resetting ports...\n");
1876 
1877 	RTE_ETH_FOREACH_DEV(pi) {
1878 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1879 			continue;
1880 
1881 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1882 			printf("Please remove port %d from forwarding "
1883 			       "configuration.\n", pi);
1884 			continue;
1885 		}
1886 
1887 		if (port_is_bonding_slave(pi)) {
1888 			printf("Please remove port %d from bonded device.\n",
1889 			       pi);
1890 			continue;
1891 		}
1892 
1893 		diag = rte_eth_dev_reset(pi);
1894 		if (diag == 0) {
1895 			port = &ports[pi];
1896 			port->need_reconfig = 1;
1897 			port->need_reconfig_queues = 1;
1898 		} else {
1899 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1900 		}
1901 	}
1902 
1903 	printf("Done\n");
1904 }
1905 
1906 static int
1907 eth_dev_event_callback_register(void)
1908 {
1909 	int ret;
1910 
1911 	/* register the device event callback */
1912 	ret = rte_dev_event_callback_register(NULL,
1913 		eth_dev_event_callback, NULL);
1914 	if (ret) {
1915 		printf("Failed to register device event callback\n");
1916 		return -1;
1917 	}
1918 
1919 	return 0;
1920 }
1921 
1923 static int
1924 eth_dev_event_callback_unregister(void)
1925 {
1926 	int ret;
1927 
1928 	/* unregister the device event callback */
1929 	ret = rte_dev_event_callback_unregister(NULL,
1930 		eth_dev_event_callback, NULL);
1931 	if (ret < 0) {
1932 		printf("Failed to unregister device event callback\n");
1933 		return -1;
1934 	}
1935 
1936 	return 0;
1937 }
1938 
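/*
 * Hot-plug a new port described by a device identifier (a PCI address
 * or a virtual device name) and configure it with default settings on
 * its NUMA socket.
 */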
1939 void
1940 attach_port(char *identifier)
1941 {
1942 	portid_t pi = 0;
1943 	unsigned int socket_id;
1944 
1945 	printf("Attaching a new port...\n");
1946 
1947 	if (identifier == NULL) {
		printf("Invalid parameter specified\n");
1949 		return;
1950 	}
1951 
1952 	if (rte_eth_dev_attach(identifier, &pi))
1953 		return;
1954 
1955 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1956 	/* if socket_id is invalid, set to 0 */
1957 	if (check_socket_id(socket_id) < 0)
1958 		socket_id = 0;
1959 	reconfig(pi, socket_id);
1960 	rte_eth_promiscuous_enable(pi);
1961 
1962 	nb_ports = rte_eth_dev_count_avail();
1963 
1964 	ports[pi].port_status = RTE_PORT_STOPPED;
1965 
1966 	update_fwd_ports(pi);
1967 
	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1969 	printf("Done\n");
1970 }
1971 
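/*
 * Hot-unplug the given port. The port must have been closed first;
 * any remaining flow rules are flushed before the device is detached.
 */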
1972 void
1973 detach_port(portid_t port_id)
1974 {
1975 	char name[RTE_ETH_NAME_MAX_LEN];
1976 
1977 	printf("Detaching a port...\n");
1978 
1979 	if (!port_is_closed(port_id)) {
1980 		printf("Please close port first\n");
1981 		return;
1982 	}
1983 
1984 	if (ports[port_id].flow_list)
1985 		port_flow_flush(port_id);
1986 
1987 	if (rte_eth_dev_detach(port_id, name)) {
1988 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1989 		return;
1990 	}
1991 
1992 	nb_ports = rte_eth_dev_count_avail();
1993 
1994 	update_fwd_ports(RTE_MAX_ETHPORTS);
1995 
	printf("Port '%s' is detached. Total ports is now %d\n",
			name, nb_ports);
1998 	printf("Done\n");
2000 }
2001 
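/*
 * Exit hook of testpmd: stop packet forwarding if it is still running,
 * stop and close every port, and shut down hot-plug monitoring when it
 * was enabled.
 */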
2002 void
2003 pmd_test_exit(void)
2004 {
2005 	portid_t pt_id;
2006 	int ret;
2007 
2008 	if (test_done == 0)
2009 		stop_packet_forwarding();
2010 
2011 	if (ports != NULL) {
2012 		no_link_check = 1;
2013 		RTE_ETH_FOREACH_DEV(pt_id) {
2014 			printf("\nShutting down port %d...\n", pt_id);
2015 			fflush(stdout);
2016 			stop_port(pt_id);
2017 			close_port(pt_id);
2018 		}
2019 	}
2020 
2021 	if (hot_plug) {
2022 		ret = rte_dev_event_monitor_stop();
2023 		if (ret)
			RTE_LOG(ERR, EAL,
				"Failed to stop device event monitor.\n");
2026 
2027 		ret = eth_dev_event_callback_unregister();
2028 		if (ret)
			RTE_LOG(ERR, EAL,
				"Failed to unregister all event callbacks.\n");
2031 	}
2032 
2033 	printf("\nBye...\n");
2034 }
2035 
2036 typedef void (*cmd_func_t)(void);
2037 struct pmd_test_command {
2038 	const char *cmd_name;
2039 	cmd_func_t cmd_func;
2040 };
2041 
2042 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
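/*
 * Illustrative sketch only: PMD_TEST_CMD_NB assumes a command table
 * such as the following (the "help" entry and its do_help() handler
 * are hypothetical examples, not definitions from this file):
 *
 *	static void do_help(void) { printf("help\n"); }
 *
 *	static struct pmd_test_command pmd_test_menu[] = {
 *		{ "help", do_help },
 *	};
 */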
2043 
/* Check the link status of all ports in up to 9 s, then print the final status */
2045 static void
2046 check_all_ports_link_status(uint32_t port_mask)
2047 {
2048 #define CHECK_INTERVAL 100 /* 100ms */
2049 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2050 	portid_t portid;
2051 	uint8_t count, all_ports_up, print_flag = 0;
2052 	struct rte_eth_link link;
2053 
2054 	printf("Checking link statuses...\n");
2055 	fflush(stdout);
2056 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2057 		all_ports_up = 1;
2058 		RTE_ETH_FOREACH_DEV(portid) {
2059 			if ((port_mask & (1 << portid)) == 0)
2060 				continue;
2061 			memset(&link, 0, sizeof(link));
2062 			rte_eth_link_get_nowait(portid, &link);
2063 			/* print link status if flag set */
2064 			if (print_flag == 1) {
2065 				if (link.link_status)
2066 					printf(
					"Port %d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
2071 				else
2072 					printf("Port %d Link Down\n", portid);
2073 				continue;
2074 			}
2075 			/* clear all_ports_up flag if any link down */
2076 			if (link.link_status == ETH_LINK_DOWN) {
2077 				all_ports_up = 0;
2078 				break;
2079 			}
2080 		}
2081 		/* after finally printing all link status, get out */
2082 		if (print_flag == 1)
2083 			break;
2084 
2085 		if (all_ports_up == 0) {
2086 			fflush(stdout);
2087 			rte_delay_ms(CHECK_INTERVAL);
2088 		}
2089 
2090 		/* set the print_flag if all ports up or timeout */
2091 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2092 			print_flag = 1;
2093 		}
2094 
2095 		if (lsc_interrupt)
2096 			break;
2097 	}
2098 }
2099 
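/*
 * Deferred handler for device-removal interrupts, scheduled as an
 * alarm from eth_event_callback(): stop and close the port with link
 * checking temporarily disabled, then detach the underlying device.
 */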
2100 static void
2101 rmv_event_callback(void *arg)
2102 {
2103 	int org_no_link_check = no_link_check;
2104 	struct rte_eth_dev *dev;
2105 	portid_t port_id = (intptr_t)arg;
2106 
2107 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2108 	dev = &rte_eth_devices[port_id];
2109 
2110 	no_link_check = 1;
2111 	stop_port(port_id);
2112 	no_link_check = org_no_link_check;
2113 	close_port(port_id);
2114 	printf("removing device %s\n", dev->device->name);
2115 	if (rte_eal_dev_detach(dev->device))
2116 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2117 			dev->device->name);
2118 }
2119 
2120 /* This function is used by the interrupt thread */
2121 static int
2122 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2123 		  void *ret_param)
2124 {
2125 	static const char * const event_desc[] = {
2126 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2127 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2128 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2129 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2130 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2131 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2132 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2133 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2134 		[RTE_ETH_EVENT_NEW] = "device probed",
2135 		[RTE_ETH_EVENT_DESTROY] = "device released",
2136 		[RTE_ETH_EVENT_MAX] = NULL,
2137 	};
2138 
2139 	RTE_SET_USED(param);
2140 	RTE_SET_USED(ret_param);
2141 
2142 	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2144 			port_id, __func__, type);
2145 		fflush(stderr);
2146 	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2148 			event_desc[type]);
2149 		fflush(stdout);
2150 	}
2151 
2152 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2153 		return 0;
2154 
2155 	switch (type) {
2156 	case RTE_ETH_EVENT_INTR_RMV:
2157 		if (rte_eal_alarm_set(100000,
2158 				rmv_event_callback, (void *)(intptr_t)port_id))
2159 			fprintf(stderr, "Could not set up deferred device removal\n");
2160 		break;
2161 	default:
2162 		break;
2163 	}
2164 	return 0;
2165 }
2166 
2167 /* This function is used by the interrupt thread */
2168 static void
2169 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2170 			     __rte_unused void *arg)
2171 {
2172 	if (type >= RTE_DEV_EVENT_MAX) {
2173 		fprintf(stderr, "%s called upon invalid event %d\n",
2174 			__func__, type);
2175 		fflush(stderr);
2176 	}
2177 
2178 	switch (type) {
2179 	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
			device_name);
		/* TODO: Once failure handling is finished, stop packet
		 * forwarding, then stop, close and detach the port.
		 */
2185 		break;
2186 	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
			device_name);
		/* TODO: Once the kernel driver binding is finished,
		 * attach the port.
		 */
2192 		break;
2193 	default:
2194 		break;
2195 	}
2196 }
2197 
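/*
 * Program the user-supplied TX queue <-> stats counter mappings of the
 * given port into the device registers.
 */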
2198 static int
2199 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2200 {
2201 	uint16_t i;
2202 	int diag;
2203 	uint8_t mapping_found = 0;
2204 
2205 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2206 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2208 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2209 					tx_queue_stats_mappings[i].queue_id,
2210 					tx_queue_stats_mappings[i].stats_counter_id);
2211 			if (diag != 0)
2212 				return diag;
2213 			mapping_found = 1;
2214 		}
2215 	}
2216 	if (mapping_found)
2217 		port->tx_queue_stats_mapping_enabled = 1;
2218 	return 0;
2219 }
2220 
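/*
 * Program the user-supplied RX queue <-> stats counter mappings of the
 * given port into the device registers.
 */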
2221 static int
2222 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2223 {
2224 	uint16_t i;
2225 	int diag;
2226 	uint8_t mapping_found = 0;
2227 
2228 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2229 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2231 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2232 					rx_queue_stats_mappings[i].queue_id,
2233 					rx_queue_stats_mappings[i].stats_counter_id);
2234 			if (diag != 0)
2235 				return diag;
2236 			mapping_found = 1;
2237 		}
2238 	}
2239 	if (mapping_found)
2240 		port->rx_queue_stats_mapping_enabled = 1;
2241 	return 0;
2242 }
2243 
2244 static void
2245 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2246 {
2247 	int diag = 0;
2248 
2249 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2250 	if (diag != 0) {
2251 		if (diag == -ENOTSUP) {
2252 			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported for port id=%d\n", pi);
		} else
2256 			rte_exit(EXIT_FAILURE,
2257 					"set_tx_queue_stats_mapping_registers "
2258 					"failed for port id=%d diag=%d\n",
2259 					pi, diag);
2260 	}
2261 
2262 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2263 	if (diag != 0) {
2264 		if (diag == -ENOTSUP) {
2265 			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported for port id=%d\n", pi);
		} else
2269 			rte_exit(EXIT_FAILURE,
2270 					"set_rx_queue_stats_mapping_registers "
2271 					"failed for port id=%d diag=%d\n",
2272 					pi, diag);
2273 	}
2274 }
2275 
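/*
 * Initialize every RX and TX queue configuration of the port from the
 * driver defaults, then override the fields that were set on the
 * command line.
 */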
2276 static void
2277 rxtx_port_config(struct rte_port *port)
2278 {
2279 	uint16_t qid;
2280 
2281 	for (qid = 0; qid < nb_rxq; qid++) {
2282 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2283 
2284 		/* Check if any Rx parameters have been passed */
2285 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2286 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2287 
2288 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2289 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2290 
2291 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2292 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2293 
2294 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2295 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2296 
2297 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2298 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2299 
2300 		port->nb_rx_desc[qid] = nb_rxd;
2301 	}
2302 
2303 	for (qid = 0; qid < nb_txq; qid++) {
2304 		port->tx_conf[qid] = port->dev_info.default_txconf;
2305 
2306 		/* Check if any Tx parameters have been passed */
2307 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2308 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2309 
2310 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2311 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2312 
2313 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2314 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2315 
2316 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2317 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2318 
2319 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2320 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2321 
2322 		port->nb_tx_desc[qid] = nb_txd;
2323 	}
2324 }
2325 
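/*
 * Build the default configuration of every probed port: flow director,
 * RSS when several RX queues are used, queue parameters, stats
 * mappings and optional LSC/RMV interrupts.
 */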
2326 void
2327 init_port_config(void)
2328 {
2329 	portid_t pid;
2330 	struct rte_port *port;
2331 	struct rte_eth_dev_info dev_info;
2332 
2333 	RTE_ETH_FOREACH_DEV(pid) {
2334 		port = &ports[pid];
2335 		port->dev_conf.fdir_conf = fdir_conf;
2336 		if (nb_rxq > 1) {
2337 			rte_eth_dev_info_get(pid, &dev_info);
2338 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2339 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2340 				rss_hf & dev_info.flow_type_rss_offloads;
2341 		} else {
2342 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2343 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2344 		}
2345 
2346 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2348 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2349 			else
2350 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2351 		}
2352 
2353 		rxtx_port_config(port);
2354 
2355 		rte_eth_macaddr_get(pid, &port->eth_addr);
2356 
2357 		map_port_queue_stats_mapping_registers(pid, port);
2358 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2359 		rte_pmd_ixgbe_bypass_init(pid);
2360 #endif
2361 
2362 		if (lsc_interrupt &&
2363 		    (rte_eth_devices[pid].data->dev_flags &
2364 		     RTE_ETH_DEV_INTR_LSC))
2365 			port->dev_conf.intr_conf.lsc = 1;
2366 		if (rmv_interrupt &&
2367 		    (rte_eth_devices[pid].data->dev_flags &
2368 		     RTE_ETH_DEV_INTR_RMV))
2369 			port->dev_conf.intr_conf.rmv = 1;
2370 
2371 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2372 		/* Detect softnic port */
2373 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2374 			port->softnic_enable = 1;
2375 			memset(&port->softport, 0, sizeof(struct softnic_port));
2376 
2377 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2378 				port->softport.tm_flag = 1;
2379 		}
2380 #endif
2381 	}
2382 }
2383 
2384 void set_port_slave_flag(portid_t slave_pid)
2385 {
2386 	struct rte_port *port;
2387 
2388 	port = &ports[slave_pid];
2389 	port->slave_flag = 1;
2390 }
2391 
2392 void clear_port_slave_flag(portid_t slave_pid)
2393 {
2394 	struct rte_port *port;
2395 
2396 	port = &ports[slave_pid];
2397 	port->slave_flag = 0;
2398 }
2399 
2400 uint8_t port_is_bonding_slave(portid_t slave_pid)
2401 {
2402 	struct rte_port *port;
2403 
2404 	port = &ports[slave_pid];
2405 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2406 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2407 		return 1;
2408 	return 0;
2409 }
2410 
2411 const uint16_t vlan_tags[] = {
2412 		0,  1,  2,  3,  4,  5,  6,  7,
2413 		8,  9, 10, 11,  12, 13, 14, 15,
2414 		16, 17, 18, 19, 20, 21, 22, 23,
2415 		24, 25, 26, 27, 28, 29, 30, 31
2416 };
2417 
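/*
 * Fill eth_conf with a DCB configuration: VMDq+DCB when dcb_mode is
 * DCB_VT_ENABLED, plain DCB (with RSS on RX) otherwise. pfc_en
 * additionally enables priority flow control.
 */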
static int
2419 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2420 		 enum dcb_mode_enable dcb_mode,
2421 		 enum rte_eth_nb_tcs num_tcs,
2422 		 uint8_t pfc_en)
2423 {
2424 	uint8_t i;
2425 
	/*
	 * Build up the correct configuration for DCB+VT based on the VLAN
	 * tags array given above and the number of traffic classes
	 * available for use.
	 */
2430 	if (dcb_mode == DCB_VT_ENABLED) {
2431 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2432 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2433 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2434 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2435 
2436 		/* VMDQ+DCB RX and TX configurations */
2437 		vmdq_rx_conf->enable_default_pool = 0;
2438 		vmdq_rx_conf->default_pool = 0;
2439 		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2443 
2444 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2445 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2446 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2447 			vmdq_rx_conf->pool_map[i].pools =
2448 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2449 		}
2450 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2451 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2452 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2453 		}
2454 
2455 		/* set DCB mode of RX and TX of multiple queues */
2456 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2457 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2458 	} else {
2459 		struct rte_eth_dcb_rx_conf *rx_conf =
2460 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2461 		struct rte_eth_dcb_tx_conf *tx_conf =
2462 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2463 
2464 		rx_conf->nb_tcs = num_tcs;
2465 		tx_conf->nb_tcs = num_tcs;
2466 
2467 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2468 			rx_conf->dcb_tc[i] = i % num_tcs;
2469 			tx_conf->dcb_tc[i] = i % num_tcs;
2470 		}
2471 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2472 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2473 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2474 	}
2475 
2476 	if (pfc_en)
2477 		eth_conf->dcb_capability_en =
2478 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2479 	else
2480 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2481 
2482 	return 0;
2483 }
2484 
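/*
 * Reconfigure the given port for DCB operation: build the DCB port
 * configuration, adjust the global rxq/txq counts to the device
 * capabilities and enable VLAN filtering on the DCB VLAN tags.
 */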
2485 int
2486 init_port_dcb_config(portid_t pid,
2487 		     enum dcb_mode_enable dcb_mode,
2488 		     enum rte_eth_nb_tcs num_tcs,
2489 		     uint8_t pfc_en)
2490 {
2491 	struct rte_eth_conf port_conf;
2492 	struct rte_port *rte_port;
2493 	int retval;
2494 	uint16_t i;
2495 
2496 	rte_port = &ports[pid];
2497 
2498 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2499 	/* Enter DCB configuration status */
2500 	dcb_config = 1;
2501 
2502 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2503 	port_conf.txmode = rte_port->dev_conf.txmode;
2504 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2506 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2507 	if (retval < 0)
2508 		return retval;
2509 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2510 
	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be set up.
	 */
	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2517 
2518 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2519 
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
	 */
2523 	if (dcb_mode == DCB_VT_ENABLED &&
2524 	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2527 		return -1;
2528 	}
2529 
	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
2533 	if (dcb_mode == DCB_VT_ENABLED) {
2534 		if (rte_port->dev_info.max_vfs > 0) {
2535 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2536 			nb_txq = rte_port->dev_info.nb_tx_queues;
2537 		} else {
2538 			nb_rxq = rte_port->dev_info.max_rx_queues;
2539 			nb_txq = rte_port->dev_info.max_tx_queues;
2540 		}
2541 	} else {
		/* If VT is disabled, use all PF queues */
2543 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2544 			nb_rxq = rte_port->dev_info.max_rx_queues;
2545 			nb_txq = rte_port->dev_info.max_tx_queues;
2546 		} else {
2547 			nb_rxq = (queueid_t)num_tcs;
2548 			nb_txq = (queueid_t)num_tcs;
		}
2551 	}
2552 	rx_free_thresh = 64;
2553 
2554 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2555 
2556 	rxtx_port_config(rte_port);
2557 	/* VLAN filter */
2558 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2559 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2560 		rx_vft_set(pid, vlan_tags[i], 1);
2561 
2562 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2563 	map_port_queue_stats_mapping_registers(pid, rte_port);
2564 
2565 	rte_port->dcb_flag = 1;
2566 
2567 	return 0;
2568 }
2569 
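/*
 * Allocate the zero-initialized array of rte_port structures backing
 * all RTE_MAX_ETHPORTS possible ports.
 */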
2570 static void
2571 init_port(void)
2572 {
2573 	/* Configuration of Ethernet ports. */
2574 	ports = rte_zmalloc("testpmd: ports",
2575 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2576 			    RTE_CACHE_LINE_SIZE);
2577 	if (ports == NULL) {
2578 		rte_exit(EXIT_FAILURE,
2579 				"rte_zmalloc(%d struct rte_port) failed\n",
2580 				RTE_MAX_ETHPORTS);
2581 	}
2582 }
2583 
2584 static void
2585 force_quit(void)
2586 {
2587 	pmd_test_exit();
2588 	prompt_exit();
2589 }
2590 
2591 static void
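/*
 * Periodic statistics display: clear the terminal with ANSI escape
 * sequences and print the NIC stats of every forwarding port.
 */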
2592 print_stats(void)
2593 {
2594 	uint8_t i;
2595 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2596 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2597 
2598 	/* Clear screen and move to top left */
2599 	printf("%s%s", clr, top_left);
2600 
2601 	printf("\nPort statistics ====================================");
2602 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2603 		nic_stats_display(fwd_ports_ids[i]);
2604 }
2605 
2606 static void
2607 signal_handler(int signum)
2608 {
2609 	if (signum == SIGINT || signum == SIGTERM) {
2610 		printf("\nSignal %d received, preparing to exit...\n",
2611 				signum);
2612 #ifdef RTE_LIBRTE_PDUMP
2613 		/* uninitialize packet capture framework */
2614 		rte_pdump_uninit();
2615 #endif
2616 #ifdef RTE_LIBRTE_LATENCY_STATS
2617 		rte_latencystats_uninit();
2618 #endif
2619 		force_quit();
		/* Set flag to indicate forced termination. */
2621 		f_quit = 1;
2622 		/* exit with the expected status */
2623 		signal(signum, SIG_DFL);
2624 		kill(getpid(), signum);
2625 	}
2626 }
2627 
2628 int
2629 main(int argc, char** argv)
2630 {
2631 	int diag;
2632 	portid_t port_id;
2633 	int ret;
2634 
2635 	signal(SIGINT, signal_handler);
2636 	signal(SIGTERM, signal_handler);
2637 
2638 	diag = rte_eal_init(argc, argv);
2639 	if (diag < 0)
2640 		rte_panic("Cannot init EAL\n");
2641 
2642 	testpmd_logtype = rte_log_register("testpmd");
2643 	if (testpmd_logtype < 0)
2644 		rte_panic("Cannot register log type");
2645 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2646 
2647 	/* Bitrate/latency stats disabled by default */
2648 #ifdef RTE_LIBRTE_BITRATE
2649 	bitrate_enabled = 0;
2650 #endif
2651 #ifdef RTE_LIBRTE_LATENCY_STATS
2652 	latencystats_enabled = 0;
2653 #endif
2654 
2655 	/* on FreeBSD, mlockall() is disabled by default */
2656 #ifdef RTE_EXEC_ENV_BSDAPP
2657 	do_mlockall = 0;
2658 #else
2659 	do_mlockall = 1;
2660 #endif
2661 
2662 	argc -= diag;
2663 	argv += diag;
2664 	if (argc > 1)
2665 		launch_args_parse(argc, argv);
2666 
2667 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2668 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2669 			strerror(errno));
2670 	}
2671 
2672 #ifdef RTE_LIBRTE_PDUMP
2673 	/* initialize packet capture framework */
2674 	rte_pdump_init(NULL);
2675 #endif
2676 
2677 	nb_ports = (portid_t) rte_eth_dev_count_avail();
2678 	if (nb_ports == 0)
2679 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2680 
2681 	/* allocate port structures, and init them */
2682 	init_port();
2683 
2684 	set_def_fwd_config();
2685 	if (nb_lcores == 0)
2686 		rte_panic("Empty set of forwarding logical cores - check the "
2687 			  "core mask supplied in the command parameters\n");
2688 
2689 	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");
2692 
2693 	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
2696 		lsc_interrupt = 0;
2697 	}
2698 
2699 	if (!nb_rxq && !nb_txq)
2700 		printf("Warning: Either rx or tx queues should be non-zero\n");
2701 
2702 	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
2705 		       nb_rxq, nb_txq);
2706 
2707 	init_config();
2708 
2709 	if (hot_plug) {
2710 		/* enable hot plug monitoring */
2711 		ret = rte_dev_event_monitor_start();
2712 		if (ret) {
2713 			rte_errno = EINVAL;
2714 			return -1;
2715 		}
		ret = eth_dev_event_callback_register();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
	}
2719 
2720 	if (start_port(RTE_PORT_ALL) != 0)
2721 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2722 
2723 	/* set all ports to promiscuous mode by default */
2724 	RTE_ETH_FOREACH_DEV(port_id)
2725 		rte_eth_promiscuous_enable(port_id);
2726 
2727 	/* Init metrics library */
2728 	rte_metrics_init(rte_socket_id());
2729 
2730 #ifdef RTE_LIBRTE_LATENCY_STATS
2731 	if (latencystats_enabled != 0) {
2732 		int ret = rte_latencystats_init(1, NULL);
2733 		if (ret)
2734 			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
2736 		printf("Latencystats running on lcore %d\n",
2737 			latencystats_lcore_id);
2738 	}
2739 #endif
2740 
2741 	/* Setup bitrate stats */
2742 #ifdef RTE_LIBRTE_BITRATE
2743 	if (bitrate_enabled != 0) {
2744 		bitrate_data = rte_stats_bitrate_create();
2745 		if (bitrate_data == NULL)
2746 			rte_exit(EXIT_FAILURE,
2747 				"Could not allocate bitrate data.\n");
2748 		rte_stats_bitrate_reg(bitrate_data);
2749 	}
2750 #endif
2751 
2752 #ifdef RTE_LIBRTE_CMDLINE
2753 	if (strlen(cmdline_filename) != 0)
2754 		cmdline_read_from_file(cmdline_filename);
2755 
2756 	if (interactive == 1) {
2757 		if (auto_start) {
2758 			printf("Start automatic packet forwarding\n");
2759 			start_packet_forwarding(0);
2760 		}
2761 		prompt();
2762 		pmd_test_exit();
2763 	} else
2764 #endif
2765 	{
2766 		char c;
2767 		int rc;
2768 
2769 		f_quit = 0;
2770 
		printf("No command-line core given, start packet forwarding\n");
2772 		start_packet_forwarding(tx_first);
2773 		if (stats_period != 0) {
2774 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2775 			uint64_t timer_period;
2776 
2777 			/* Convert to number of cycles */
2778 			timer_period = stats_period * rte_get_timer_hz();
2779 
2780 			while (f_quit == 0) {
2781 				cur_time = rte_get_timer_cycles();
2782 				diff_time += cur_time - prev_time;
2783 
2784 				if (diff_time >= timer_period) {
2785 					print_stats();
2786 					/* Reset the timer */
2787 					diff_time = 0;
2788 				}
2789 				/* Sleep to avoid unnecessary checks */
2790 				prev_time = cur_time;
2791 				sleep(1);
2792 			}
2793 		}
2794 
2795 		printf("Press enter to exit\n");
2796 		rc = read(0, &c, 1);
2797 		pmd_test_exit();
2798 		if (rc < 0)
2799 			return 1;
2800 	}
2801 
2802 	return 0;
2803 }
2804