xref: /dpdk/app/test-pmd/testpmd.c (revision c9cce42876f5a45c4fb71eddb328562cf7fe315e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
34 #include <rte_eal.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
42 #include <rte_mbuf.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_dev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
52 #endif
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
55 #endif
56 #include <rte_flow.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
60 #endif
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
63 #endif
64 
65 #include "testpmd.h"
66 
67 #ifndef MAP_HUGETLB
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
70 #else
71 #define HUGE_FLAG MAP_HUGETLB
72 #endif
73 
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
77 #else
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
79 #endif
80 
81 #define EXTMEM_HEAP_NAME "extmem"
82 
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
85 
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
89 uint8_t tx_first;
90 char cmdline_filename[PATH_MAX] = {0};
91 
92 /*
93  * NUMA support configuration.
94  * When set, the NUMA support attempts to dispatch the allocation of the
95  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96  * probed ports among the detected CPU sockets.
97  * Otherwise, all memory is allocated from CPU socket 0.
98  */
99 uint8_t numa_support = 1; /**< numa enabled by default */
100 
101 /*
102  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103  * not configured.
104  */
105 uint8_t socket_num = UMA_NO_CONFIG;
106 
107 /*
108  * Select mempool allocation type:
109  * - native: use regular DPDK memory
110  * - anon: use regular DPDK memory to create mempool, but populate using
111  *         anonymous memory (may not be IOVA-contiguous)
112  * - xmem: use externally allocated hugepage memory
113  */
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
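/*
 * Editor's note (illustrative, not part of the original source): these
 * allocation modes are selected with testpmd's --mp-alloc option, e.g.:
 *
 *   ./testpmd -l 0-3 -n 4 -- -i --mp-alloc=anon
 *   ./testpmd -l 0-3 -n 4 -- -i --mp-alloc=xmemhuge
 *
 * The exact option spellings should be checked against parameters.c for
 * this revision; MP_ALLOC_XMEM_HUGE below corresponds to "xmemhuge".
 */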
115 
116 /*
117  * Store the specified sockets on which the memory pool used by each port
118  * is allocated.
119  */
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
121 
122 /*
123  * Store the specified sockets on which the RX ring used by each port
124  * is allocated.
125  */
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
127 
128 /*
129  * Store the specified sockets on which the TX ring used by each port
130  * is allocated.
131  */
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
133 
134 /*
135  * Record the Ethernet address of peer target ports to which packets are
136  * forwarded.
137  * Must be instantiated with the ethernet addresses of peer traffic generator
138  * ports.
139  */
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
142 
143 /*
144  * Probed Target Environment.
145  */
146 struct rte_port *ports;	       /**< For all probed ethernet ports. */
147 portid_t nb_ports;             /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150 
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
152 
153 /*
154  * Test Forwarding Configuration.
155  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157  */
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162 
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165 
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168 
169 /*
170  * Forwarding engines.
171  */
172 struct fwd_engine * fwd_engines[] = {
173 	&io_fwd_engine,
174 	&mac_fwd_engine,
175 	&mac_swap_engine,
176 	&flow_gen_engine,
177 	&rx_only_engine,
178 	&tx_only_engine,
179 	&csum_fwd_engine,
180 	&icmp_echo_engine,
181 	&noisy_vnf_engine,
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
183 	&softnic_fwd_engine,
184 #endif
185 #ifdef RTE_LIBRTE_IEEE1588
186 	&ieee1588_fwd_engine,
187 #endif
188 	NULL,
189 };
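/*
 * Editor's sketch (assumes the struct fwd_engine layout from testpmd.h):
 * the current engine is chosen by matching fwd_mode_name against this
 * NULL-terminated array, e.g. for the "set fwd mac" command or the
 * --forward-mode=mac option, along these lines:
 *
 *   struct fwd_engine *e;
 *   unsigned int i;
 *
 *   for (i = 0; (e = fwd_engines[i]) != NULL; i++)
 *       if (strcmp(e->fwd_mode_name, "mac") == 0)
 *           cur_fwd_eng = e; // otherwise IO mode stays the default
 */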
190 
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196 
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199                                       * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201 
202 /*
203  * In a container, a process running with the 'stats-period' option cannot be
204  * terminated. Set this flag to exit the stats-period loop on SIGINT/SIGTERM.
205  */
206 uint8_t f_quit;
207 
208 /*
209  * Configuration of packet segments used by the "txonly" processing engine.
210  */
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 	TXONLY_DEF_PACKET_LEN,
214 };
215 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216 
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
219 
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222 
223 /* whether the current configuration is in DCB mode; 0 means it is not */
224 uint8_t dcb_config = 0;
225 
226 /* Whether DCB is in testing status */
227 uint8_t dcb_test = 0;
228 
229 /*
230  * Configurable number of RX/TX queues.
231  */
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
234 
235 /*
236  * Configurable number of RX/TX ring descriptors.
237  * Defaults are supplied by drivers via ethdev.
238  */
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
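/*
 * Editor's note: passing 0 descriptors down to rte_eth_rx_queue_setup() and
 * rte_eth_tx_queue_setup() makes ethdev fall back to the ring sizes the
 * driver advertises in rte_eth_dev_info (default_rxportconf and
 * default_txportconf), so each PMD gets its preferred defaults unless the
 * --rxd/--txd options override them.
 */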
243 
244 #define RTE_PMD_PARAM_UNSET -1
245 /*
246  * Configurable values of RX and TX ring threshold registers.
247  */
248 
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252 
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
256 
257 /*
258  * Configurable value of RX free threshold.
259  */
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
261 
262 /*
263  * Configurable value of RX drop enable.
264  */
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
266 
267 /*
268  * Configurable value of TX free threshold.
269  */
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
271 
272 /*
273  * Configurable value of TX RS bit threshold.
274  */
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
276 
277 /*
278  * Configurable value of buffered packets before sending.
279  */
280 uint16_t noisy_tx_sw_bufsz;
281 
282 /*
283  * Configurable value of packet buffer timeout.
284  */
285 uint16_t noisy_tx_sw_buf_flush_time;
286 
287 /*
288  * Configurable value for size of VNF internal memory area
289  * used for simulating noisy neighbour behaviour
290  */
291 uint64_t noisy_lkup_mem_sz;
292 
293 /*
294  * Configurable value of number of random writes done in
295  * VNF simulation memory area.
296  */
297 uint64_t noisy_lkup_num_writes;
298 
299 /*
300  * Configurable value of number of random reads done in
301  * VNF simulation memory area.
302  */
303 uint64_t noisy_lkup_num_reads;
304 
305 /*
306  * Configurable value of number of random reads/writes done in
307  * VNF simulation memory area.
308  */
309 uint64_t noisy_lkup_num_reads_writes;
310 
311 /*
312  * Receive Side Scaling (RSS) configuration.
313  */
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
315 
316 /*
317  * Port topology configuration
318  */
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
320 
321 /*
322  * Avoid flushing all the RX streams before forwarding starts.
323  */
324 uint8_t no_flush_rx = 0; /* flush by default */
325 
326 /*
327  * Flow API isolated mode.
328  */
329 uint8_t flow_isolate_all;
330 
331 /*
332  * Avoid checking the link status when starting/stopping a port.
333  */
334 uint8_t no_link_check = 0; /* check by default */
335 
336 /*
337  * Enable link status change notification
338  */
339 uint8_t lsc_interrupt = 1; /* enabled by default */
340 
341 /*
342  * Enable device removal notification.
343  */
344 uint8_t rmv_interrupt = 1; /* enabled by default */
345 
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
347 
348 /*
349  * Display or mask ether events
350  * Default to all events except VF_MBOX
351  */
352 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
353 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
354 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
355 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
357 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
358 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
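/*
 * Editor's illustration: a set bit enables printing for that event type.
 * The event callback below tests the mask along these lines:
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *       printf("Port %u: %s event\n", port_id, event_desc);
 *
 * VF_MBOX is the only event type left out of the default mask above.
 */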
359 /*
360  * Decide if all memory is locked for performance.
361  */
362 int do_mlockall = 0;
363 
364 /*
365  * NIC bypass mode configuration options.
366  */
367 
368 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
369 /* The NIC bypass watchdog timeout. */
370 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
371 #endif
372 
373 
374 #ifdef RTE_LIBRTE_LATENCY_STATS
375 
376 /*
377  * Set when latency stats are enabled on the command line
378  */
379 uint8_t latencystats_enabled;
380 
381 /*
382  * Lcore ID to service latency statistics.
383  */
384 lcoreid_t latencystats_lcore_id = -1;
385 
386 #endif
387 
388 /*
389  * Ethernet device configuration.
390  */
391 struct rte_eth_rxmode rx_mode = {
392 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
393 };
394 
395 struct rte_eth_txmode tx_mode = {
396 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
397 };
398 
399 struct rte_fdir_conf fdir_conf = {
400 	.mode = RTE_FDIR_MODE_NONE,
401 	.pballoc = RTE_FDIR_PBALLOC_64K,
402 	.status = RTE_FDIR_REPORT_STATUS,
403 	.mask = {
404 		.vlan_tci_mask = 0xFFEF,
405 		.ipv4_mask     = {
406 			.src_ip = 0xFFFFFFFF,
407 			.dst_ip = 0xFFFFFFFF,
408 		},
409 		.ipv6_mask     = {
410 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
411 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
412 		},
413 		.src_port_mask = 0xFFFF,
414 		.dst_port_mask = 0xFFFF,
415 		.mac_addr_byte_mask = 0xFF,
416 		.tunnel_type_mask = 1,
417 		.tunnel_id_mask = 0xFFFFFFFF,
418 	},
419 	.drop_queue = 127,
420 };
421 
422 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
423 
424 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
426 
427 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
429 
430 uint16_t nb_tx_queue_stats_mappings = 0;
431 uint16_t nb_rx_queue_stats_mappings = 0;
432 
433 /*
434  * Display zero values by default for xstats
435  */
436 uint8_t xstats_hide_zero;
437 
438 unsigned int num_sockets = 0;
439 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
440 
441 #ifdef RTE_LIBRTE_BITRATE
442 /* Bitrate statistics */
443 struct rte_stats_bitrates *bitrate_data;
444 lcoreid_t bitrate_lcore_id;
445 uint8_t bitrate_enabled;
446 #endif
447 
448 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
450 
451 struct vxlan_encap_conf vxlan_encap_conf = {
452 	.select_ipv4 = 1,
453 	.select_vlan = 0,
454 	.vni = "\x00\x00\x00",
455 	.udp_src = 0,
456 	.udp_dst = RTE_BE16(4789),
457 	.ipv4_src = IPv4(127, 0, 0, 1),
458 	.ipv4_dst = IPv4(255, 255, 255, 255),
459 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
460 		"\x00\x00\x00\x00\x00\x00\x00\x01",
461 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
462 		"\x00\x00\x00\x00\x00\x00\x11\x11",
463 	.vlan_tci = 0,
464 	.eth_src = "\x00\x00\x00\x00\x00\x00",
465 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
466 };
467 
468 struct nvgre_encap_conf nvgre_encap_conf = {
469 	.select_ipv4 = 1,
470 	.select_vlan = 0,
471 	.tni = "\x00\x00\x00",
472 	.ipv4_src = IPv4(127, 0, 0, 1),
473 	.ipv4_dst = IPv4(255, 255, 255, 255),
474 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 		"\x00\x00\x00\x00\x00\x00\x00\x01",
476 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 		"\x00\x00\x00\x00\x00\x00\x11\x11",
478 	.vlan_tci = 0,
479 	.eth_src = "\x00\x00\x00\x00\x00\x00",
480 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
481 };
482 
483 /* Forward function declarations */
484 static void setup_attached_port(portid_t pi);
485 static void map_port_queue_stats_mapping_registers(portid_t pi,
486 						   struct rte_port *port);
487 static void check_all_ports_link_status(uint32_t port_mask);
488 static int eth_event_callback(portid_t port_id,
489 			      enum rte_eth_event_type type,
490 			      void *param, void *ret_param);
491 static void eth_dev_event_callback(const char *device_name,
492 				enum rte_dev_event_type type,
493 				void *param);
494 
495 /*
496  * Check if all the ports are started.
497  * If yes, return positive value. If not, return zero.
498  */
499 static int all_ports_started(void);
500 
501 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
502 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
503 
504 /*
505  * Helper function to check whether a socket ID has not been seen before.
506  * Return a positive value if the socket is new, zero if already known.
507  */
508 int
509 new_socket_id(unsigned int socket_id)
510 {
511 	unsigned int i;
512 
513 	for (i = 0; i < num_sockets; i++) {
514 		if (socket_ids[i] == socket_id)
515 			return 0;
516 	}
517 	return 1;
518 }
519 
520 /*
521  * Setup default configuration.
522  */
523 static void
524 set_default_fwd_lcores_config(void)
525 {
526 	unsigned int i;
527 	unsigned int nb_lc;
528 	unsigned int sock_num;
529 
530 	nb_lc = 0;
531 	for (i = 0; i < RTE_MAX_LCORE; i++) {
532 		if (!rte_lcore_is_enabled(i))
533 			continue;
534 		sock_num = rte_lcore_to_socket_id(i);
535 		if (new_socket_id(sock_num)) {
536 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
537 				rte_exit(EXIT_FAILURE,
538 					 "Total sockets greater than %u\n",
539 					 RTE_MAX_NUMA_NODES);
540 			}
541 			socket_ids[num_sockets++] = sock_num;
542 		}
543 		if (i == rte_get_master_lcore())
544 			continue;
545 		fwd_lcores_cpuids[nb_lc++] = i;
546 	}
547 	nb_lcores = (lcoreid_t) nb_lc;
548 	nb_cfg_lcores = nb_lcores;
549 	nb_fwd_lcores = 1;
550 }
551 
552 static void
553 set_def_peer_eth_addrs(void)
554 {
555 	portid_t i;
556 
557 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
558 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
559 		peer_eth_addrs[i].addr_bytes[5] = i;
560 	}
561 }
562 
563 static void
564 set_default_fwd_ports_config(void)
565 {
566 	portid_t pt_id;
567 	int i = 0;
568 
569 	RTE_ETH_FOREACH_DEV(pt_id) {
570 		fwd_ports_ids[i++] = pt_id;
571 
572 		/* Update sockets info according to the attached device */
573 		int socket_id = rte_eth_dev_socket_id(pt_id);
574 		if (socket_id >= 0 && new_socket_id(socket_id)) {
575 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
576 				rte_exit(EXIT_FAILURE,
577 					 "Total sockets greater than %u\n",
578 					 RTE_MAX_NUMA_NODES);
579 			}
580 			socket_ids[num_sockets++] = socket_id;
581 		}
582 	}
583 
584 	nb_cfg_ports = nb_ports;
585 	nb_fwd_ports = nb_ports;
586 }
587 
588 void
589 set_def_fwd_config(void)
590 {
591 	set_default_fwd_lcores_config();
592 	set_def_peer_eth_addrs();
593 	set_default_fwd_ports_config();
594 }
595 
596 /* extremely pessimistic estimation of memory required to create a mempool */
597 static int
598 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
599 {
600 	unsigned int n_pages, mbuf_per_pg, leftover;
601 	uint64_t total_mem, mbuf_mem, obj_sz;
602 
603 	/* there is no good way to predict how much space the mempool will
604 	 * occupy because it will allocate chunks on the fly, and some of those
605 	 * will come from default DPDK memory while some will come from our
606 	 * external memory, so just assume 128MB will be enough for everyone.
607 	 */
608 	uint64_t hdr_mem = 128 << 20;
609 
610 	/* account for possible non-contiguousness */
611 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
612 	if (obj_sz > pgsz) {
613 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
614 		return -1;
615 	}
616 
617 	mbuf_per_pg = pgsz / obj_sz;
618 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
619 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
620 
621 	mbuf_mem = n_pages * pgsz;
622 
623 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
624 
625 	if (total_mem > SIZE_MAX) {
626 		TESTPMD_LOG(ERR, "Memory size too big\n");
627 		return -1;
628 	}
629 	*out = (size_t)total_mem;
630 
631 	return 0;
632 }
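/*
 * Editor's worked example (values assumed, not from the original source):
 * with 2 MB pages and an obj_sz of roughly 2560 bytes, mbuf_per_pg = 819.
 * For nb_mbufs = 180000: n_pages = 180000 / 819 + 1 = 220, so
 * mbuf_mem = 220 * 2 MB = 440 MB and *out = RTE_ALIGN(128 MB + 440 MB, 2 MB)
 * = 568 MB. The fixed 128 MB header estimate dominates for small pools,
 * which is the deliberately pessimistic part.
 */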
633 
634 static inline uint32_t
635 bsf64(uint64_t v)
636 {
637 	return (uint32_t)__builtin_ctzll(v);
638 }
639 
640 static inline uint32_t
641 log2_u64(uint64_t v)
642 {
643 	if (v == 0)
644 		return 0;
645 	v = rte_align64pow2(v);
646 	return bsf64(v);
647 }
648 
649 static int
650 pagesz_flags(uint64_t page_sz)
651 {
652 	/* as per mmap() manpage, all page sizes are log2 of page size
653 	 * shifted by MAP_HUGE_SHIFT
654 	 */
655 	int log2 = log2_u64(page_sz);
656 
657 	return (log2 << HUGE_SHIFT);
658 }
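/*
 * Editor's example: for a 2 MB huge page, log2_u64(2 MB) = 21, so this
 * returns 21 << MAP_HUGE_SHIFT, the value Linux defines as MAP_HUGE_2MB.
 * A 1 GB page yields 30 << MAP_HUGE_SHIFT (MAP_HUGE_1GB). alloc_mem()
 * below ORs the result into the mmap() flags together with MAP_HUGETLB.
 */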
659 
660 static void *
661 alloc_mem(size_t memsz, size_t pgsz, bool huge)
662 {
663 	void *addr;
664 	int flags;
665 
666 	/* allocate anonymous hugepages */
667 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
668 	if (huge)
669 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
670 
671 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
672 	if (addr == MAP_FAILED)
673 		return NULL;
674 
675 	return addr;
676 }
677 
678 struct extmem_param {
679 	void *addr;
680 	size_t len;
681 	size_t pgsz;
682 	rte_iova_t *iova_table;
683 	unsigned int iova_table_len;
684 };
685 
686 static int
687 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
688 		bool huge)
689 {
690 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
691 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
692 	unsigned int cur_page, n_pages, pgsz_idx;
693 	size_t mem_sz, cur_pgsz;
694 	rte_iova_t *iovas = NULL;
695 	void *addr;
696 	int ret;
697 
698 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
699 		/* skip anything that is too big */
700 		if (pgsizes[pgsz_idx] > SIZE_MAX)
701 			continue;
702 
703 		cur_pgsz = pgsizes[pgsz_idx];
704 
705 		/* if we were told not to allocate hugepages, override */
706 		if (!huge)
707 			cur_pgsz = sysconf(_SC_PAGESIZE);
708 
709 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
710 		if (ret < 0) {
711 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
712 			return -1;
713 		}
714 
715 		/* allocate our memory */
716 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
717 
718 		/* if we couldn't allocate memory with a specified page size,
719 		 * that doesn't mean we can't do it with other page sizes, so
720 		 * try another one.
721 		 */
722 		if (addr == NULL)
723 			continue;
724 
725 		/* store IOVA addresses for every page in this memory area */
726 		n_pages = mem_sz / cur_pgsz;
727 
728 		iovas = malloc(sizeof(*iovas) * n_pages);
729 
730 		if (iovas == NULL) {
731 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
732 			goto fail;
733 		}
734 		/* lock memory if it's not huge pages */
735 		if (!huge)
736 			mlock(addr, mem_sz);
737 
738 		/* populate IOVA addresses */
739 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
740 			rte_iova_t iova;
741 			size_t offset;
742 			void *cur;
743 
744 			offset = cur_pgsz * cur_page;
745 			cur = RTE_PTR_ADD(addr, offset);
746 
747 			/* touch the page before getting its IOVA */
748 			*(volatile char *)cur = 0;
749 
750 			iova = rte_mem_virt2iova(cur);
751 
752 			iovas[cur_page] = iova;
753 		}
754 
755 		break;
756 	}
757 	/* if we couldn't allocate anything */
758 	if (iovas == NULL)
759 		return -1;
760 
761 	param->addr = addr;
762 	param->len = mem_sz;
763 	param->pgsz = cur_pgsz;
764 	param->iova_table = iovas;
765 	param->iova_table_len = n_pages;
766 
767 	return 0;
768 fail:
769 	if (iovas)
770 		free(iovas);
771 	if (addr)
772 		munmap(addr, mem_sz);
773 
774 	return -1;
775 }
776 
777 static int
778 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
779 {
780 	struct extmem_param param;
781 	int socket_id, ret;
782 
783 	memset(&param, 0, sizeof(param));
784 
785 	/* check if our heap exists */
786 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
787 	if (socket_id < 0) {
788 		/* create our heap */
789 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
790 		if (ret < 0) {
791 			TESTPMD_LOG(ERR, "Cannot create heap\n");
792 			return -1;
793 		}
794 	}
795 
796 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
797 	if (ret < 0) {
798 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
799 		return -1;
800 	}
801 
802 	/* we now have a valid memory area, so add it to heap */
803 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
804 			param.addr, param.len, param.iova_table,
805 			param.iova_table_len, param.pgsz);
806 
807 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
808 
809 	/* not needed any more */
810 	free(param.iova_table);
811 
812 	if (ret < 0) {
813 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
814 		munmap(param.addr, param.len);
815 		return -1;
816 	}
817 
818 	/* success */
819 
820 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
821 			param.len >> 20);
822 
823 	return 0;
824 }
825 
826 /*
827  * Configuration initialisation done once at init time.
828  */
829 static void
830 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
831 		 unsigned int socket_id)
832 {
833 	char pool_name[RTE_MEMPOOL_NAMESIZE];
834 	struct rte_mempool *rte_mp = NULL;
835 	uint32_t mb_size;
836 
837 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
838 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
839 
840 	TESTPMD_LOG(INFO,
841 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
842 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
843 
844 	switch (mp_alloc_type) {
845 	case MP_ALLOC_NATIVE:
846 		{
847 			/* wrapper to rte_mempool_create() */
848 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
849 					rte_mbuf_best_mempool_ops());
850 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
851 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
852 			break;
853 		}
854 	case MP_ALLOC_ANON:
855 		{
856 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
857 				mb_size, (unsigned int) mb_mempool_cache,
858 				sizeof(struct rte_pktmbuf_pool_private),
859 				socket_id, 0);
860 			if (rte_mp == NULL)
861 				goto err;
862 
863 			if (rte_mempool_populate_anon(rte_mp) == 0) {
864 				rte_mempool_free(rte_mp);
865 				rte_mp = NULL;
866 				goto err;
867 			}
868 			rte_pktmbuf_pool_init(rte_mp, NULL);
869 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
870 			break;
871 		}
872 	case MP_ALLOC_XMEM:
873 	case MP_ALLOC_XMEM_HUGE:
874 		{
875 			int heap_socket;
876 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
877 
878 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
879 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
880 
881 			heap_socket =
882 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
883 			if (heap_socket < 0)
884 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
885 
886 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
887 					rte_mbuf_best_mempool_ops());
888 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
889 					mb_mempool_cache, 0, mbuf_seg_size,
890 					heap_socket);
891 			break;
892 		}
893 	default:
894 		{
895 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
896 		}
897 	}
898 
899 err:
900 	if (rte_mp == NULL) {
901 		rte_exit(EXIT_FAILURE,
902 			"Creation of mbuf pool for socket %u failed: %s\n",
903 			socket_id, rte_strerror(rte_errno));
904 	} else if (verbose_level > 0) {
905 		rte_mempool_dump(stdout, rte_mp);
906 	}
907 }
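/*
 * Editor's usage sketch: init_config() calls this once per discovered NUMA
 * socket, and forwarding lcores later look the pool up through the same
 * name-building scheme (helper names assumed from testpmd.h):
 *
 *   struct rte_mempool *mp = mbuf_pool_find(rte_eth_dev_socket_id(pid));
 *   if (mp == NULL)
 *       mp = mbuf_pool_find(0); // fall back to the socket 0 pool
 */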
908 
909 /*
910  * Check whether the given socket ID is valid in NUMA mode;
911  * if valid, return 0, else return -1.
912  */
913 static int
914 check_socket_id(const unsigned int socket_id)
915 {
916 	static int warning_once = 0;
917 
918 	if (new_socket_id(socket_id)) {
919 		if (!warning_once && numa_support)
920 			printf("Warning: NUMA should be configured manually by"
921 			       " using --port-numa-config and"
922 			       " --ring-numa-config parameters along with"
923 			       " --numa.\n");
924 		warning_once = 1;
925 		return -1;
926 	}
927 	return 0;
928 }
929 
930 /*
931  * Get the allowed maximum number of RX queues.
932  * *pid returns the port ID that has the minimal value of
933  * max_rx_queues among all ports.
934  */
935 queueid_t
936 get_allowed_max_nb_rxq(portid_t *pid)
937 {
938 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
939 	portid_t pi;
940 	struct rte_eth_dev_info dev_info;
941 
942 	RTE_ETH_FOREACH_DEV(pi) {
943 		rte_eth_dev_info_get(pi, &dev_info);
944 		if (dev_info.max_rx_queues < allowed_max_rxq) {
945 			allowed_max_rxq = dev_info.max_rx_queues;
946 			*pid = pi;
947 		}
948 	}
949 	return allowed_max_rxq;
950 }
951 
952 /*
953  * Check whether the input rxq is valid.
954  * The input rxq is valid if it is not greater than the maximum
955  * number of RX queues of any port.
956  * If valid, return 0, else return -1.
957  */
958 int
959 check_nb_rxq(queueid_t rxq)
960 {
961 	queueid_t allowed_max_rxq;
962 	portid_t pid = 0;
963 
964 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
965 	if (rxq > allowed_max_rxq) {
966 		printf("Fail: input rxq (%u) can't be greater "
967 		       "than max_rx_queues (%u) of port %u\n",
968 		       rxq,
969 		       allowed_max_rxq,
970 		       pid);
971 		return -1;
972 	}
973 	return 0;
974 }
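/*
 * Editor's example: with two ports whose max_rx_queues are 16 and 8,
 * get_allowed_max_nb_rxq() returns 8 and points *pid at the second port,
 * so check_nb_rxq(12) fails while check_nb_rxq(8) passes.
 */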
975 
976 /*
977  * Get the allowed maximum number of TX queues.
978  * *pid returns the port ID that has the minimal value of
979  * max_tx_queues among all ports.
980  */
981 queueid_t
982 get_allowed_max_nb_txq(portid_t *pid)
983 {
984 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
985 	portid_t pi;
986 	struct rte_eth_dev_info dev_info;
987 
988 	RTE_ETH_FOREACH_DEV(pi) {
989 		rte_eth_dev_info_get(pi, &dev_info);
990 		if (dev_info.max_tx_queues < allowed_max_txq) {
991 			allowed_max_txq = dev_info.max_tx_queues;
992 			*pid = pi;
993 		}
994 	}
995 	return allowed_max_txq;
996 }
997 
998 /*
999  * Check whether the input txq is valid.
1000  * The input txq is valid if it is not greater than the maximum
1001  * number of TX queues of any port.
1002  * If valid, return 0, else return -1.
1003  */
1004 int
1005 check_nb_txq(queueid_t txq)
1006 {
1007 	queueid_t allowed_max_txq;
1008 	portid_t pid = 0;
1009 
1010 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
1011 	if (txq > allowed_max_txq) {
1012 		printf("Fail: input txq (%u) can't be greater "
1013 		       "than max_tx_queues (%u) of port %u\n",
1014 		       txq,
1015 		       allowed_max_txq,
1016 		       pid);
1017 		return -1;
1018 	}
1019 	return 0;
1020 }
1021 
1022 static void
1023 init_config(void)
1024 {
1025 	portid_t pid;
1026 	struct rte_port *port;
1027 	struct rte_mempool *mbp;
1028 	unsigned int nb_mbuf_per_pool;
1029 	lcoreid_t  lc_id;
1030 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1031 	struct rte_gro_param gro_param;
1032 	uint32_t gso_types;
1033 	int k;
1034 
1035 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1036 
1037 	/* Configuration of logical cores. */
1038 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1039 				sizeof(struct fwd_lcore *) * nb_lcores,
1040 				RTE_CACHE_LINE_SIZE);
1041 	if (fwd_lcores == NULL) {
1042 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1043 							"failed\n", nb_lcores);
1044 	}
1045 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1046 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1047 					       sizeof(struct fwd_lcore),
1048 					       RTE_CACHE_LINE_SIZE);
1049 		if (fwd_lcores[lc_id] == NULL) {
1050 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1051 								"failed\n");
1052 		}
1053 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1054 	}
1055 
1056 	RTE_ETH_FOREACH_DEV(pid) {
1057 		port = &ports[pid];
1058 		/* Apply default TxRx configuration for all ports */
1059 		port->dev_conf.txmode = tx_mode;
1060 		port->dev_conf.rxmode = rx_mode;
1061 		rte_eth_dev_info_get(pid, &port->dev_info);
1062 
1063 		if (!(port->dev_info.tx_offload_capa &
1064 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1065 			port->dev_conf.txmode.offloads &=
1066 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1067 		if (!(port->dev_info.tx_offload_capa &
1068 			DEV_TX_OFFLOAD_MATCH_METADATA))
1069 			port->dev_conf.txmode.offloads &=
1070 				~DEV_TX_OFFLOAD_MATCH_METADATA;
1071 		if (numa_support) {
1072 			if (port_numa[pid] != NUMA_NO_CONFIG)
1073 				port_per_socket[port_numa[pid]]++;
1074 			else {
1075 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
1076 
1077 				/*
1078 				 * if socket_id is invalid,
1079 				 * set to the first available socket.
1080 				 */
1081 				if (check_socket_id(socket_id) < 0)
1082 					socket_id = socket_ids[0];
1083 				port_per_socket[socket_id]++;
1084 			}
1085 		}
1086 
1087 		/* Apply Rx offloads configuration */
1088 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1089 			port->rx_conf[k].offloads =
1090 				port->dev_conf.rxmode.offloads;
1091 		/* Apply Tx offloads configuration */
1092 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1093 			port->tx_conf[k].offloads =
1094 				port->dev_conf.txmode.offloads;
1095 
1096 		/* set flag to initialize port/queue */
1097 		port->need_reconfig = 1;
1098 		port->need_reconfig_queues = 1;
1099 		port->tx_metadata = 0;
1100 	}
1101 
1102 	/*
1103 	 * Create pools of mbufs.
1104 	 * If NUMA support is disabled, create a single pool of mbufs in
1105 	 * socket 0 memory by default.
1106 	 * Otherwise, create a pool of mbufs in the memory of each detected socket.
1107 	 *
1108 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1109 	 * nb_txd can be configured at run time.
1110 	 */
1111 	if (param_total_num_mbufs)
1112 		nb_mbuf_per_pool = param_total_num_mbufs;
1113 	else {
1114 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1115 			(nb_lcores * mb_mempool_cache) +
1116 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1117 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1118 	}
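	/*
	 * Editor's worked example, assuming the usual testpmd.h defaults
	 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512) and mb_mempool_cache = 250: with 4 lcores,
	 * nb_mbuf_per_pool = 2048 + 4 * 250 + 2048 + 512 = 5608, then
	 * scaled by RTE_MAX_ETHPORTS (32 by default) to 179456 mbufs.
	 */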
1119 
1120 	if (numa_support) {
1121 		uint8_t i;
1122 
1123 		for (i = 0; i < num_sockets; i++)
1124 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1125 					 socket_ids[i]);
1126 	} else {
1127 		if (socket_num == UMA_NO_CONFIG)
1128 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1129 		else
1130 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1131 						 socket_num);
1132 	}
1133 
1134 	init_port_config();
1135 
1136 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1137 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1138 	/*
1139 	 * Record which mbuf pool is used by each logical core, if needed.
1140 	 */
1141 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1142 		mbp = mbuf_pool_find(
1143 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1144 
1145 		if (mbp == NULL)
1146 			mbp = mbuf_pool_find(0);
1147 		fwd_lcores[lc_id]->mbp = mbp;
1148 		/* initialize GSO context */
1149 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1150 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1151 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1152 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1153 			ETHER_CRC_LEN;
1154 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
1155 	}
1156 
1157 	/* Configuration of packet forwarding streams. */
1158 	if (init_fwd_streams() < 0)
1159 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1160 
1161 	fwd_config_setup();
1162 
1163 	/* create a gro context for each lcore */
1164 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1165 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1166 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1167 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1168 		gro_param.socket_id = rte_lcore_to_socket_id(
1169 				fwd_lcores_cpuids[lc_id]);
1170 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1171 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1172 			rte_exit(EXIT_FAILURE,
1173 					"rte_gro_ctx_create() failed\n");
1174 		}
1175 	}
1176 
1177 #if defined RTE_LIBRTE_PMD_SOFTNIC
1178 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1179 		RTE_ETH_FOREACH_DEV(pid) {
1180 			port = &ports[pid];
1181 			const char *driver = port->dev_info.driver_name;
1182 
1183 			if (strcmp(driver, "net_softnic") == 0)
1184 				port->softport.fwd_lcore_arg = fwd_lcores;
1185 		}
1186 	}
1187 #endif
1188 
1189 }
1190 
1191 
1192 void
1193 reconfig(portid_t new_port_id, unsigned socket_id)
1194 {
1195 	struct rte_port *port;
1196 
1197 	/* Reconfiguration of Ethernet ports. */
1198 	port = &ports[new_port_id];
1199 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
1200 
1201 	/* set flag to initialize port/queue */
1202 	port->need_reconfig = 1;
1203 	port->need_reconfig_queues = 1;
1204 	port->socket_id = socket_id;
1205 
1206 	init_port_config();
1207 }
1208 
1209 
1210 int
1211 init_fwd_streams(void)
1212 {
1213 	portid_t pid;
1214 	struct rte_port *port;
1215 	streamid_t sm_id, nb_fwd_streams_new;
1216 	queueid_t q;
1217 
1218 	/* set the socket ID according to whether NUMA is used */
1219 	RTE_ETH_FOREACH_DEV(pid) {
1220 		port = &ports[pid];
1221 		if (nb_rxq > port->dev_info.max_rx_queues) {
1222 			printf("Fail: nb_rxq(%d) is greater than "
1223 				"max_rx_queues(%d)\n", nb_rxq,
1224 				port->dev_info.max_rx_queues);
1225 			return -1;
1226 		}
1227 		if (nb_txq > port->dev_info.max_tx_queues) {
1228 			printf("Fail: nb_txq(%d) is greater than "
1229 				"max_tx_queues(%d)\n", nb_txq,
1230 				port->dev_info.max_tx_queues);
1231 			return -1;
1232 		}
1233 		if (numa_support) {
1234 			if (port_numa[pid] != NUMA_NO_CONFIG)
1235 				port->socket_id = port_numa[pid];
1236 			else {
1237 				port->socket_id = rte_eth_dev_socket_id(pid);
1238 
1239 				/*
1240 				 * if socket_id is invalid,
1241 				 * set to the first available socket.
1242 				 */
1243 				if (check_socket_id(port->socket_id) < 0)
1244 					port->socket_id = socket_ids[0];
1245 			}
1246 		}
1247 		else {
1248 			if (socket_num == UMA_NO_CONFIG)
1249 				port->socket_id = 0;
1250 			else
1251 				port->socket_id = socket_num;
1252 		}
1253 	}
1254 
1255 	q = RTE_MAX(nb_rxq, nb_txq);
1256 	if (q == 0) {
1257 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1258 		return -1;
1259 	}
1260 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1261 	if (nb_fwd_streams_new == nb_fwd_streams)
1262 		return 0;
1263 	/* clear the old */
1264 	if (fwd_streams != NULL) {
1265 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1266 			if (fwd_streams[sm_id] == NULL)
1267 				continue;
1268 			rte_free(fwd_streams[sm_id]);
1269 			fwd_streams[sm_id] = NULL;
1270 		}
1271 		rte_free(fwd_streams);
1272 		fwd_streams = NULL;
1273 	}
1274 
1275 	/* init new */
1276 	nb_fwd_streams = nb_fwd_streams_new;
1277 	if (nb_fwd_streams) {
1278 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1279 			sizeof(struct fwd_stream *) * nb_fwd_streams,
1280 			RTE_CACHE_LINE_SIZE);
1281 		if (fwd_streams == NULL)
1282 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1283 				 " (struct fwd_stream *)) failed\n",
1284 				 nb_fwd_streams);
1285 
1286 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1287 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1288 				" struct fwd_stream", sizeof(struct fwd_stream),
1289 				RTE_CACHE_LINE_SIZE);
1290 			if (fwd_streams[sm_id] == NULL)
1291 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
1292 					 "(struct fwd_stream) failed\n");
1293 		}
1294 	}
1295 
1296 	return 0;
1297 }
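/*
 * Editor's example: with nb_ports = 2, nb_rxq = 4 and nb_txq = 2,
 * q = RTE_MAX(4, 2) = 4 and nb_fwd_streams = 2 * 4 = 8, i.e. one stream
 * per (port, queue) pair of the larger of the two queue counts.
 */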
1298 
1299 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1300 static void
1301 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1302 {
1303 	unsigned int total_burst;
1304 	unsigned int nb_burst;
1305 	unsigned int burst_stats[3];
1306 	uint16_t pktnb_stats[3];
1307 	uint16_t nb_pkt;
1308 	int burst_percent[3];
1309 
1310 	/*
1311 	 * First compute the total number of packet bursts and the
1312 	 * two highest numbers of bursts of the same number of packets.
1313 	 */
1314 	total_burst = 0;
1315 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1316 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1317 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1318 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1319 		if (nb_burst == 0)
1320 			continue;
1321 		total_burst += nb_burst;
1322 		if (nb_burst > burst_stats[0]) {
1323 			burst_stats[1] = burst_stats[0];
1324 			pktnb_stats[1] = pktnb_stats[0];
1325 			burst_stats[0] = nb_burst;
1326 			pktnb_stats[0] = nb_pkt;
1327 		} else if (nb_burst > burst_stats[1]) {
1328 			burst_stats[1] = nb_burst;
1329 			pktnb_stats[1] = nb_pkt;
1330 		}
1331 	}
1332 	if (total_burst == 0)
1333 		return;
1334 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1335 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1336 	       burst_percent[0], (int) pktnb_stats[0]);
1337 	if (burst_stats[0] == total_burst) {
1338 		printf("]\n");
1339 		return;
1340 	}
1341 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1342 		printf(" + %d%% of %d pkts]\n",
1343 		       100 - burst_percent[0], pktnb_stats[1]);
1344 		return;
1345 	}
1346 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1347 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1348 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1349 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1350 		return;
1351 	}
1352 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1353 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1354 }
1355 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
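/*
 * Editor's illustration of the display above: if 70% of 1000 bursts carried
 * 32 packets, 25% carried 16 and the rest were scattered, the output is
 * "  RX-bursts : 1000 [70% of 32 pkts + 25% of 16 pkts + 5% of others]".
 */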
1356 
1357 static void
1358 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1359 {
1360 	struct rte_port *port;
1361 	uint8_t i;
1362 
1363 	static const char *fwd_stats_border = "----------------------";
1364 
1365 	port = &ports[port_id];
1366 	printf("\n  %s Forward statistics for port %-2d %s\n",
1367 	       fwd_stats_border, port_id, fwd_stats_border);
1368 
1369 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1370 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1371 		       "%-"PRIu64"\n",
1372 		       stats->ipackets, stats->imissed,
1373 		       (uint64_t) (stats->ipackets + stats->imissed));
1374 
1375 		if (cur_fwd_eng == &csum_fwd_engine)
1376 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1377 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1378 			       port->rx_bad_outer_l4_csum);
1379 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1380 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
1381 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1382 		}
1383 
1384 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1385 		       "%-"PRIu64"\n",
1386 		       stats->opackets, port->tx_dropped,
1387 		       (uint64_t) (stats->opackets + port->tx_dropped));
1388 	}
1389 	else {
1390 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1391 		       "%14"PRIu64"\n",
1392 		       stats->ipackets, stats->imissed,
1393 		       (uint64_t) (stats->ipackets + stats->imissed));
1394 
1395 		if (cur_fwd_eng == &csum_fwd_engine)
1396 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
1397 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1398 			       port->rx_bad_outer_l4_csum);
1399 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1400 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1401 			printf("  RX-nombufs:             %14"PRIu64"\n",
1402 			       stats->rx_nombuf);
1403 		}
1404 
1405 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1406 		       "%14"PRIu64"\n",
1407 		       stats->opackets, port->tx_dropped,
1408 		       (uint64_t) (stats->opackets + port->tx_dropped));
1409 	}
1410 
1411 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1412 	if (port->rx_stream)
1413 		pkt_burst_stats_display("RX",
1414 			&port->rx_stream->rx_burst_stats);
1415 	if (port->tx_stream)
1416 		pkt_burst_stats_display("TX",
1417 			&port->tx_stream->tx_burst_stats);
1418 #endif
1419 
1420 	if (port->rx_queue_stats_mapping_enabled) {
1421 		printf("\n");
1422 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1423 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1424 			       "     RX-errors:%14"PRIu64
1425 			       "    RX-bytes:%14"PRIu64"\n",
1426 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1427 		}
1428 		printf("\n");
1429 	}
1430 	if (port->tx_queue_stats_mapping_enabled) {
1431 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1432 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1433 			       "                                 TX-bytes:%14"PRIu64"\n",
1434 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1435 		}
1436 	}
1437 
1438 	printf("  %s--------------------------------%s\n",
1439 	       fwd_stats_border, fwd_stats_border);
1440 }
1441 
1442 static void
1443 fwd_stream_stats_display(streamid_t stream_id)
1444 {
1445 	struct fwd_stream *fs;
1446 	static const char *fwd_top_stats_border = "-------";
1447 
1448 	fs = fwd_streams[stream_id];
1449 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1450 	    (fs->fwd_dropped == 0))
1451 		return;
1452 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1453 	       "TX Port=%2d/Queue=%2d %s\n",
1454 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1455 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1456 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1457 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1458 
1459 	/* if checksum mode */
1460 	if (cur_fwd_eng == &csum_fwd_engine) {
1461 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1462 			"%-14u Rx- bad outer L4 checksum: %-14u\n",
1463 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1464 			fs->rx_bad_outer_l4_csum);
1465 	}
1466 
1467 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1468 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1469 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1470 #endif
1471 }
1472 
1473 static void
1474 flush_fwd_rx_queues(void)
1475 {
1476 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1477 	portid_t  rxp;
1478 	portid_t port_id;
1479 	queueid_t rxq;
1480 	uint16_t  nb_rx;
1481 	uint16_t  i;
1482 	uint8_t   j;
1483 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1484 	uint64_t timer_period;
1485 
1486 	/* convert to number of cycles */
1487 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1488 
1489 	for (j = 0; j < 2; j++) {
1490 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1491 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1492 				port_id = fwd_ports_ids[rxp];
1493 				/*
1494 				* testpmd can get stuck in the do-while loop below
1495 				* if rte_eth_rx_burst() always returns nonzero
1496 				* packets. A timer is added to exit this loop
1497 				* after the 1-second timer expires.
1498 				*/
1499 				prev_tsc = rte_rdtsc();
1500 				do {
1501 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1502 						pkts_burst, MAX_PKT_BURST);
1503 					for (i = 0; i < nb_rx; i++)
1504 						rte_pktmbuf_free(pkts_burst[i]);
1505 
1506 					cur_tsc = rte_rdtsc();
1507 					diff_tsc = cur_tsc - prev_tsc;
1508 					timer_tsc += diff_tsc;
1509 				} while ((nb_rx > 0) &&
1510 					(timer_tsc < timer_period));
1511 				timer_tsc = 0;
1512 			}
1513 		}
1514 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1515 	}
1516 }
1517 
1518 static void
1519 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1520 {
1521 	struct fwd_stream **fsm;
1522 	streamid_t nb_fs;
1523 	streamid_t sm_id;
1524 #ifdef RTE_LIBRTE_BITRATE
1525 	uint64_t tics_per_1sec;
1526 	uint64_t tics_datum;
1527 	uint64_t tics_current;
1528 	uint16_t i, cnt_ports;
1529 
1530 	cnt_ports = nb_ports;
1531 	tics_datum = rte_rdtsc();
1532 	tics_per_1sec = rte_get_timer_hz();
1533 #endif
1534 	fsm = &fwd_streams[fc->stream_idx];
1535 	nb_fs = fc->stream_nb;
1536 	do {
1537 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1538 			(*pkt_fwd)(fsm[sm_id]);
1539 #ifdef RTE_LIBRTE_BITRATE
1540 		if (bitrate_enabled != 0 &&
1541 				bitrate_lcore_id == rte_lcore_id()) {
1542 			tics_current = rte_rdtsc();
1543 			if (tics_current - tics_datum >= tics_per_1sec) {
1544 				/* Periodic bitrate calculation */
1545 				for (i = 0; i < cnt_ports; i++)
1546 					rte_stats_bitrate_calc(bitrate_data,
1547 						ports_ids[i]);
1548 				tics_datum = tics_current;
1549 			}
1550 		}
1551 #endif
1552 #ifdef RTE_LIBRTE_LATENCY_STATS
1553 		if (latencystats_enabled != 0 &&
1554 				latencystats_lcore_id == rte_lcore_id())
1555 			rte_latencystats_update();
1556 #endif
1557 
1558 	} while (! fc->stopped);
1559 }
1560 
1561 static int
1562 start_pkt_forward_on_core(void *fwd_arg)
1563 {
1564 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1565 			     cur_fwd_config.fwd_eng->packet_fwd);
1566 	return 0;
1567 }
1568 
1569 /*
1570  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1571  * Used to start communication flows in network loopback test configurations.
1572  */
1573 static int
1574 run_one_txonly_burst_on_core(void *fwd_arg)
1575 {
1576 	struct fwd_lcore *fwd_lc;
1577 	struct fwd_lcore tmp_lcore;
1578 
1579 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1580 	tmp_lcore = *fwd_lc;
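	/* stopped = 1 makes the do-while in run_pkt_fwd_on_lcore() exit
	 * after a single pass over the streams, i.e. one burst only.
	 */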
1581 	tmp_lcore.stopped = 1;
1582 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1583 	return 0;
1584 }
1585 
1586 /*
1587  * Launch packet forwarding:
1588  *     - Setup per-port forwarding context.
1589  *     - launch logical cores with their forwarding configuration.
1590  */
1591 static void
1592 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1593 {
1594 	port_fwd_begin_t port_fwd_begin;
1595 	unsigned int i;
1596 	unsigned int lc_id;
1597 	int diag;
1598 
1599 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1600 	if (port_fwd_begin != NULL) {
1601 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1602 			(*port_fwd_begin)(fwd_ports_ids[i]);
1603 	}
1604 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1605 		lc_id = fwd_lcores_cpuids[i];
1606 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1607 			fwd_lcores[i]->stopped = 0;
1608 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1609 						     fwd_lcores[i], lc_id);
1610 			if (diag != 0)
1611 				printf("launch lcore %u failed - diag=%d\n",
1612 				       lc_id, diag);
1613 		}
1614 	}
1615 }
1616 
1617 /*
1618  * Update the forward ports list.
1619  */
1620 void
1621 update_fwd_ports(portid_t new_pid)
1622 {
1623 	unsigned int i;
1624 	unsigned int new_nb_fwd_ports = 0;
1625 	int move = 0;
1626 
1627 	for (i = 0; i < nb_fwd_ports; ++i) {
1628 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1629 			move = 1;
1630 		else if (move)
1631 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1632 		else
1633 			new_nb_fwd_ports++;
1634 	}
1635 	if (new_pid < RTE_MAX_ETHPORTS)
1636 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1637 
1638 	nb_fwd_ports = new_nb_fwd_ports;
1639 	nb_cfg_ports = new_nb_fwd_ports;
1640 }
1641 
1642 /*
1643  * Launch packet forwarding configuration.
1644  */
1645 void
1646 start_packet_forwarding(int with_tx_first)
1647 {
1648 	port_fwd_begin_t port_fwd_begin;
1649 	port_fwd_end_t  port_fwd_end;
1650 	struct rte_port *port;
1651 	unsigned int i;
1652 	portid_t   pt_id;
1653 	streamid_t sm_id;
1654 
1655 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1656 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1657 
1658 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1659 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1660 
1661 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1662 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1663 		(!nb_rxq || !nb_txq))
1664 		rte_exit(EXIT_FAILURE,
1665 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1666 			cur_fwd_eng->fwd_mode_name);
1667 
1668 	if (all_ports_started() == 0) {
1669 		printf("Not all ports were started\n");
1670 		return;
1671 	}
1672 	if (test_done == 0) {
1673 		printf("Packet forwarding already started\n");
1674 		return;
1675 	}
1676 
1677 
1678 	if (dcb_test) {
1679 		for (i = 0; i < nb_fwd_ports; i++) {
1680 			pt_id = fwd_ports_ids[i];
1681 			port = &ports[pt_id];
1682 			if (!port->dcb_flag) {
1683 				printf("In DCB mode, all forwarding ports must "
1684 				       "be configured in this mode.\n");
1685 				return;
1686 			}
1687 		}
1688 		if (nb_fwd_lcores == 1) {
1689 			printf("In DCB mode, the number of forwarding cores "
1690 			       "must be larger than 1.\n");
1691 			return;
1692 		}
1693 	}
1694 	test_done = 0;
1695 
1696 	fwd_config_setup();
1697 
1698 	if (!no_flush_rx)
1699 		flush_fwd_rx_queues();
1700 
1701 	pkt_fwd_config_display(&cur_fwd_config);
1702 	rxtx_config_display();
1703 
1704 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1705 		pt_id = fwd_ports_ids[i];
1706 		port = &ports[pt_id];
1707 		rte_eth_stats_get(pt_id, &port->stats);
1708 		port->tx_dropped = 0;
1709 
1710 		map_port_queue_stats_mapping_registers(pt_id, port);
1711 	}
1712 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1713 		fwd_streams[sm_id]->rx_packets = 0;
1714 		fwd_streams[sm_id]->tx_packets = 0;
1715 		fwd_streams[sm_id]->fwd_dropped = 0;
1716 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1717 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1718 		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1719 
1720 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1721 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1722 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1723 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1724 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1725 #endif
1726 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1727 		fwd_streams[sm_id]->core_cycles = 0;
1728 #endif
1729 	}
1730 	if (with_tx_first) {
1731 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1732 		if (port_fwd_begin != NULL) {
1733 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1734 				(*port_fwd_begin)(fwd_ports_ids[i]);
1735 		}
1736 		while (with_tx_first--) {
1737 			launch_packet_forwarding(
1738 					run_one_txonly_burst_on_core);
1739 			rte_eal_mp_wait_lcore();
1740 		}
1741 		port_fwd_end = tx_only_engine.port_fwd_end;
1742 		if (port_fwd_end != NULL) {
1743 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1744 				(*port_fwd_end)(fwd_ports_ids[i]);
1745 		}
1746 	}
1747 	launch_packet_forwarding(start_pkt_forward_on_core);
1748 }
1749 
1750 void
1751 stop_packet_forwarding(void)
1752 {
1753 	struct rte_eth_stats stats;
1754 	struct rte_port *port;
1755 	port_fwd_end_t  port_fwd_end;
1756 	int i;
1757 	portid_t   pt_id;
1758 	streamid_t sm_id;
1759 	lcoreid_t  lc_id;
1760 	uint64_t total_recv;
1761 	uint64_t total_xmit;
1762 	uint64_t total_rx_dropped;
1763 	uint64_t total_tx_dropped;
1764 	uint64_t total_rx_nombuf;
1765 	uint64_t tx_dropped;
1766 	uint64_t rx_bad_ip_csum;
1767 	uint64_t rx_bad_l4_csum;
1768 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1769 	uint64_t fwd_cycles;
1770 #endif
1771 
1772 	static const char *acc_stats_border = "+++++++++++++++";
1773 
1774 	if (test_done) {
1775 		printf("Packet forwarding not started\n");
1776 		return;
1777 	}
1778 	printf("Telling cores to stop...");
1779 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1780 		fwd_lcores[lc_id]->stopped = 1;
1781 	printf("\nWaiting for lcores to finish...\n");
1782 	rte_eal_mp_wait_lcore();
1783 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1784 	if (port_fwd_end != NULL) {
1785 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1786 			pt_id = fwd_ports_ids[i];
1787 			(*port_fwd_end)(pt_id);
1788 		}
1789 	}
1790 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1791 	fwd_cycles = 0;
1792 #endif
1793 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1794 		if (cur_fwd_config.nb_fwd_streams >
1795 		    cur_fwd_config.nb_fwd_ports) {
1796 			fwd_stream_stats_display(sm_id);
1797 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1798 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1799 		} else {
1800 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1801 				fwd_streams[sm_id];
1802 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1803 				fwd_streams[sm_id];
1804 		}
1805 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1806 		tx_dropped = (uint64_t) (tx_dropped +
1807 					 fwd_streams[sm_id]->fwd_dropped);
1808 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1809 
1810 		rx_bad_ip_csum =
1811 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1812 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1813 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1814 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1815 							rx_bad_ip_csum;
1816 
1817 		rx_bad_l4_csum =
1818 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1819 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1820 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1821 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1822 							rx_bad_l4_csum;
1823 
1824 		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1825 				fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1826 
1827 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1828 		fwd_cycles = (uint64_t) (fwd_cycles +
1829 					 fwd_streams[sm_id]->core_cycles);
1830 #endif
1831 	}
1832 	total_recv = 0;
1833 	total_xmit = 0;
1834 	total_rx_dropped = 0;
1835 	total_tx_dropped = 0;
1836 	total_rx_nombuf  = 0;
1837 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1838 		pt_id = fwd_ports_ids[i];
1839 
1840 		port = &ports[pt_id];
1841 		rte_eth_stats_get(pt_id, &stats);
1842 		stats.ipackets -= port->stats.ipackets;
1843 		port->stats.ipackets = 0;
1844 		stats.opackets -= port->stats.opackets;
1845 		port->stats.opackets = 0;
1846 		stats.ibytes   -= port->stats.ibytes;
1847 		port->stats.ibytes = 0;
1848 		stats.obytes   -= port->stats.obytes;
1849 		port->stats.obytes = 0;
1850 		stats.imissed  -= port->stats.imissed;
1851 		port->stats.imissed = 0;
1852 		stats.oerrors  -= port->stats.oerrors;
1853 		port->stats.oerrors = 0;
1854 		stats.rx_nombuf -= port->stats.rx_nombuf;
1855 		port->stats.rx_nombuf = 0;
1856 
1857 		total_recv += stats.ipackets;
1858 		total_xmit += stats.opackets;
1859 		total_rx_dropped += stats.imissed;
1860 		total_tx_dropped += port->tx_dropped;
1861 		total_rx_nombuf  += stats.rx_nombuf;
1862 
1863 		fwd_port_stats_display(pt_id, &stats);
1864 	}
1865 
1866 	printf("\n  %s Accumulated forward statistics for all ports"
1867 	       "%s\n",
1868 	       acc_stats_border, acc_stats_border);
1869 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1870 	       "%-"PRIu64"\n"
1871 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1872 	       "%-"PRIu64"\n",
1873 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1874 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1875 	if (total_rx_nombuf > 0)
1876 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1877 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1878 	       "%s\n",
1879 	       acc_stats_border, acc_stats_border);
1880 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1881 	if (total_recv > 0)
1882 		printf("\n  CPU cycles/packet=%u (total cycles="
1883 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1884 		       (unsigned int)(fwd_cycles / total_recv),
1885 		       fwd_cycles, total_recv);
1886 #endif
1887 	printf("\nDone.\n");
1888 	test_done = 1;
1889 }
1890 
1891 void
1892 dev_set_link_up(portid_t pid)
1893 {
1894 	if (rte_eth_dev_set_link_up(pid) < 0)
1895 		printf("\nSet link up failed.\n");
1896 }
1897 
1898 void
1899 dev_set_link_down(portid_t pid)
1900 {
1901 	if (rte_eth_dev_set_link_down(pid) < 0)
1902 		printf("\nSet link down failed.\n");
1903 }
1904 
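/*
 * Return 1 if every known port (ignoring bonding slaves) is in the
 * RTE_PORT_STARTED state, 0 otherwise.
 */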
1905 static int
1906 all_ports_started(void)
1907 {
1908 	portid_t pi;
1909 	struct rte_port *port;
1910 
1911 	RTE_ETH_FOREACH_DEV(pi) {
1912 		port = &ports[pi];
1913 		/* Check if there is a port that is not started */
1914 		if ((port->port_status != RTE_PORT_STARTED) &&
1915 			(port->slave_flag == 0))
1916 			return 0;
1917 	}
1918 
1919 	/* All ports (excluding bonding slaves) are started */
1920 	return 1;
1921 }
1922 
1923 int
1924 port_is_stopped(portid_t port_id)
1925 {
1926 	struct rte_port *port = &ports[port_id];
1927 
1928 	if ((port->port_status != RTE_PORT_STOPPED) &&
1929 	    (port->slave_flag == 0))
1930 		return 0;
1931 	return 1;
1932 }
1933 
1934 int
1935 all_ports_stopped(void)
1936 {
1937 	portid_t pi;
1938 
1939 	RTE_ETH_FOREACH_DEV(pi) {
1940 		if (!port_is_stopped(pi))
1941 			return 0;
1942 	}
1943 
1944 	return 1;
1945 }
1946 
1947 int
1948 port_is_started(portid_t port_id)
1949 {
1950 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1951 		return 0;
1952 
1953 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1954 		return 0;
1955 
1956 	return 1;
1957 }
1958 
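/*
 * Start the given port (or all ports when pid == RTE_PORT_ALL):
 * reconfigure the device and its Rx/Tx queues if needed, start the
 * device, register the ethdev event callbacks and optionally check the
 * link status. Port state transitions are guarded with atomic
 * compare-and-set so a port is only started from RTE_PORT_STOPPED.
 */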
1959 int
1960 start_port(portid_t pid)
1961 {
1962 	int diag, need_check_link_status = -1;
1963 	portid_t pi;
1964 	queueid_t qi;
1965 	struct rte_port *port;
1966 	struct ether_addr mac_addr;
1967 	enum rte_eth_event_type event_type;
1968 
1969 	if (port_id_is_invalid(pid, ENABLED_WARN))
1970 		return 0;
1971 
1972 	if(dcb_config)
1973 		dcb_test = 1;
1974 	RTE_ETH_FOREACH_DEV(pi) {
1975 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1976 			continue;
1977 
1978 		need_check_link_status = 0;
1979 		port = &ports[pi];
1980 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1981 						 RTE_PORT_HANDLING) == 0) {
1982 			printf("Port %d is not stopped, skipping\n", pi);
1983 			continue;
1984 		}
1985 
1986 		if (port->need_reconfig > 0) {
1987 			port->need_reconfig = 0;
1988 
1989 			if (flow_isolate_all) {
1990 				int ret = port_flow_isolate(pi, 1);
1991 				if (ret) {
1992 					printf("Failed to apply isolated"
1993 					       " mode on port %d\n", pi);
1994 					return -1;
1995 				}
1996 			}
1997 			configure_rxtx_dump_callbacks(0);
1998 			printf("Configuring Port %d (socket %u)\n", pi,
1999 					port->socket_id);
2000 			/* configure port */
2001 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2002 						&(port->dev_conf));
2003 			if (diag != 0) {
2004 				if (rte_atomic16_cmpset(&(port->port_status),
2005 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2006 					printf("Port %d cannot be set back "
2007 							"to stopped\n", pi);
2008 				printf("Failed to configure port %d\n", pi);
2009 				/* try to reconfigure port next time */
2010 				port->need_reconfig = 1;
2011 				return -1;
2012 			}
2013 		}
2014 		if (port->need_reconfig_queues > 0) {
2015 			port->need_reconfig_queues = 0;
2016 			/* setup tx queues */
2017 			for (qi = 0; qi < nb_txq; qi++) {
2018 				if ((numa_support) &&
2019 					(txring_numa[pi] != NUMA_NO_CONFIG))
2020 					diag = rte_eth_tx_queue_setup(pi, qi,
2021 						port->nb_tx_desc[qi],
2022 						txring_numa[pi],
2023 						&(port->tx_conf[qi]));
2024 				else
2025 					diag = rte_eth_tx_queue_setup(pi, qi,
2026 						port->nb_tx_desc[qi],
2027 						port->socket_id,
2028 						&(port->tx_conf[qi]));
2029 
2030 				if (diag == 0)
2031 					continue;
2032 
2033 				/* Failed to set up tx queue, return */
2034 				if (rte_atomic16_cmpset(&(port->port_status),
2035 							RTE_PORT_HANDLING,
2036 							RTE_PORT_STOPPED) == 0)
2037 					printf("Port %d cannot be set back "
2038 							"to stopped\n", pi);
2039 				printf("Failed to configure port %d tx queues\n",
2040 				       pi);
2041 				/* try to reconfigure queues next time */
2042 				port->need_reconfig_queues = 1;
2043 				return -1;
2044 			}
2045 			for (qi = 0; qi < nb_rxq; qi++) {
2046 				/* setup rx queues */
2047 				if ((numa_support) &&
2048 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2049 					struct rte_mempool * mp =
2050 						mbuf_pool_find(rxring_numa[pi]);
2051 					if (mp == NULL) {
2052 						printf("Failed to setup RX queue: "
2053 							"no mempool allocated "
2054 							"on socket %d\n",
2055 							rxring_numa[pi]);
2056 						return -1;
2057 					}
2058 
2059 					diag = rte_eth_rx_queue_setup(pi, qi,
2060 					     port->nb_rx_desc[qi],
2061 					     rxring_numa[pi],
2062 					     &(port->rx_conf[qi]),
2063 					     mp);
2064 				} else {
2065 					struct rte_mempool *mp =
2066 						mbuf_pool_find(port->socket_id);
2067 					if (mp == NULL) {
2068 						printf("Failed to setup RX queue: "
2069 							"no mempool allocated "
2070 							"on socket %d\n",
2071 							port->socket_id);
2072 						return -1;
2073 					}
2074 					diag = rte_eth_rx_queue_setup(pi, qi,
2075 					     port->nb_rx_desc[qi],
2076 					     port->socket_id,
2077 					     &(port->rx_conf[qi]),
2078 					     mp);
2079 				}
2080 				if (diag == 0)
2081 					continue;
2082 
2083 				/* Failed to set up rx queue, return */
2084 				if (rte_atomic16_cmpset(&(port->port_status),
2085 							RTE_PORT_HANDLING,
2086 							RTE_PORT_STOPPED) == 0)
2087 					printf("Port %d cannot be set back "
2088 							"to stopped\n", pi);
2089 				printf("Failed to configure port %d rx queues\n",
2090 				       pi);
2091 				/* try to reconfigure queues next time */
2092 				port->need_reconfig_queues = 1;
2093 				return -1;
2094 			}
2095 		}
2096 		configure_rxtx_dump_callbacks(verbose_level);
2097 		/* start port */
2098 		if (rte_eth_dev_start(pi) < 0) {
2099 			printf("Failed to start port %d\n", pi);
2100 
2101 			/* Failed to start the port, set it back to stopped */
2102 			if (rte_atomic16_cmpset(&(port->port_status),
2103 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2104 				printf("Port %d cannot be set back to "
2105 							"stopped\n", pi);
2106 			continue;
2107 		}
2108 
2109 		if (rte_atomic16_cmpset(&(port->port_status),
2110 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2111 			printf("Port %d cannot be set to started\n", pi);
2112 
2113 		rte_eth_macaddr_get(pi, &mac_addr);
2114 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2115 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2116 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2117 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2118 
2119 		/* at least one port started, need checking link status */
2120 		need_check_link_status = 1;
2121 	}
2122 
2123 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
2124 	     event_type < RTE_ETH_EVENT_MAX;
2125 	     event_type++) {
2126 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2127 						event_type,
2128 						eth_event_callback,
2129 						NULL);
2130 		if (diag) {
2131 			printf("Failed to setup event callback for event %d\n",
2132 				event_type);
2133 			return -1;
2134 		}
2135 	}
2136 
2137 	if (need_check_link_status == 1 && !no_link_check)
2138 		check_all_ports_link_status(RTE_PORT_ALL);
2139 	else if (need_check_link_status == 0)
2140 		printf("Please stop the ports first\n");
2141 
2142 	printf("Done\n");
2143 	return 0;
2144 }
2145 
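/*
 * Stop the given port (or all ports). Ports that are still part of the
 * forwarding configuration or act as bonding slaves are skipped.
 */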
2146 void
2147 stop_port(portid_t pid)
2148 {
2149 	portid_t pi;
2150 	struct rte_port *port;
2151 	int need_check_link_status = 0;
2152 
2153 	if (dcb_test) {
2154 		dcb_test = 0;
2155 		dcb_config = 0;
2156 	}
2157 
2158 	if (port_id_is_invalid(pid, ENABLED_WARN))
2159 		return;
2160 
2161 	printf("Stopping ports...\n");
2162 
2163 	RTE_ETH_FOREACH_DEV(pi) {
2164 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2165 			continue;
2166 
2167 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2168 			printf("Please remove port %d from forwarding configuration.\n", pi);
2169 			continue;
2170 		}
2171 
2172 		if (port_is_bonding_slave(pi)) {
2173 			printf("Please remove port %d from bonded device.\n", pi);
2174 			continue;
2175 		}
2176 
2177 		port = &ports[pi];
2178 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2179 						RTE_PORT_HANDLING) == 0)
2180 			continue;
2181 
2182 		rte_eth_dev_stop(pi);
2183 
2184 		if (rte_atomic16_cmpset(&(port->port_status),
2185 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2186 			printf("Port %d cannot be set to stopped\n", pi);
2187 		need_check_link_status = 1;
2188 	}
2189 	if (need_check_link_status && !no_link_check)
2190 		check_all_ports_link_status(RTE_PORT_ALL);
2191 
2192 	printf("Done\n");
2193 }
2194 
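/*
 * Compact the ports_ids[] array by overwriting entries whose device state
 * is RTE_ETH_DEV_UNUSED with the last valid entry, then refresh nb_ports
 * and the forwarding configuration.
 */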
2195 static void
2196 remove_unused_fwd_ports(void)
2197 {
2198 	int i;
2199 	int last_port_idx = nb_ports - 1;
2200 
2201 	for (i = 0; i <= last_port_idx; i++) { /* iterate in ports_ids */
2202 		if (rte_eth_devices[ports_ids[i]].state != RTE_ETH_DEV_UNUSED)
2203 			continue;
2204 		/* skip unused ports at the end */
2205 		while (i <= last_port_idx &&
2206 				rte_eth_devices[ports_ids[last_port_idx]].state
2207 				== RTE_ETH_DEV_UNUSED)
2208 			last_port_idx--;
2209 		if (last_port_idx < i)
2210 			break;
2211 		/* overwrite unused port with last valid port */
2212 		ports_ids[i] = ports_ids[last_port_idx];
2213 		/* decrease ports count */
2214 		last_port_idx--;
2215 	}
2216 	nb_ports = rte_eth_dev_count_avail();
2217 	update_fwd_ports(RTE_MAX_ETHPORTS);
2218 }
2219 
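/*
 * Close the given port (or all ports): flush its flow rules, close the
 * ethdev and drop it from the forwarding configuration. Forwarding and
 * bonding-slave ports are skipped, as in stop_port().
 */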
2220 void
2221 close_port(portid_t pid)
2222 {
2223 	portid_t pi;
2224 	struct rte_port *port;
2225 
2226 	if (port_id_is_invalid(pid, ENABLED_WARN))
2227 		return;
2228 
2229 	printf("Closing ports...\n");
2230 
2231 	RTE_ETH_FOREACH_DEV(pi) {
2232 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2233 			continue;
2234 
2235 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2236 			printf("Please remove port %d from forwarding configuration.\n", pi);
2237 			continue;
2238 		}
2239 
2240 		if (port_is_bonding_slave(pi)) {
2241 			printf("Please remove port %d from bonded device.\n", pi);
2242 			continue;
2243 		}
2244 
2245 		port = &ports[pi];
2246 		if (rte_atomic16_cmpset(&(port->port_status),
2247 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2248 			printf("Port %d is already closed\n", pi);
2249 			continue;
2250 		}
2251 
2252 		if (rte_atomic16_cmpset(&(port->port_status),
2253 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2254 			printf("Port %d is not stopped, skipping\n", pi);
2255 			continue;
2256 		}
2257 
2258 		if (port->flow_list)
2259 			port_flow_flush(pi);
2260 		rte_eth_dev_close(pi);
2261 
2262 		remove_unused_fwd_ports();
2263 
2264 		if (rte_atomic16_cmpset(&(port->port_status),
2265 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2266 			printf("Port %d cannot be set to closed\n", pi);
2267 	}
2268 
2269 	printf("Done\n");
2270 }
2271 
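/*
 * Reset the given port (or all ports) and mark it for full
 * reconfiguration of the device and its queues on the next start_port().
 */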
2272 void
2273 reset_port(portid_t pid)
2274 {
2275 	int diag;
2276 	portid_t pi;
2277 	struct rte_port *port;
2278 
2279 	if (port_id_is_invalid(pid, ENABLED_WARN))
2280 		return;
2281 
2282 	printf("Resetting ports...\n");
2283 
2284 	RTE_ETH_FOREACH_DEV(pi) {
2285 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2286 			continue;
2287 
2288 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2289 			printf("Please remove port %d from forwarding "
2290 			       "configuration.\n", pi);
2291 			continue;
2292 		}
2293 
2294 		if (port_is_bonding_slave(pi)) {
2295 			printf("Please remove port %d from bonded device.\n",
2296 			       pi);
2297 			continue;
2298 		}
2299 
2300 		diag = rte_eth_dev_reset(pi);
2301 		if (diag == 0) {
2302 			port = &ports[pi];
2303 			port->need_reconfig = 1;
2304 			port->need_reconfig_queues = 1;
2305 		} else {
2306 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
2307 		}
2308 	}
2309 
2310 	printf("Done\n");
2311 }
2312 
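/*
 * Hot-plug a new device identified by a bus/device/driver string (as
 * accepted by rte_dev_probe) and set up every port it exposes.
 */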
2313 void
2314 attach_port(char *identifier)
2315 {
2316 	portid_t pi = 0;
2317 	struct rte_dev_iterator iterator;
2318 
2319 	printf("Attaching a new port...\n");
2320 
2321 	if (identifier == NULL) {
2322 		printf("Invalid parameters are specified\n");
2323 		return;
2324 	}
2325 
2326 	if (rte_dev_probe(identifier) != 0) {
2327 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2328 		return;
2329 	}
2330 
2331 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator)
2332 		setup_attached_port(pi);
2333 }
2334 
2335 static void
2336 setup_attached_port(portid_t pi)
2337 {
2338 	unsigned int socket_id;
2339 
2340 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2341 	/* if socket_id is invalid, set to the first available socket. */
2342 	if (check_socket_id(socket_id) < 0)
2343 		socket_id = socket_ids[0];
2344 	reconfig(pi, socket_id);
2345 	rte_eth_promiscuous_enable(pi);
2346 
2347 	ports_ids[nb_ports] = pi;
2348 	nb_ports = rte_eth_dev_count_avail();
2349 
2350 	ports[pi].port_status = RTE_PORT_STOPPED;
2351 
2352 	update_fwd_ports(pi);
2353 
2354 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2355 	printf("Done\n");
2356 }
2357 
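/*
 * Hot-unplug the device backing the given port. The port must be stopped;
 * if it was not closed yet, its flow rules are flushed first.
 */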
2358 void
2359 detach_port(portid_t port_id)
2360 {
2361 	printf("Removing a device...\n");
2362 
2363 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2364 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2365 			printf("Port not stopped\n");
2366 			return;
2367 		}
2368 		printf("Port was not closed\n");
2369 		if (ports[port_id].flow_list)
2370 			port_flow_flush(port_id);
2371 	}
2372 
2373 	if (rte_dev_remove(rte_eth_devices[port_id].device) != 0) {
2374 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2375 		return;
2376 	}
2377 
2378 	remove_unused_fwd_ports();
2379 
2380 	printf("Port %u is detached. Total number of ports is now %d\n",
2381 			port_id, nb_ports);
2382 	printf("Done\n");
2383 	return;
2384 }
2385 
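/*
 * Application exit path: stop forwarding if still running, stop and close
 * every port, and tear down hot-plug monitoring when it was enabled.
 */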
2386 void
2387 pmd_test_exit(void)
2388 {
2389 	struct rte_device *device;
2390 	portid_t pt_id;
2391 	int ret;
2392 
2393 	if (test_done == 0)
2394 		stop_packet_forwarding();
2395 
2396 	if (ports != NULL) {
2397 		no_link_check = 1;
2398 		RTE_ETH_FOREACH_DEV(pt_id) {
2399 			printf("\nShutting down port %d...\n", pt_id);
2400 			fflush(stdout);
2401 			stop_port(pt_id);
2402 			close_port(pt_id);
2403 
2404 			/*
2405 			 * This is a workaround to fix a virtio-user issue that
2406 			 * requires calling the clean-up routine to remove the
2407 			 * existing socket.
2408 			 * This workaround is valid only for testpmd; a fix
2409 			 * valid for all applications is needed.
2410 			 * TODO: Implement proper resource cleanup
2411 			 */
2412 			device = rte_eth_devices[pt_id].device;
2413 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2414 				detach_port(pt_id);
2415 		}
2416 	}
2417 
2418 	if (hot_plug) {
2419 		ret = rte_dev_event_monitor_stop();
2420 		if (ret) {
2421 			RTE_LOG(ERR, EAL,
2422 				"Failed to stop the device event monitor.\n");
2423 			return;
2424 		}
2425 
2426 		ret = rte_dev_event_callback_unregister(NULL,
2427 			eth_dev_event_callback, NULL);
2428 		if (ret < 0) {
2429 			RTE_LOG(ERR, EAL,
2430 				"Failed to unregister the device event callback.\n");
2431 			return;
2432 		}
2433 
2434 		ret = rte_dev_hotplug_handle_disable();
2435 		if (ret) {
2436 			RTE_LOG(ERR, EAL,
2437 				"Failed to disable hotplug handling.\n");
2438 			return;
2439 		}
2440 	}
2441 
2442 	printf("\nBye...\n");
2443 }
2444 
2445 typedef void (*cmd_func_t)(void);
2446 struct pmd_test_command {
2447 	const char *cmd_name;
2448 	cmd_func_t cmd_func;
2449 };
2450 
2451 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2452 
2453 /* Check the link status of all ports for up to 9 s, then print a final summary */
2454 static void
2455 check_all_ports_link_status(uint32_t port_mask)
2456 {
2457 #define CHECK_INTERVAL 100 /* 100ms */
2458 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2459 	portid_t portid;
2460 	uint8_t count, all_ports_up, print_flag = 0;
2461 	struct rte_eth_link link;
2462 
2463 	printf("Checking link statuses...\n");
2464 	fflush(stdout);
2465 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2466 		all_ports_up = 1;
2467 		RTE_ETH_FOREACH_DEV(portid) {
2468 			if ((port_mask & (1 << portid)) == 0)
2469 				continue;
2470 			memset(&link, 0, sizeof(link));
2471 			rte_eth_link_get_nowait(portid, &link);
2472 			/* print link status if flag set */
2473 			if (print_flag == 1) {
2474 				if (link.link_status)
2475 					printf(
2476 					"Port %d Link Up. speed %u Mbps - %s\n",
2477 					portid, link.link_speed,
2478 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2479 					("full-duplex") : ("half-duplex"));
2480 				else
2481 					printf("Port %d Link Down\n", portid);
2482 				continue;
2483 			}
2484 			/* clear all_ports_up flag if any link down */
2485 			if (link.link_status == ETH_LINK_DOWN) {
2486 				all_ports_up = 0;
2487 				break;
2488 			}
2489 		}
2490 		/* after finally printing all link status, get out */
2491 		if (print_flag == 1)
2492 			break;
2493 
2494 		if (all_ports_up == 0) {
2495 			fflush(stdout);
2496 			rte_delay_ms(CHECK_INTERVAL);
2497 		}
2498 
2499 		/* set the print_flag if all ports up or timeout */
2500 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2501 			print_flag = 1;
2502 		}
2503 
2504 		if (lsc_interrupt)
2505 			break;
2506 	}
2507 }
2508 
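/*
 * Deferred handler for device-removal interrupts: pause forwarding if the
 * removed port takes part in it, then stop, close and detach the port,
 * and resume forwarding afterwards.
 */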
2509 static void
2510 rmv_event_callback(void *arg)
2511 {
2512 	int need_to_start = 0;
2513 	int org_no_link_check = no_link_check;
2514 	portid_t port_id = (intptr_t)arg;
2515 
2516 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2517 
2518 	if (!test_done && port_is_forwarding(port_id)) {
2519 		need_to_start = 1;
2520 		stop_packet_forwarding();
2521 	}
2522 	no_link_check = 1;
2523 	stop_port(port_id);
2524 	no_link_check = org_no_link_check;
2525 	close_port(port_id);
2526 	detach_port(port_id);
2527 	if (need_to_start)
2528 		start_packet_forwarding(0);
2529 }
2530 
2531 /* This function is used by the interrupt thread */
2532 static int
2533 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2534 		  void *ret_param)
2535 {
2536 	static const char * const event_desc[] = {
2537 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2538 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2539 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2540 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2541 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2542 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2543 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2544 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2545 		[RTE_ETH_EVENT_NEW] = "device probed",
2546 		[RTE_ETH_EVENT_DESTROY] = "device released",
2547 		[RTE_ETH_EVENT_MAX] = NULL,
2548 	};
2549 
2550 	RTE_SET_USED(param);
2551 	RTE_SET_USED(ret_param);
2552 
2553 	if (type >= RTE_ETH_EVENT_MAX) {
2554 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2555 			port_id, __func__, type);
2556 		fflush(stderr);
2557 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2558 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2559 			event_desc[type]);
2560 		fflush(stdout);
2561 	}
2562 
2563 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2564 		return 0;
2565 
2566 	switch (type) {
2567 	case RTE_ETH_EVENT_INTR_RMV:
2568 		if (rte_eal_alarm_set(100000,
2569 				rmv_event_callback, (void *)(intptr_t)port_id))
2570 			fprintf(stderr, "Could not set up deferred device removal\n");
2571 		break;
2572 	default:
2573 		break;
2574 	}
2575 	return 0;
2576 }
2577 
2578 /* This function is used by the interrupt thread */
2579 static void
2580 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2581 			     __rte_unused void *arg)
2582 {
2583 	uint16_t port_id;
2584 	int ret;
2585 
2586 	if (type >= RTE_DEV_EVENT_MAX) {
2587 		fprintf(stderr, "%s called upon invalid event %d\n",
2588 			__func__, type);
2589 		fflush(stderr);
2590 	}
2591 
2592 	switch (type) {
2593 	case RTE_DEV_EVENT_REMOVE:
2594 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2595 			device_name);
2596 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2597 		if (ret) {
2598 			RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
2599 				device_name);
2600 			return;
2601 		}
2602 		rmv_event_callback((void *)(intptr_t)port_id);
2603 		break;
2604 	case RTE_DEV_EVENT_ADD:
2605 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2606 			device_name);
2607 		/* TODO: After kernel driver binding finishes,
2608 		 * begin to attach the port.
2609 		 */
2610 		break;
2611 	default:
2612 		break;
2613 	}
2614 }
2615 
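/*
 * Apply the user-supplied Tx queue to statistics counter mappings to the
 * given port and flag the port when at least one mapping was programmed.
 * set_rx_queue_stats_mapping_registers() below does the same for Rx.
 */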
2616 static int
2617 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2618 {
2619 	uint16_t i;
2620 	int diag;
2621 	uint8_t mapping_found = 0;
2622 
2623 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2624 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2625 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2626 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2627 					tx_queue_stats_mappings[i].queue_id,
2628 					tx_queue_stats_mappings[i].stats_counter_id);
2629 			if (diag != 0)
2630 				return diag;
2631 			mapping_found = 1;
2632 		}
2633 	}
2634 	if (mapping_found)
2635 		port->tx_queue_stats_mapping_enabled = 1;
2636 	return 0;
2637 }
2638 
2639 static int
2640 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2641 {
2642 	uint16_t i;
2643 	int diag;
2644 	uint8_t mapping_found = 0;
2645 
2646 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2647 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2648 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2649 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2650 					rx_queue_stats_mappings[i].queue_id,
2651 					rx_queue_stats_mappings[i].stats_counter_id);
2652 			if (diag != 0)
2653 				return diag;
2654 			mapping_found = 1;
2655 		}
2656 	}
2657 	if (mapping_found)
2658 		port->rx_queue_stats_mapping_enabled = 1;
2659 	return 0;
2660 }
2661 
2662 static void
2663 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2664 {
2665 	int diag = 0;
2666 
2667 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2668 	if (diag != 0) {
2669 		if (diag == -ENOTSUP) {
2670 			port->tx_queue_stats_mapping_enabled = 0;
2671 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2672 		}
2673 		else
2674 			rte_exit(EXIT_FAILURE,
2675 					"set_tx_queue_stats_mapping_registers "
2676 					"failed for port id=%d diag=%d\n",
2677 					pi, diag);
2678 	}
2679 
2680 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2681 	if (diag != 0) {
2682 		if (diag == -ENOTSUP) {
2683 			port->rx_queue_stats_mapping_enabled = 0;
2684 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2685 		}
2686 		else
2687 			rte_exit(EXIT_FAILURE,
2688 					"set_rx_queue_stats_mapping_registers "
2689 					"failed for port id=%d diag=%d\n",
2690 					pi, diag);
2691 	}
2692 }
2693 
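/*
 * Initialize the per-queue Rx/Tx configuration of a port from the PMD
 * defaults, overriding only the parameters given on the command line
 * (RTE_PMD_PARAM_UNSET marks an untouched parameter).
 */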
2694 static void
2695 rxtx_port_config(struct rte_port *port)
2696 {
2697 	uint16_t qid;
2698 
2699 	for (qid = 0; qid < nb_rxq; qid++) {
2700 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2701 
2702 		/* Check if any Rx parameters have been passed */
2703 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2704 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2705 
2706 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2707 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2708 
2709 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2710 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2711 
2712 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2713 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2714 
2715 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2716 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2717 
2718 		port->nb_rx_desc[qid] = nb_rxd;
2719 	}
2720 
2721 	for (qid = 0; qid < nb_txq; qid++) {
2722 		port->tx_conf[qid] = port->dev_info.default_txconf;
2723 
2724 		/* Check if any Tx parameters have been passed */
2725 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2726 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2727 
2728 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2729 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2730 
2731 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2732 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2733 
2734 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2735 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2736 
2737 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2738 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2739 
2740 		port->nb_tx_desc[qid] = nb_txd;
2741 	}
2742 }
2743 
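/*
 * Default configuration of every probed port: flow director settings,
 * RSS when more than one Rx queue is used, Rx/Tx queue parameters,
 * queue-stats mappings and the LSC/RMV interrupt modes requested on the
 * command line.
 */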
2744 void
2745 init_port_config(void)
2746 {
2747 	portid_t pid;
2748 	struct rte_port *port;
2749 
2750 	RTE_ETH_FOREACH_DEV(pid) {
2751 		port = &ports[pid];
2752 		port->dev_conf.fdir_conf = fdir_conf;
2753 		rte_eth_dev_info_get(pid, &port->dev_info);
2754 		if (nb_rxq > 1) {
2755 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2756 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2757 				rss_hf & port->dev_info.flow_type_rss_offloads;
2758 		} else {
2759 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2760 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2761 		}
2762 
2763 		if (port->dcb_flag == 0) {
2764 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2765 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2766 			else
2767 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2768 		}
2769 
2770 		rxtx_port_config(port);
2771 
2772 		rte_eth_macaddr_get(pid, &port->eth_addr);
2773 
2774 		map_port_queue_stats_mapping_registers(pid, port);
2775 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2776 		rte_pmd_ixgbe_bypass_init(pid);
2777 #endif
2778 
2779 		if (lsc_interrupt &&
2780 		    (rte_eth_devices[pid].data->dev_flags &
2781 		     RTE_ETH_DEV_INTR_LSC))
2782 			port->dev_conf.intr_conf.lsc = 1;
2783 		if (rmv_interrupt &&
2784 		    (rte_eth_devices[pid].data->dev_flags &
2785 		     RTE_ETH_DEV_INTR_RMV))
2786 			port->dev_conf.intr_conf.rmv = 1;
2787 	}
2788 }
2789 
2790 void set_port_slave_flag(portid_t slave_pid)
2791 {
2792 	struct rte_port *port;
2793 
2794 	port = &ports[slave_pid];
2795 	port->slave_flag = 1;
2796 }
2797 
2798 void clear_port_slave_flag(portid_t slave_pid)
2799 {
2800 	struct rte_port *port;
2801 
2802 	port = &ports[slave_pid];
2803 	port->slave_flag = 0;
2804 }
2805 
2806 uint8_t port_is_bonding_slave(portid_t slave_pid)
2807 {
2808 	struct rte_port *port;
2809 
2810 	port = &ports[slave_pid];
2811 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2812 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2813 		return 1;
2814 	return 0;
2815 }
2816 
2817 const uint16_t vlan_tags[] = {
2818 		0,  1,  2,  3,  4,  5,  6,  7,
2819 		8,  9, 10, 11,  12, 13, 14, 15,
2820 		16, 17, 18, 19, 20, 21, 22, 23,
2821 		24, 25, 26, 27, 28, 29, 30, 31
2822 };
2823 
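/*
 * Fill *eth_conf for DCB operation: VMDQ+DCB pools when dcb_mode is
 * DCB_VT_ENABLED, plain DCB (keeping the current RSS hash) otherwise.
 */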
2824 static int
2825 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2826 		 enum dcb_mode_enable dcb_mode,
2827 		 enum rte_eth_nb_tcs num_tcs,
2828 		 uint8_t pfc_en)
2829 {
2830 	uint8_t i;
2831 	int32_t rc;
2832 	struct rte_eth_rss_conf rss_conf;
2833 
2834 	/*
2835 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
2836 	 * given above, and the number of traffic classes available for use.
2837 	 */
2838 	if (dcb_mode == DCB_VT_ENABLED) {
2839 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2840 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2841 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2842 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2843 
2844 		/* VMDQ+DCB RX and TX configurations */
2845 		vmdq_rx_conf->enable_default_pool = 0;
2846 		vmdq_rx_conf->default_pool = 0;
2847 		vmdq_rx_conf->nb_queue_pools =
2848 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2849 		vmdq_tx_conf->nb_queue_pools =
2850 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2851 
2852 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2853 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2854 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2855 			vmdq_rx_conf->pool_map[i].pools =
2856 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2857 		}
2858 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2859 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2860 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2861 		}
2862 
2863 		/* set DCB mode of RX and TX of multiple queues */
2864 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2865 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2866 	} else {
2867 		struct rte_eth_dcb_rx_conf *rx_conf =
2868 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2869 		struct rte_eth_dcb_tx_conf *tx_conf =
2870 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2871 
2872 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2873 		if (rc != 0)
2874 			return rc;
2875 
2876 		rx_conf->nb_tcs = num_tcs;
2877 		tx_conf->nb_tcs = num_tcs;
2878 
2879 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2880 			rx_conf->dcb_tc[i] = i % num_tcs;
2881 			tx_conf->dcb_tc[i] = i % num_tcs;
2882 		}
2883 
2884 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2885 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
2886 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2887 	}
2888 
2889 	if (pfc_en)
2890 		eth_conf->dcb_capability_en =
2891 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2892 	else
2893 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2894 
2895 	return 0;
2896 }
2897 
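/*
 * Reconfigure a port for DCB: build the DCB (or VMDQ+DCB) configuration,
 * re-run rte_eth_dev_configure() with it, adjust the number of Rx/Tx
 * queues to the DCB layout and enable VLAN filtering for the test VLAN
 * tag set.
 */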
2898 int
2899 init_port_dcb_config(portid_t pid,
2900 		     enum dcb_mode_enable dcb_mode,
2901 		     enum rte_eth_nb_tcs num_tcs,
2902 		     uint8_t pfc_en)
2903 {
2904 	struct rte_eth_conf port_conf;
2905 	struct rte_port *rte_port;
2906 	int retval;
2907 	uint16_t i;
2908 
2909 	rte_port = &ports[pid];
2910 
2911 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2912 	/* Enter DCB configuration status */
2913 	dcb_config = 1;
2914 
2915 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2916 	port_conf.txmode = rte_port->dev_conf.txmode;
2917 
2918 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2919 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2920 	if (retval < 0)
2921 		return retval;
2922 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2923 
2924 	/* Re-configure the device. */
2925 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2926 
2927 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2928 
2929 	/* If dev_info.vmdq_pool_base is greater than 0,
2930 	 * the queue id of vmdq pools is started after pf queues.
2931 	 */
2932 	if (dcb_mode == DCB_VT_ENABLED &&
2933 	    rte_port->dev_info.vmdq_pool_base > 0) {
2934 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2935 			" for port %d.\n", pid);
2936 		return -1;
2937 	}
2938 
2939 	/* Assume the ports in testpmd have the same DCB capability
2940 	 * and the same number of rxq and txq in DCB mode
2941 	 */
2942 	if (dcb_mode == DCB_VT_ENABLED) {
2943 		if (rte_port->dev_info.max_vfs > 0) {
2944 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2945 			nb_txq = rte_port->dev_info.nb_tx_queues;
2946 		} else {
2947 			nb_rxq = rte_port->dev_info.max_rx_queues;
2948 			nb_txq = rte_port->dev_info.max_tx_queues;
2949 		}
2950 	} else {
2951 		/* If VT is disabled, use all PF queues */
2952 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2953 			nb_rxq = rte_port->dev_info.max_rx_queues;
2954 			nb_txq = rte_port->dev_info.max_tx_queues;
2955 		} else {
2956 			nb_rxq = (queueid_t)num_tcs;
2957 			nb_txq = (queueid_t)num_tcs;
2958 
2959 		}
2960 	}
2961 	rx_free_thresh = 64;
2962 
2963 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2964 
2965 	rxtx_port_config(rte_port);
2966 	/* VLAN filter */
2967 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2968 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2969 		rx_vft_set(pid, vlan_tags[i], 1);
2970 
2971 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2972 	map_port_queue_stats_mapping_registers(pid, rte_port);
2973 
2974 	rte_port->dcb_flag = 1;
2975 
2976 	return 0;
2977 }
2978 
2979 static void
2980 init_port(void)
2981 {
2982 	/* Configuration of Ethernet ports. */
2983 	ports = rte_zmalloc("testpmd: ports",
2984 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2985 			    RTE_CACHE_LINE_SIZE);
2986 	if (ports == NULL) {
2987 		rte_exit(EXIT_FAILURE,
2988 				"rte_zmalloc(%d struct rte_port) failed\n",
2989 				RTE_MAX_ETHPORTS);
2990 	}
2991 
2992 	/* Initialize ports NUMA structures */
2993 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2994 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2995 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2996 }
2997 
2998 static void
2999 force_quit(void)
3000 {
3001 	pmd_test_exit();
3002 	prompt_exit();
3003 }
3004 
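/* Clear the terminal and display the statistics of all forwarding ports. */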
3005 static void
3006 print_stats(void)
3007 {
3008 	uint8_t i;
3009 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3010 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3011 
3012 	/* Clear screen and move to top left */
3013 	printf("%s%s", clr, top_left);
3014 
3015 	printf("\nPort statistics ====================================");
3016 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3017 		nic_stats_display(fwd_ports_ids[i]);
3018 }
3019 
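/*
 * SIGINT/SIGTERM handler: uninitialize the pdump and latency-stats
 * frameworks when enabled, shut everything down and re-raise the signal
 * with the default handler so the expected exit status is reported.
 */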
3020 static void
3021 signal_handler(int signum)
3022 {
3023 	if (signum == SIGINT || signum == SIGTERM) {
3024 		printf("\nSignal %d received, preparing to exit...\n",
3025 				signum);
3026 #ifdef RTE_LIBRTE_PDUMP
3027 		/* uninitialize packet capture framework */
3028 		rte_pdump_uninit();
3029 #endif
3030 #ifdef RTE_LIBRTE_LATENCY_STATS
3031 		rte_latencystats_uninit();
3032 #endif
3033 		force_quit();
3034 		/* Set flag to indicate the force termination. */
3035 		f_quit = 1;
3036 		/* exit with the expected status */
3037 		signal(signum, SIG_DFL);
3038 		kill(getpid(), signum);
3039 	}
3040 }
3041 
3042 int
3043 main(int argc, char** argv)
3044 {
3045 	int diag;
3046 	portid_t port_id;
3047 	uint16_t count;
3048 	int ret;
3049 
3050 	signal(SIGINT, signal_handler);
3051 	signal(SIGTERM, signal_handler);
3052 
3053 	diag = rte_eal_init(argc, argv);
3054 	if (diag < 0)
3055 		rte_panic("Cannot init EAL\n");
3056 
3057 	testpmd_logtype = rte_log_register("testpmd");
3058 	if (testpmd_logtype < 0)
3059 		rte_panic("Cannot register log type\n");
3060 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3061 
3062 #ifdef RTE_LIBRTE_PDUMP
3063 	/* initialize packet capture framework */
3064 	rte_pdump_init(NULL);
3065 #endif
3066 
3067 	count = 0;
3068 	RTE_ETH_FOREACH_DEV(port_id) {
3069 		ports_ids[count] = port_id;
3070 		count++;
3071 	}
3072 	nb_ports = (portid_t) count;
3073 	if (nb_ports == 0)
3074 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3075 
3076 	/* allocate port structures, and init them */
3077 	init_port();
3078 
3079 	set_def_fwd_config();
3080 	if (nb_lcores == 0)
3081 		rte_panic("Empty set of forwarding logical cores - check the "
3082 			  "core mask supplied in the command parameters\n");
3083 
3084 	/* Bitrate/latency stats disabled by default */
3085 #ifdef RTE_LIBRTE_BITRATE
3086 	bitrate_enabled = 0;
3087 #endif
3088 #ifdef RTE_LIBRTE_LATENCY_STATS
3089 	latencystats_enabled = 0;
3090 #endif
3091 
3092 	/* on FreeBSD, mlockall() is disabled by default */
3093 #ifdef RTE_EXEC_ENV_BSDAPP
3094 	do_mlockall = 0;
3095 #else
3096 	do_mlockall = 1;
3097 #endif
3098 
3099 	argc -= diag;
3100 	argv += diag;
3101 	if (argc > 1)
3102 		launch_args_parse(argc, argv);
3103 
3104 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3105 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3106 			strerror(errno));
3107 	}
3108 
3109 	if (tx_first && interactive)
3110 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3111 				"interactive mode.\n");
3112 
3113 	if (tx_first && lsc_interrupt) {
3114 		printf("Warning: lsc_interrupt needs to be off when "
3115 				"using tx_first. Disabling.\n");
3116 		lsc_interrupt = 0;
3117 	}
3118 
3119 	if (!nb_rxq && !nb_txq)
3120 		printf("Warning: Either rx or tx queues should be non-zero\n");
3121 
3122 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3123 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3124 		       "but nb_txq=%d will prevent fully testing it.\n",
3125 		       nb_rxq, nb_txq);
3126 
3127 	init_config();
3128 
3129 	if (hot_plug) {
3130 		ret = rte_dev_hotplug_handle_enable();
3131 		if (ret) {
3132 			RTE_LOG(ERR, EAL,
3133 				"Failed to enable hotplug handling.\n");
3134 			return -1;
3135 		}
3136 
3137 		ret = rte_dev_event_monitor_start();
3138 		if (ret) {
3139 			RTE_LOG(ERR, EAL,
3140 				"Failed to start device event monitoring.\n");
3141 			return -1;
3142 		}
3143 
3144 		ret = rte_dev_event_callback_register(NULL,
3145 			eth_dev_event_callback, NULL);
3146 		if (ret) {
3147 			RTE_LOG(ERR, EAL,
3148 				"Failed to register device event callback\n");
3149 			return -1;
3150 		}
3151 	}
3152 
3153 	if (start_port(RTE_PORT_ALL) != 0)
3154 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3155 
3156 	/* set all ports to promiscuous mode by default */
3157 	RTE_ETH_FOREACH_DEV(port_id)
3158 		rte_eth_promiscuous_enable(port_id);
3159 
3160 	/* Init metrics library */
3161 	rte_metrics_init(rte_socket_id());
3162 
3163 #ifdef RTE_LIBRTE_LATENCY_STATS
3164 	if (latencystats_enabled != 0) {
3165 		int ret = rte_latencystats_init(1, NULL);
3166 		if (ret)
3167 			printf("Warning: latencystats init()"
3168 				" returned error %d\n",	ret);
3169 		printf("Latencystats running on lcore %d\n",
3170 			latencystats_lcore_id);
3171 	}
3172 #endif
3173 
3174 	/* Setup bitrate stats */
3175 #ifdef RTE_LIBRTE_BITRATE
3176 	if (bitrate_enabled != 0) {
3177 		bitrate_data = rte_stats_bitrate_create();
3178 		if (bitrate_data == NULL)
3179 			rte_exit(EXIT_FAILURE,
3180 				"Could not allocate bitrate data.\n");
3181 		rte_stats_bitrate_reg(bitrate_data);
3182 	}
3183 #endif
3184 
3185 #ifdef RTE_LIBRTE_CMDLINE
3186 	if (strlen(cmdline_filename) != 0)
3187 		cmdline_read_from_file(cmdline_filename);
3188 
3189 	if (interactive == 1) {
3190 		if (auto_start) {
3191 			printf("Start automatic packet forwarding\n");
3192 			start_packet_forwarding(0);
3193 		}
3194 		prompt();
3195 		pmd_test_exit();
3196 	} else
3197 #endif
3198 	{
3199 		char c;
3200 		int rc;
3201 
3202 		f_quit = 0;
3203 
3204 		printf("No command line given, starting packet forwarding\n");
3205 		start_packet_forwarding(tx_first);
3206 		if (stats_period != 0) {
3207 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3208 			uint64_t timer_period;
3209 
3210 			/* Convert to number of cycles */
3211 			timer_period = stats_period * rte_get_timer_hz();
3212 
3213 			while (f_quit == 0) {
3214 				cur_time = rte_get_timer_cycles();
3215 				diff_time += cur_time - prev_time;
3216 
3217 				if (diff_time >= timer_period) {
3218 					print_stats();
3219 					/* Reset the timer */
3220 					diff_time = 0;
3221 				}
3222 				/* Sleep to avoid unnecessary checks */
3223 				prev_time = cur_time;
3224 				sleep(1);
3225 			}
3226 		}
3227 
3228 		printf("Press enter to exit\n");
3229 		rc = read(0, &c, 1);
3230 		pmd_test_exit();
3231 		if (rc < 0)
3232 			return 1;
3233 	}
3234 
3235 	return 0;
3236 }
3237