/* xref: /dpdk/app/test-pmd/testpmd.c (revision c18feafa193c0d816eae3a4861b1f9016cf236d7) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
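/*
 * Note: the allocation type is normally chosen at run time through
 * testpmd's --mp-alloc command-line option (native|anon|xmem|xmemhuge).
 */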

/*
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
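/*
 * The active engine is selected at run time, e.g. with the interactive
 * "set fwd <mode>" command or the --forward-mode=<mode> option; each
 * entry's fwd_mode_name field supplies the <mode> string.
 */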

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process running with the 'stats-period' option cannot
 * otherwise be terminated. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
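/*
 * Example: to also print VF mailbox events, OR in
 * (UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX); at run time the mask is
 * adjusted with the --print-event/--mask-event command-line options.
 */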
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket is new (not yet discovered).
 * Return a positive value if the socket is new, zero if it is already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
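/*
 * Typical caller pattern (see set_default_fwd_lcores_config() below):
 *
 *	if (new_socket_id(sock_num))
 *		socket_ids[num_sockets++] = sock_num;
 */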

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
594 
595 /* extremely pessimistic estimation of memory required to create a mempool */
596 static int
597 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
598 {
599 	unsigned int n_pages, mbuf_per_pg, leftover;
600 	uint64_t total_mem, mbuf_mem, obj_sz;
601 
602 	/* there is no good way to predict how much space the mempool will
603 	 * occupy because it will allocate chunks on the fly, and some of those
604 	 * will come from default DPDK memory while some will come from our
605 	 * external memory, so just assume 128MB will be enough for everyone.
606 	 */
607 	uint64_t hdr_mem = 128 << 20;
608 
609 	/* account for possible non-contiguousness */
610 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
611 	if (obj_sz > pgsz) {
612 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
613 		return -1;
614 	}
615 
616 	mbuf_per_pg = pgsz / obj_sz;
617 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
618 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
619 
620 	mbuf_mem = n_pages * pgsz;
621 
622 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
623 
624 	if (total_mem > SIZE_MAX) {
625 		TESTPMD_LOG(ERR, "Memory size too big\n");
626 		return -1;
627 	}
628 	*out = (size_t)total_mem;
629 
630 	return 0;
631 }
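/*
 * Worked example with illustrative numbers: for pgsz = 2 MiB and
 * obj_sz = 2048 B, mbuf_per_pg = 1024. With nb_mbufs = 10000 this gives
 * n_pages = 10 (9 full pages plus one page for the 784 leftover objects),
 * so mbuf_mem = 20 MiB and total_mem = 128 MiB + 20 MiB = 148 MiB
 * (already 2 MiB-aligned).
 */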

static inline uint32_t
bsf64(uint64_t v)
{
	return (uint32_t)__builtin_ctzll(v);
}

static inline uint32_t
log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	return bsf64(v);
}
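/*
 * Example: log2_u64(6) rounds 6 up to the next power of two (8) and
 * returns bsf64(8) = 3; for an exact power of two such as RTE_PGSIZE_2M
 * (2^21), it returns 21.
 */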

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
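/*
 * Example: for 2 MiB pages, log2 = 21 and the result is 21 << 26 =
 * 0x54000000, which matches Linux's MAP_HUGE_2MB; 1 GiB pages yield
 * 30 << 26, matching MAP_HUGE_1GB.
 */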

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
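/*
 * Example (hypothetical sizes): alloc_mem(1 << 30, RTE_PGSIZE_2M, true)
 * requests 1 GiB of anonymous 2 MiB hugepages. mmap() fails (NULL is
 * returned) if not enough hugepages are reserved, in which case
 * create_extmem() below simply retries with the next page size.
 */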

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the given rxq is valid.
 * It is valid if it is not greater than the maximum number of RX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the given txq is valid.
 * It is valid if it is not greater than the maximum number of TX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
			DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
			       port->rx_bad_outer_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u Rx- bad outer L4 checksum: %-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * packets, so a timer is used to exit the
				 * loop after the 1-second timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
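	/*
	 * Marking the private copy as already stopped makes the do-while
	 * loop in run_pkt_fwd_on_lcore() execute exactly one pass, i.e.
	 * a single TX burst per stream.
	 */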
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1748 
1749 void
1750 stop_packet_forwarding(void)
1751 {
1752 	struct rte_eth_stats stats;
1753 	struct rte_port *port;
1754 	port_fwd_end_t  port_fwd_end;
1755 	int i;
1756 	portid_t   pt_id;
1757 	streamid_t sm_id;
1758 	lcoreid_t  lc_id;
1759 	uint64_t total_recv;
1760 	uint64_t total_xmit;
1761 	uint64_t total_rx_dropped;
1762 	uint64_t total_tx_dropped;
1763 	uint64_t total_rx_nombuf;
1764 	uint64_t tx_dropped;
1765 	uint64_t rx_bad_ip_csum;
1766 	uint64_t rx_bad_l4_csum;
1767 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1768 	uint64_t fwd_cycles;
1769 #endif
1770 
1771 	static const char *acc_stats_border = "+++++++++++++++";
1772 
1773 	if (test_done) {
1774 		printf("Packet forwarding not started\n");
1775 		return;
1776 	}
1777 	printf("Telling cores to stop...");
1778 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1779 		fwd_lcores[lc_id]->stopped = 1;
1780 	printf("\nWaiting for lcores to finish...\n");
1781 	rte_eal_mp_wait_lcore();
1782 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1783 	if (port_fwd_end != NULL) {
1784 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1785 			pt_id = fwd_ports_ids[i];
1786 			(*port_fwd_end)(pt_id);
1787 		}
1788 	}
1789 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1790 	fwd_cycles = 0;
1791 #endif
1792 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1793 		if (cur_fwd_config.nb_fwd_streams >
1794 		    cur_fwd_config.nb_fwd_ports) {
1795 			fwd_stream_stats_display(sm_id);
1796 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1797 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1798 		} else {
1799 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1800 				fwd_streams[sm_id];
1801 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1802 				fwd_streams[sm_id];
1803 		}
1804 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1805 		tx_dropped = (uint64_t) (tx_dropped +
1806 					 fwd_streams[sm_id]->fwd_dropped);
1807 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1808 
1809 		rx_bad_ip_csum =
1810 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1811 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1812 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1813 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1814 							rx_bad_ip_csum;
1815 
1816 		rx_bad_l4_csum =
1817 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1818 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1819 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1820 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1821 							rx_bad_l4_csum;
1822 
1823 		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1824 				fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1825 
1826 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1827 		fwd_cycles = (uint64_t) (fwd_cycles +
1828 					 fwd_streams[sm_id]->core_cycles);
1829 #endif
1830 	}
1831 	total_recv = 0;
1832 	total_xmit = 0;
1833 	total_rx_dropped = 0;
1834 	total_tx_dropped = 0;
1835 	total_rx_nombuf  = 0;
1836 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1837 		pt_id = fwd_ports_ids[i];
1838 
1839 		port = &ports[pt_id];
1840 		rte_eth_stats_get(pt_id, &stats);
1841 		stats.ipackets -= port->stats.ipackets;
1842 		port->stats.ipackets = 0;
1843 		stats.opackets -= port->stats.opackets;
1844 		port->stats.opackets = 0;
1845 		stats.ibytes   -= port->stats.ibytes;
1846 		port->stats.ibytes = 0;
1847 		stats.obytes   -= port->stats.obytes;
1848 		port->stats.obytes = 0;
1849 		stats.imissed  -= port->stats.imissed;
1850 		port->stats.imissed = 0;
1851 		stats.oerrors  -= port->stats.oerrors;
1852 		port->stats.oerrors = 0;
1853 		stats.rx_nombuf -= port->stats.rx_nombuf;
1854 		port->stats.rx_nombuf = 0;
1855 
1856 		total_recv += stats.ipackets;
1857 		total_xmit += stats.opackets;
1858 		total_rx_dropped += stats.imissed;
1859 		total_tx_dropped += port->tx_dropped;
1860 		total_rx_nombuf  += stats.rx_nombuf;
1861 
1862 		fwd_port_stats_display(pt_id, &stats);
1863 	}
1864 
1865 	printf("\n  %s Accumulated forward statistics for all ports"
1866 	       "%s\n",
1867 	       acc_stats_border, acc_stats_border);
1868 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64" RX-total: "
1869 	       "%-"PRIu64"\n"
1870 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64" TX-total: "
1871 	       "%-"PRIu64"\n",
1872 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1873 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1874 	if (total_rx_nombuf > 0)
1875 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1876 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1877 	       "%s\n",
1878 	       acc_stats_border, acc_stats_border);
1879 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1880 	if (total_recv > 0)
1881 		printf("\n  CPU cycles/packet=%u (total cycles="
1882 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1883 		       (unsigned int)(fwd_cycles / total_recv),
1884 		       fwd_cycles, total_recv);
1885 #endif
1886 	printf("\nDone.\n");
1887 	test_done = 1;
1888 }
1889 
1890 void
1891 dev_set_link_up(portid_t pid)
1892 {
1893 	if (rte_eth_dev_set_link_up(pid) < 0)
1894 		printf("\nSet link up fail.\n");
1895 }
1896 
1897 void
1898 dev_set_link_down(portid_t pid)
1899 {
1900 	if (rte_eth_dev_set_link_down(pid) < 0)
1901 		printf("\nSet link down fail.\n");
1902 }
1903 
1904 static int
1905 all_ports_started(void)
1906 {
1907 	portid_t pi;
1908 	struct rte_port *port;
1909 
1910 	RTE_ETH_FOREACH_DEV(pi) {
1911 		port = &ports[pi];
1912 		/* Check if there is a port which is not started */
1913 		if ((port->port_status != RTE_PORT_STARTED) &&
1914 			(port->slave_flag == 0))
1915 			return 0;
1916 	}
1917 
1918 	/* All non-slave ports are started */
1919 	return 1;
1920 }
1921 
1922 int
1923 port_is_stopped(portid_t port_id)
1924 {
1925 	struct rte_port *port = &ports[port_id];
1926 
1927 	if ((port->port_status != RTE_PORT_STOPPED) &&
1928 	    (port->slave_flag == 0))
1929 		return 0;
1930 	return 1;
1931 }
1932 
1933 int
1934 all_ports_stopped(void)
1935 {
1936 	portid_t pi;
1937 
1938 	RTE_ETH_FOREACH_DEV(pi) {
1939 		if (!port_is_stopped(pi))
1940 			return 0;
1941 	}
1942 
1943 	return 1;
1944 }
1945 
1946 int
1947 port_is_started(portid_t port_id)
1948 {
1949 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1950 		return 0;
1951 
1952 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1953 		return 0;
1954 
1955 	return 1;
1956 }
1957 
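/*
 * Hypothetical usage sketch for the predicates above: configuration
 * changes are usually guarded so that they are only applied while no
 * port is running, e.g.:
 *
 *	if (!all_ports_stopped()) {
 *		printf("Please stop all ports first\n");
 *		return;
 *	}
 */
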
1958 int
1959 start_port(portid_t pid)
1960 {
1961 	int diag, need_check_link_status = -1;
1962 	portid_t pi;
1963 	queueid_t qi;
1964 	struct rte_port *port;
1965 	struct ether_addr mac_addr;
1966 	enum rte_eth_event_type event_type;
1967 
1968 	if (port_id_is_invalid(pid, ENABLED_WARN))
1969 		return 0;
1970 
1971 	if (dcb_config)
1972 		dcb_test = 1;
1973 	RTE_ETH_FOREACH_DEV(pi) {
1974 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1975 			continue;
1976 
1977 		need_check_link_status = 0;
1978 		port = &ports[pi];
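		/*
		 * The port status is advanced with an atomic compare-and-set
		 * (STOPPED -> HANDLING -> STARTED) so that a concurrent
		 * management path cannot operate on a port that is being
		 * (re)configured here.
		 */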
1979 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1980 						 RTE_PORT_HANDLING) == 0) {
1981 			printf("Port %d is not stopped\n", pi);
1982 			continue;
1983 		}
1984 
1985 		if (port->need_reconfig > 0) {
1986 			port->need_reconfig = 0;
1987 
1988 			if (flow_isolate_all) {
1989 				int ret = port_flow_isolate(pi, 1);
1990 				if (ret) {
1991 					printf("Failed to apply isolated"
1992 					       " mode on port %d\n", pi);
1993 					return -1;
1994 				}
1995 			}
1996 			configure_rxtx_dump_callbacks(0);
1997 			printf("Configuring Port %d (socket %u)\n", pi,
1998 					port->socket_id);
1999 			/* configure port */
2000 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2001 						&(port->dev_conf));
2002 			if (diag != 0) {
2003 				if (rte_atomic16_cmpset(&(port->port_status),
2004 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2005 					printf("Port %d cannot be set back "
2006 							"to stopped\n", pi);
2007 				printf("Failed to configure port %d\n", pi);
2008 				/* try to reconfigure port next time */
2009 				port->need_reconfig = 1;
2010 				return -1;
2011 			}
2012 		}
2013 		if (port->need_reconfig_queues > 0) {
2014 			port->need_reconfig_queues = 0;
2015 			/* setup tx queues */
2016 			for (qi = 0; qi < nb_txq; qi++) {
2017 				if ((numa_support) &&
2018 					(txring_numa[pi] != NUMA_NO_CONFIG))
2019 					diag = rte_eth_tx_queue_setup(pi, qi,
2020 						port->nb_tx_desc[qi],
2021 						txring_numa[pi],
2022 						&(port->tx_conf[qi]));
2023 				else
2024 					diag = rte_eth_tx_queue_setup(pi, qi,
2025 						port->nb_tx_desc[qi],
2026 						port->socket_id,
2027 						&(port->tx_conf[qi]));
2028 
2029 				if (diag == 0)
2030 					continue;
2031 
2032 				/* Failed to set up a TX queue: return */
2033 				if (rte_atomic16_cmpset(&(port->port_status),
2034 							RTE_PORT_HANDLING,
2035 							RTE_PORT_STOPPED) == 0)
2036 					printf("Port %d cannot be set back "
2037 							"to stopped\n", pi);
2038 				printf("Failed to configure port %d TX queues\n",
2039 				       pi);
2040 				/* try to reconfigure queues next time */
2041 				port->need_reconfig_queues = 1;
2042 				return -1;
2043 			}
2044 			for (qi = 0; qi < nb_rxq; qi++) {
2045 				/* setup rx queues */
2046 				if ((numa_support) &&
2047 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2048 					struct rte_mempool *mp =
2049 						mbuf_pool_find(rxring_numa[pi]);
2050 					if (mp == NULL) {
2051 						printf("Failed to setup RX queue: "
2052 							"no mempool allocated"
2053 							" on socket %d\n",
2054 							rxring_numa[pi]);
2055 						return -1;
2056 					}
2057 
2058 					diag = rte_eth_rx_queue_setup(pi, qi,
2059 					     port->nb_rx_desc[qi],
2060 					     rxring_numa[pi],
2061 					     &(port->rx_conf[qi]),
2062 					     mp);
2063 				} else {
2064 					struct rte_mempool *mp =
2065 						mbuf_pool_find(port->socket_id);
2066 					if (mp == NULL) {
2067 						printf("Failed to setup RX queue: "
2068 							"no mempool allocated"
2069 							" on socket %d\n",
2070 							port->socket_id);
2071 						return -1;
2072 					}
2073 					diag = rte_eth_rx_queue_setup(pi, qi,
2074 					     port->nb_rx_desc[qi],
2075 					     port->socket_id,
2076 					     &(port->rx_conf[qi]),
2077 					     mp);
2078 				}
2079 				if (diag == 0)
2080 					continue;
2081 
2082 				/* Failed to set up an RX queue: return */
2083 				if (rte_atomic16_cmpset(&(port->port_status),
2084 							RTE_PORT_HANDLING,
2085 							RTE_PORT_STOPPED) == 0)
2086 					printf("Port %d cannot be set back "
2087 							"to stopped\n", pi);
2088 				printf("Failed to configure port %d RX queues\n",
2089 				       pi);
2090 				/* try to reconfigure queues next time */
2091 				port->need_reconfig_queues = 1;
2092 				return -1;
2093 			}
2094 		}
2095 		configure_rxtx_dump_callbacks(verbose_level);
2096 		/* start port */
2097 		if (rte_eth_dev_start(pi) < 0) {
2098 			printf("Failed to start port %d\n", pi);
2099 
2100 			/* Failed to start the port: set it back to stopped */
2101 			if (rte_atomic16_cmpset(&(port->port_status),
2102 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2103 				printf("Port %d cannot be set back to "
2104 							"stopped\n", pi);
2105 			continue;
2106 		}
2107 
2108 		if (rte_atomic16_cmpset(&(port->port_status),
2109 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2110 			printf("Port %d cannot be set to started\n", pi);
2111 
2112 		rte_eth_macaddr_get(pi, &mac_addr);
2113 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2114 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2115 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2116 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2117 
2118 		/* at least one port started, need to check link status */
2119 		need_check_link_status = 1;
2120 	}
2121 
2122 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
2123 	     event_type < RTE_ETH_EVENT_MAX;
2124 	     event_type++) {
2125 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2126 						event_type,
2127 						eth_event_callback,
2128 						NULL);
2129 		if (diag) {
2130 			printf("Failed to setup event callback for event %d\n",
2131 				event_type);
2132 			return -1;
2133 		}
2134 	}
2135 
2136 	if (need_check_link_status == 1 && !no_link_check)
2137 		check_all_ports_link_status(RTE_PORT_ALL);
2138 	else if (need_check_link_status == 0)
2139 		printf("Please stop the ports first\n");
2140 
2141 	printf("Done\n");
2142 	return 0;
2143 }
2144 
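/*
 * Hypothetical restart sketch built on start_port() and stop_port():
 * reconfiguration is deferred to the next start, because start_port()
 * only re-runs rte_eth_dev_configure() and the queue setup when the
 * need_reconfig/need_reconfig_queues flags are set, e.g.:
 *
 *	stop_port(RTE_PORT_ALL);
 *	ports[pid].need_reconfig_queues = 1;	(for a hypothetical pid)
 *	if (start_port(RTE_PORT_ALL) != 0)
 *		printf("Restart failed\n");
 */
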
2145 void
2146 stop_port(portid_t pid)
2147 {
2148 	portid_t pi;
2149 	struct rte_port *port;
2150 	int need_check_link_status = 0;
2151 
2152 	if (dcb_test) {
2153 		dcb_test = 0;
2154 		dcb_config = 0;
2155 	}
2156 
2157 	if (port_id_is_invalid(pid, ENABLED_WARN))
2158 		return;
2159 
2160 	printf("Stopping ports...\n");
2161 
2162 	RTE_ETH_FOREACH_DEV(pi) {
2163 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2164 			continue;
2165 
2166 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2167 			printf("Please remove port %d from forwarding configuration.\n", pi);
2168 			continue;
2169 		}
2170 
2171 		if (port_is_bonding_slave(pi)) {
2172 			printf("Please remove port %d from bonded device.\n", pi);
2173 			continue;
2174 		}
2175 
2176 		port = &ports[pi];
2177 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2178 						RTE_PORT_HANDLING) == 0)
2179 			continue;
2180 
2181 		rte_eth_dev_stop(pi);
2182 
2183 		if (rte_atomic16_cmpset(&(port->port_status),
2184 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2185 			printf("Port %d cannot be set to stopped\n", pi);
2186 		need_check_link_status = 1;
2187 	}
2188 	if (need_check_link_status && !no_link_check)
2189 		check_all_ports_link_status(RTE_PORT_ALL);
2190 
2191 	printf("Done\n");
2192 }
2193 
2194 static void
2195 remove_unused_fwd_ports(void)
2196 {
2197 	int i;
2198 	int last_port_idx = nb_ports - 1;
2199 
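	/*
	 * Compact ports_ids[] in place: each UNUSED slot is overwritten
	 * with the last still-valid entry.  Worked example (hypothetical):
	 * with ports_ids = {0, 1, 2, 3} and port 1 detached, the loop
	 * rewrites the array to {0, 3, 2} and nb_ports becomes 3 below.
	 */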
2200 	for (i = 0; i <= last_port_idx; i++) { /* iterate in ports_ids */
2201 		if (rte_eth_devices[ports_ids[i]].state != RTE_ETH_DEV_UNUSED)
2202 			continue;
2203 		/* skip unused ports at the end */
2204 		while (i <= last_port_idx &&
2205 				rte_eth_devices[ports_ids[last_port_idx]].state
2206 				== RTE_ETH_DEV_UNUSED)
2207 			last_port_idx--;
2208 		if (last_port_idx < i)
2209 			break;
2210 		/* overwrite unused port with last valid port */
2211 		ports_ids[i] = ports_ids[last_port_idx];
2212 		/* decrease ports count */
2213 		last_port_idx--;
2214 	}
2215 	nb_ports = rte_eth_dev_count_avail();
2216 	update_fwd_ports(RTE_MAX_ETHPORTS);
2217 }
2218 
2219 void
2220 close_port(portid_t pid)
2221 {
2222 	portid_t pi;
2223 	struct rte_port *port;
2224 
2225 	if (port_id_is_invalid(pid, ENABLED_WARN))
2226 		return;
2227 
2228 	printf("Closing ports...\n");
2229 
2230 	RTE_ETH_FOREACH_DEV(pi) {
2231 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2232 			continue;
2233 
2234 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2235 			printf("Please remove port %d from forwarding configuration.\n", pi);
2236 			continue;
2237 		}
2238 
2239 		if (port_is_bonding_slave(pi)) {
2240 			printf("Please remove port %d from bonded device.\n", pi);
2241 			continue;
2242 		}
2243 
2244 		port = &ports[pi];
2245 		if (rte_atomic16_cmpset(&(port->port_status),
2246 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2247 			printf("Port %d is already closed\n", pi);
2248 			continue;
2249 		}
2250 
2251 		if (rte_atomic16_cmpset(&(port->port_status),
2252 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2253 			printf("Port %d is not stopped\n", pi);
2254 			continue;
2255 		}
2256 
2257 		if (port->flow_list)
2258 			port_flow_flush(pi);
2259 		rte_eth_dev_close(pi);
2260 
2261 		remove_unused_fwd_ports();
2262 
2263 		if (rte_atomic16_cmpset(&(port->port_status),
2264 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2265 			printf("Port %d cannot be set to closed\n", pi);
2266 	}
2267 
2268 	printf("Done\n");
2269 }
2270 
2271 void
2272 reset_port(portid_t pid)
2273 {
2274 	int diag;
2275 	portid_t pi;
2276 	struct rte_port *port;
2277 
2278 	if (port_id_is_invalid(pid, ENABLED_WARN))
2279 		return;
2280 
2281 	printf("Resetting ports...\n");
2282 
2283 	RTE_ETH_FOREACH_DEV(pi) {
2284 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2285 			continue;
2286 
2287 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2288 			printf("Please remove port %d from forwarding "
2289 			       "configuration.\n", pi);
2290 			continue;
2291 		}
2292 
2293 		if (port_is_bonding_slave(pi)) {
2294 			printf("Please remove port %d from bonded device.\n",
2295 			       pi);
2296 			continue;
2297 		}
2298 
2299 		diag = rte_eth_dev_reset(pi);
2300 		if (diag == 0) {
2301 			port = &ports[pi];
2302 			port->need_reconfig = 1;
2303 			port->need_reconfig_queues = 1;
2304 		} else {
2305 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
2306 		}
2307 	}
2308 
2309 	printf("Done\n");
2310 }
2311 
2312 void
2313 attach_port(char *identifier)
2314 {
2315 	portid_t pi = 0;
2316 	unsigned int socket_id;
2317 
2318 	printf("Attaching a new port...\n");
2319 
2320 	if (identifier == NULL) {
2321 		printf("Invalid parameter specified\n");
2322 		return;
2323 	}
2324 
2325 	if (rte_eth_dev_attach(identifier, &pi))
2326 		return;
2327 
2328 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2329 	/* if socket_id is invalid, set to the first available socket. */
2330 	if (check_socket_id(socket_id) < 0)
2331 		socket_id = socket_ids[0];
2332 	reconfig(pi, socket_id);
2333 	rte_eth_promiscuous_enable(pi);
2334 
2335 	ports_ids[nb_ports] = pi;
2336 	nb_ports = rte_eth_dev_count_avail();
2337 
2338 	ports[pi].port_status = RTE_PORT_STOPPED;
2339 
2340 	update_fwd_ports(pi);
2341 
2342 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2343 	printf("Done\n");
2344 }
2345 
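/*
 * The identifier given to attach_port() is a plain rte_eth_dev_attach()
 * devargs string, for example a PCI address such as "0000:02:00.0" or a
 * virtual device such as "net_tap0" (hypothetical devices).
 */
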
2346 void
2347 detach_port(portid_t port_id)
2348 {
2349 	char name[RTE_ETH_NAME_MAX_LEN];
2350 
2351 	printf("Detaching a port...\n");
2352 
2353 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2354 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2355 			printf("Port not stopped\n");
2356 			return;
2357 		}
2358 		printf("Port was not closed\n");
2359 		if (ports[port_id].flow_list)
2360 			port_flow_flush(port_id);
2361 	}
2362 
2363 	if (rte_eth_dev_detach(port_id, name)) {
2364 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2365 		return;
2366 	}
2367 
2368 	remove_unused_fwd_ports();
2369 
2370 	printf("Port %u is detached. Total number of ports is now %d\n",
2371 			port_id, nb_ports);
2372 	printf("Done\n");
2373 	return;
2374 }
2375 
2376 void
2377 pmd_test_exit(void)
2378 {
2379 	struct rte_device *device;
2380 	portid_t pt_id;
2381 	int ret;
2382 
2383 	if (test_done == 0)
2384 		stop_packet_forwarding();
2385 
2386 	if (ports != NULL) {
2387 		no_link_check = 1;
2388 		RTE_ETH_FOREACH_DEV(pt_id) {
2389 			printf("\nShutting down port %d...\n", pt_id);
2390 			fflush(stdout);
2391 			stop_port(pt_id);
2392 			close_port(pt_id);
2393 
2394 			/*
2395 			 * This is a workaround to fix a virtio-user issue
2396 			 * that requires calling the clean-up routine to
2397 			 * remove the existing socket.
2398 			 * This workaround is valid only for testpmd; a fix
2399 			 * valid for all applications is still needed.
2400 			 * TODO: Implement proper resource cleanup
2401 			 */
2402 			device = rte_eth_devices[pt_id].device;
2403 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2404 				detach_port(pt_id);
2405 		}
2406 	}
2407 
2408 	if (hot_plug) {
2409 		ret = rte_dev_event_monitor_stop();
2410 		if (ret) {
2411 			RTE_LOG(ERR, EAL,
2412 				"fail to stop device event monitor.\n");
2413 			return;
2414 		}
2415 
2416 		ret = rte_dev_event_callback_unregister(NULL,
2417 			eth_dev_event_callback, NULL);
2418 		if (ret < 0) {
2419 			RTE_LOG(ERR, EAL,
2420 				"fail to unregister device event callback.\n");
2421 			return;
2422 		}
2423 
2424 		ret = rte_dev_hotplug_handle_disable();
2425 		if (ret) {
2426 			RTE_LOG(ERR, EAL,
2427 				"fail to disable hotplug handling.\n");
2428 			return;
2429 		}
2430 	}
2431 
2432 	printf("\nBye...\n");
2433 }
2434 
2435 typedef void (*cmd_func_t)(void);
2436 struct pmd_test_command {
2437 	const char *cmd_name;
2438 	cmd_func_t cmd_func;
2439 };
2440 
2441 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2442 
2443 /* Check the link status of all ports for up to 9 seconds and print the final status of each port */
2444 static void
2445 check_all_ports_link_status(uint32_t port_mask)
2446 {
2447 #define CHECK_INTERVAL 100 /* 100ms */
2448 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2449 	portid_t portid;
2450 	uint8_t count, all_ports_up, print_flag = 0;
2451 	struct rte_eth_link link;
2452 
2453 	printf("Checking link statuses...\n");
2454 	fflush(stdout);
2455 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2456 		all_ports_up = 1;
2457 		RTE_ETH_FOREACH_DEV(portid) {
2458 			if ((port_mask & (1 << portid)) == 0)
2459 				continue;
2460 			memset(&link, 0, sizeof(link));
2461 			rte_eth_link_get_nowait(portid, &link);
2462 			/* print link status if flag set */
2463 			if (print_flag == 1) {
2464 				if (link.link_status)
2465 					printf(
2466 					"Port %d Link Up. speed %u Mbps - %s\n",
2467 					portid, link.link_speed,
2468 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2469 					("full-duplex") : ("half-duplex"));
2470 				else
2471 					printf("Port %d Link Down\n", portid);
2472 				continue;
2473 			}
2474 			/* clear all_ports_up flag if any link down */
2475 			if (link.link_status == ETH_LINK_DOWN) {
2476 				all_ports_up = 0;
2477 				break;
2478 			}
2479 		}
2480 		/* after finally printing all link status, get out */
2481 		if (print_flag == 1)
2482 			break;
2483 
2484 		if (all_ports_up == 0) {
2485 			fflush(stdout);
2486 			rte_delay_ms(CHECK_INTERVAL);
2487 		}
2488 
2489 		/* set the print_flag if all ports up or timeout */
2490 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2491 			print_flag = 1;
2492 		}
2493 
2494 		if (lsc_interrupt)
2495 			break;
2496 	}
2497 }
2498 
2499 static void
2500 rmv_event_callback(void *arg)
2501 {
2502 	int need_to_start = 0;
2503 	int org_no_link_check = no_link_check;
2504 	portid_t port_id = (intptr_t)arg;
2505 
2506 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2507 
2508 	if (!test_done && port_is_forwarding(port_id)) {
2509 		need_to_start = 1;
2510 		stop_packet_forwarding();
2511 	}
2512 	no_link_check = 1;
2513 	stop_port(port_id);
2514 	no_link_check = org_no_link_check;
2515 	close_port(port_id);
2516 	detach_port(port_id);
2517 	if (need_to_start)
2518 		start_packet_forwarding(0);
2519 }
2520 
2521 /* This function is used by the interrupt thread */
2522 static int
2523 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2524 		  void *ret_param)
2525 {
2526 	static const char * const event_desc[] = {
2527 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2528 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2529 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2530 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2531 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2532 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2533 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2534 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2535 		[RTE_ETH_EVENT_NEW] = "device probed",
2536 		[RTE_ETH_EVENT_DESTROY] = "device released",
2537 		[RTE_ETH_EVENT_MAX] = NULL,
2538 	};
2539 
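	/*
	 * event_print_mask is a bitmask indexed by event type: for example,
	 * a mask with only bit (1 << RTE_ETH_EVENT_INTR_LSC) set makes just
	 * link state change events verbose below.
	 */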
2540 	RTE_SET_USED(param);
2541 	RTE_SET_USED(ret_param);
2542 
2543 	if (type >= RTE_ETH_EVENT_MAX) {
2544 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2545 			port_id, __func__, type);
2546 		fflush(stderr);
2547 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2548 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2549 			event_desc[type]);
2550 		fflush(stdout);
2551 	}
2552 
2553 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2554 		return 0;
2555 
2556 	switch (type) {
2557 	case RTE_ETH_EVENT_INTR_RMV:
2558 		if (rte_eal_alarm_set(100000,
2559 				rmv_event_callback, (void *)(intptr_t)port_id))
2560 			fprintf(stderr, "Could not set up deferred device removal\n");
2561 		break;
2562 	default:
2563 		break;
2564 	}
2565 	return 0;
2566 }
2567 
2568 /* This function is used by the interrupt thread */
2569 static void
2570 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2571 			     __rte_unused void *arg)
2572 {
2573 	uint16_t port_id;
2574 	int ret;
2575 
2576 	if (type >= RTE_DEV_EVENT_MAX) {
2577 		fprintf(stderr, "%s called upon invalid event %d\n",
2578 			__func__, type);
2579 		fflush(stderr);
2580 	}
2581 
2582 	switch (type) {
2583 	case RTE_DEV_EVENT_REMOVE:
2584 		RTE_LOG(ERR, EAL, "Device %s has been removed!\n",
2585 			device_name);
2586 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2587 		if (ret) {
2588 			RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
2589 				device_name);
2590 			return;
2591 		}
2592 		rmv_event_callback((void *)(intptr_t)port_id);
2593 		break;
2594 	case RTE_DEV_EVENT_ADD:
2595 		RTE_LOG(ERR, EAL, "Device %s has been added!\n",
2596 			device_name);
2597 		/* TODO: after kernel driver binding is finished,
2598 		 * begin to attach the port.
2599 		 */
2600 		break;
2601 	default:
2602 		break;
2603 	}
2604 }
2605 
2606 static int
2607 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2608 {
2609 	uint16_t i;
2610 	int diag;
2611 	uint8_t mapping_found = 0;
2612 
2613 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2614 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2615 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2616 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2617 					tx_queue_stats_mappings[i].queue_id,
2618 					tx_queue_stats_mappings[i].stats_counter_id);
2619 			if (diag != 0)
2620 				return diag;
2621 			mapping_found = 1;
2622 		}
2623 	}
2624 	if (mapping_found)
2625 		port->tx_queue_stats_mapping_enabled = 1;
2626 	return 0;
2627 }
2628 
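/*
 * Illustrative mapping entry (hypothetical values): an element of
 * tx_queue_stats_mappings[] such as {port_id = 0, queue_id = 1,
 * stats_counter_id = 5} directs the counters of TX queue 1 of port 0
 * into per-queue stats slot 5 (q_opackets[5]/q_obytes[5]) as reported
 * by rte_eth_stats_get().
 */
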
2629 static int
2630 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2631 {
2632 	uint16_t i;
2633 	int diag;
2634 	uint8_t mapping_found = 0;
2635 
2636 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2637 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2638 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2639 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2640 					rx_queue_stats_mappings[i].queue_id,
2641 					rx_queue_stats_mappings[i].stats_counter_id);
2642 			if (diag != 0)
2643 				return diag;
2644 			mapping_found = 1;
2645 		}
2646 	}
2647 	if (mapping_found)
2648 		port->rx_queue_stats_mapping_enabled = 1;
2649 	return 0;
2650 }
2651 
2652 static void
2653 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2654 {
2655 	int diag = 0;
2656 
2657 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2658 	if (diag != 0) {
2659 		if (diag == -ENOTSUP) {
2660 			port->tx_queue_stats_mapping_enabled = 0;
2661 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2662 		}
2663 		else
2664 			rte_exit(EXIT_FAILURE,
2665 					"set_tx_queue_stats_mapping_registers "
2666 					"failed for port id=%d diag=%d\n",
2667 					pi, diag);
2668 	}
2669 
2670 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2671 	if (diag != 0) {
2672 		if (diag == -ENOTSUP) {
2673 			port->rx_queue_stats_mapping_enabled = 0;
2674 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2675 		}
2676 		else
2677 			rte_exit(EXIT_FAILURE,
2678 					"set_rx_queue_stats_mapping_registers "
2679 					"failed for port id=%d diag=%d\n",
2680 					pi, diag);
2681 	}
2682 }
2683 
2684 static void
2685 rxtx_port_config(struct rte_port *port)
2686 {
2687 	uint16_t qid;
2688 
2689 	for (qid = 0; qid < nb_rxq; qid++) {
2690 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2691 
2692 		/* Check if any Rx parameters have been passed */
2693 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2694 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2695 
2696 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2697 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2698 
2699 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2700 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2701 
2702 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2703 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2704 
2705 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2706 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2707 
2708 		port->nb_rx_desc[qid] = nb_rxd;
2709 	}
2710 
2711 	for (qid = 0; qid < nb_txq; qid++) {
2712 		port->tx_conf[qid] = port->dev_info.default_txconf;
2713 
2714 		/* Check if any Tx parameters have been passed */
2715 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2716 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2717 
2718 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2719 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2720 
2721 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2722 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2723 
2724 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2725 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2726 
2727 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2728 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2729 
2730 		port->nb_tx_desc[qid] = nb_txd;
2731 	}
2732 }
2733 
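/*
 * The RTE_PMD_PARAM_UNSET checks above keep the PMD defaults from
 * dev_info unless the user overrode them: e.g., assuming the usual
 * testpmd command-line options, "--rxfreet=32" sets rx_free_thresh to 32
 * on every RX queue while all other thresholds keep their driver
 * defaults.
 */
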
2734 void
2735 init_port_config(void)
2736 {
2737 	portid_t pid;
2738 	struct rte_port *port;
2739 
2740 	RTE_ETH_FOREACH_DEV(pid) {
2741 		port = &ports[pid];
2742 		port->dev_conf.fdir_conf = fdir_conf;
2743 		rte_eth_dev_info_get(pid, &port->dev_info);
2744 		if (nb_rxq > 1) {
2745 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2746 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2747 				rss_hf & port->dev_info.flow_type_rss_offloads;
2748 		} else {
2749 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2750 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2751 		}
2752 
2753 		if (port->dcb_flag == 0) {
2754 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2755 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2756 			else
2757 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2758 		}
2759 
2760 		rxtx_port_config(port);
2761 
2762 		rte_eth_macaddr_get(pid, &port->eth_addr);
2763 
2764 		map_port_queue_stats_mapping_registers(pid, port);
2765 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2766 		rte_pmd_ixgbe_bypass_init(pid);
2767 #endif
2768 
2769 		if (lsc_interrupt &&
2770 		    (rte_eth_devices[pid].data->dev_flags &
2771 		     RTE_ETH_DEV_INTR_LSC))
2772 			port->dev_conf.intr_conf.lsc = 1;
2773 		if (rmv_interrupt &&
2774 		    (rte_eth_devices[pid].data->dev_flags &
2775 		     RTE_ETH_DEV_INTR_RMV))
2776 			port->dev_conf.intr_conf.rmv = 1;
2777 	}
2778 }
2779 
2780 void set_port_slave_flag(portid_t slave_pid)
2781 {
2782 	struct rte_port *port;
2783 
2784 	port = &ports[slave_pid];
2785 	port->slave_flag = 1;
2786 }
2787 
2788 void clear_port_slave_flag(portid_t slave_pid)
2789 {
2790 	struct rte_port *port;
2791 
2792 	port = &ports[slave_pid];
2793 	port->slave_flag = 0;
2794 }
2795 
2796 uint8_t port_is_bonding_slave(portid_t slave_pid)
2797 {
2798 	struct rte_port *port;
2799 
2800 	port = &ports[slave_pid];
2801 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2802 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2803 		return 1;
2804 	return 0;
2805 }
2806 
2807 const uint16_t vlan_tags[] = {
2808 		0,  1,  2,  3,  4,  5,  6,  7,
2809 		8,  9, 10, 11,  12, 13, 14, 15,
2810 		16, 17, 18, 19, 20, 21, 22, 23,
2811 		24, 25, 26, 27, 28, 29, 30, 31
2812 };
2813 
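/*
 * Worked example of the mappings built below: with num_tcs == ETH_4_TCS,
 * the eight user priorities map to traffic classes {0, 1, 2, 3, 0, 1,
 * 2, 3} (dcb_tc[i] = i % 4), and in VT mode each of the 32 VLAN tags
 * above lands in its own VMDq pool (pools = 1 << (i % 32)).
 */
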
2814 static int
2815 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2816 		 enum dcb_mode_enable dcb_mode,
2817 		 enum rte_eth_nb_tcs num_tcs,
2818 		 uint8_t pfc_en)
2819 {
2820 	uint8_t i;
2821 	int32_t rc;
2822 	struct rte_eth_rss_conf rss_conf;
2823 
2824 	/*
2825 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2826 	 * given above, and the number of traffic classes available for use.
2827 	 */
2828 	if (dcb_mode == DCB_VT_ENABLED) {
2829 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2830 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2831 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2832 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2833 
2834 		/* VMDQ+DCB RX and TX configurations */
2835 		vmdq_rx_conf->enable_default_pool = 0;
2836 		vmdq_rx_conf->default_pool = 0;
2837 		vmdq_rx_conf->nb_queue_pools =
2838 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2839 		vmdq_tx_conf->nb_queue_pools =
2840 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2841 
2842 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2843 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2844 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2845 			vmdq_rx_conf->pool_map[i].pools =
2846 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2847 		}
2848 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2849 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2850 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2851 		}
2852 
2853 		/* set DCB mode of RX and TX of multiple queues */
2854 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2855 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2856 	} else {
2857 		struct rte_eth_dcb_rx_conf *rx_conf =
2858 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2859 		struct rte_eth_dcb_tx_conf *tx_conf =
2860 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2861 
2862 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2863 		if (rc != 0)
2864 			return rc;
2865 
2866 		rx_conf->nb_tcs = num_tcs;
2867 		tx_conf->nb_tcs = num_tcs;
2868 
2869 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2870 			rx_conf->dcb_tc[i] = i % num_tcs;
2871 			tx_conf->dcb_tc[i] = i % num_tcs;
2872 		}
2873 
2874 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2875 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
2876 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2877 	}
2878 
2879 	if (pfc_en)
2880 		eth_conf->dcb_capability_en =
2881 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2882 	else
2883 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2884 
2885 	return 0;
2886 }
2887 
2888 int
2889 init_port_dcb_config(portid_t pid,
2890 		     enum dcb_mode_enable dcb_mode,
2891 		     enum rte_eth_nb_tcs num_tcs,
2892 		     uint8_t pfc_en)
2893 {
2894 	struct rte_eth_conf port_conf;
2895 	struct rte_port *rte_port;
2896 	int retval;
2897 	uint16_t i;
2898 
2899 	rte_port = &ports[pid];
2900 
2901 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2902 	/* Enter DCB configuration status */
2903 	dcb_config = 1;
2904 
2905 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2906 	port_conf.txmode = rte_port->dev_conf.txmode;
2907 
2908 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2909 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2910 	if (retval < 0)
2911 		return retval;
2912 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2913 
2914 	/* Re-configure the device. */
2915 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2916 
2917 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2918 
2919 	/* If dev_info.vmdq_pool_base is greater than 0,
2920 	 * the queue ids of the VMDq pools start after the PF queues.
2921 	 */
2922 	if (dcb_mode == DCB_VT_ENABLED &&
2923 	    rte_port->dev_info.vmdq_pool_base > 0) {
2924 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2925 			" for port %d.\n", pid);
2926 		return -1;
2927 	}
2928 
2929 	/* Assume the ports in testpmd have the same DCB capability
2930 	 * and the same number of rxq and txq in DCB mode.
2931 	 */
2932 	if (dcb_mode == DCB_VT_ENABLED) {
2933 		if (rte_port->dev_info.max_vfs > 0) {
2934 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2935 			nb_txq = rte_port->dev_info.nb_tx_queues;
2936 		} else {
2937 			nb_rxq = rte_port->dev_info.max_rx_queues;
2938 			nb_txq = rte_port->dev_info.max_tx_queues;
2939 		}
2940 	} else {
2941 		/* if VT is disabled, use all PF queues */
2942 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2943 			nb_rxq = rte_port->dev_info.max_rx_queues;
2944 			nb_txq = rte_port->dev_info.max_tx_queues;
2945 		} else {
2946 			nb_rxq = (queueid_t)num_tcs;
2947 			nb_txq = (queueid_t)num_tcs;
2948 
2949 		}
2950 	}
2951 	rx_free_thresh = 64;
2952 
2953 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2954 
2955 	rxtx_port_config(rte_port);
2956 	/* VLAN filter */
2957 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2958 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2959 		rx_vft_set(pid, vlan_tags[i], 1);
2960 
2961 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2962 	map_port_queue_stats_mapping_registers(pid, rte_port);
2963 
2964 	rte_port->dcb_flag = 1;
2965 
2966 	return 0;
2967 }
2968 
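/*
 * Hypothetical call, as issued from the DCB configuration command for a
 * stopped port: enable plain DCB with four traffic classes and priority
 * flow control on port 0:
 *
 *	if (init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) != 0)
 *		printf("DCB configuration failed\n");
 */
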
2969 static void
2970 init_port(void)
2971 {
2972 	/* Configuration of Ethernet ports. */
2973 	ports = rte_zmalloc("testpmd: ports",
2974 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2975 			    RTE_CACHE_LINE_SIZE);
2976 	if (ports == NULL) {
2977 		rte_exit(EXIT_FAILURE,
2978 				"rte_zmalloc(%d struct rte_port) failed\n",
2979 				RTE_MAX_ETHPORTS);
2980 	}
2981 
2982 	/* Initialize ports NUMA structures */
2983 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2984 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2985 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2986 }
2987 
2988 static void
2989 force_quit(void)
2990 {
2991 	pmd_test_exit();
2992 	prompt_exit();
2993 }
2994 
2995 static void
2996 print_stats(void)
2997 {
2998 	uint8_t i;
2999 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3000 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3001 
3002 	/* Clear screen and move to top left */
3003 	printf("%s%s", clr, top_left);
3004 
3005 	printf("\nPort statistics ====================================");
3006 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3007 		nic_stats_display(fwd_ports_ids[i]);
3008 }
3009 
3010 static void
3011 signal_handler(int signum)
3012 {
3013 	if (signum == SIGINT || signum == SIGTERM) {
3014 		printf("\nSignal %d received, preparing to exit...\n",
3015 				signum);
3016 #ifdef RTE_LIBRTE_PDUMP
3017 		/* uninitialize packet capture framework */
3018 		rte_pdump_uninit();
3019 #endif
3020 #ifdef RTE_LIBRTE_LATENCY_STATS
3021 		rte_latencystats_uninit();
3022 #endif
3023 		force_quit();
3024 		/* Set flag to indicate forced termination. */
3025 		f_quit = 1;
3026 		/* exit with the expected status */
3027 		signal(signum, SIG_DFL);
3028 		kill(getpid(), signum);
3029 	}
3030 }
3031 
3032 int
3033 main(int argc, char** argv)
3034 {
3035 	int diag;
3036 	portid_t port_id;
3037 	uint16_t count;
3038 	int ret;
3039 
3040 	signal(SIGINT, signal_handler);
3041 	signal(SIGTERM, signal_handler);
3042 
3043 	diag = rte_eal_init(argc, argv);
3044 	if (diag < 0)
3045 		rte_panic("Cannot init EAL\n");
3046 
3047 	testpmd_logtype = rte_log_register("testpmd");
3048 	if (testpmd_logtype < 0)
3049 		rte_panic("Cannot register log type");
3050 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3051 
3052 #ifdef RTE_LIBRTE_PDUMP
3053 	/* initialize packet capture framework */
3054 	rte_pdump_init(NULL);
3055 #endif
3056 
3057 	count = 0;
3058 	RTE_ETH_FOREACH_DEV(port_id) {
3059 		ports_ids[count] = port_id;
3060 		count++;
3061 	}
3062 	nb_ports = (portid_t) count;
3063 	if (nb_ports == 0)
3064 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3065 
3066 	/* allocate port structures, and init them */
3067 	init_port();
3068 
3069 	set_def_fwd_config();
3070 	if (nb_lcores == 0)
3071 		rte_panic("Empty set of forwarding logical cores - check the "
3072 			  "core mask supplied in the command parameters\n");
3073 
3074 	/* Bitrate/latency stats disabled by default */
3075 #ifdef RTE_LIBRTE_BITRATE
3076 	bitrate_enabled = 0;
3077 #endif
3078 #ifdef RTE_LIBRTE_LATENCY_STATS
3079 	latencystats_enabled = 0;
3080 #endif
3081 
3082 	/* on FreeBSD, mlockall() is disabled by default */
3083 #ifdef RTE_EXEC_ENV_BSDAPP
3084 	do_mlockall = 0;
3085 #else
3086 	do_mlockall = 1;
3087 #endif
3088 
3089 	argc -= diag;
3090 	argv += diag;
3091 	if (argc > 1)
3092 		launch_args_parse(argc, argv);
3093 
3094 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3095 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3096 			strerror(errno));
3097 	}
3098 
3099 	if (tx_first && interactive)
3100 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3101 				"interactive mode.\n");
3102 
3103 	if (tx_first && lsc_interrupt) {
3104 		printf("Warning: lsc_interrupt needs to be off when "
3105 				"using tx_first. Disabling.\n");
3106 		lsc_interrupt = 0;
3107 	}
3108 
3109 	if (!nb_rxq && !nb_txq)
3110 		printf("Warning: Either rx or tx queues should be non-zero\n");
3111 
3112 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3113 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3114 		       "but nb_txq=%d will prevent it from being fully tested.\n",
3115 		       nb_rxq, nb_txq);
3116 
3117 	init_config();
3118 
3119 	if (hot_plug) {
3120 		ret = rte_dev_hotplug_handle_enable();
3121 		if (ret) {
3122 			RTE_LOG(ERR, EAL,
3123 				"fail to enable hotplug handling.\n");
3124 			return -1;
3125 		}
3126 
3127 		ret = rte_dev_event_monitor_start();
3128 		if (ret) {
3129 			RTE_LOG(ERR, EAL,
3130 				"fail to start device event monitoring.\n");
3131 			return -1;
3132 		}
3133 
3134 		ret = rte_dev_event_callback_register(NULL,
3135 			eth_dev_event_callback, NULL);
3136 		if (ret) {
3137 			RTE_LOG(ERR, EAL,
3138 				"fail to register device event callback.\n");
3139 			return -1;
3140 		}
3141 	}
3142 
3143 	if (start_port(RTE_PORT_ALL) != 0)
3144 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3145 
3146 	/* set all ports to promiscuous mode by default */
3147 	RTE_ETH_FOREACH_DEV(port_id)
3148 		rte_eth_promiscuous_enable(port_id);
3149 
3150 	/* Init metrics library */
3151 	rte_metrics_init(rte_socket_id());
3152 
3153 #ifdef RTE_LIBRTE_LATENCY_STATS
3154 	if (latencystats_enabled != 0) {
3155 		int ret = rte_latencystats_init(1, NULL);
3156 		if (ret)
3157 			printf("Warning: latencystats init()"
3158 				" returned error %d\n", ret);
3159 		printf("Latencystats running on lcore %d\n",
3160 			latencystats_lcore_id);
3161 	}
3162 #endif
3163 
3164 	/* Setup bitrate stats */
3165 #ifdef RTE_LIBRTE_BITRATE
3166 	if (bitrate_enabled != 0) {
3167 		bitrate_data = rte_stats_bitrate_create();
3168 		if (bitrate_data == NULL)
3169 			rte_exit(EXIT_FAILURE,
3170 				"Could not allocate bitrate data.\n");
3171 		rte_stats_bitrate_reg(bitrate_data);
3172 	}
3173 #endif
3174 
3175 #ifdef RTE_LIBRTE_CMDLINE
3176 	if (strlen(cmdline_filename) != 0)
3177 		cmdline_read_from_file(cmdline_filename);
3178 
3179 	if (interactive == 1) {
3180 		if (auto_start) {
3181 			printf("Start automatic packet forwarding\n");
3182 			start_packet_forwarding(0);
3183 		}
3184 		prompt();
3185 		pmd_test_exit();
3186 	} else
3187 #endif
3188 	{
3189 		char c;
3190 		int rc;
3191 
3192 		f_quit = 0;
3193 
3194 		printf("No commandline core given, start packet forwarding\n");
3195 		start_packet_forwarding(tx_first);
3196 		if (stats_period != 0) {
3197 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3198 			uint64_t timer_period;
3199 
3200 			/* Convert to number of cycles */
3201 			timer_period = stats_period * rte_get_timer_hz();
3202 
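			/*
			 * Example of the conversion above, assuming a
			 * hypothetical 2 GHz timer: stats_period = 5 gives
			 * timer_period = 5 * 2e9 = 1e10 cycles, so stats are
			 * printed about every 5 seconds.
			 */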
3203 			while (f_quit == 0) {
3204 				cur_time = rte_get_timer_cycles();
3205 				diff_time += cur_time - prev_time;
3206 
3207 				if (diff_time >= timer_period) {
3208 					print_stats();
3209 					/* Reset the timer */
3210 					diff_time = 0;
3211 				}
3212 				/* Sleep to avoid unnecessary checks */
3213 				prev_time = cur_time;
3214 				sleep(1);
3215 			}
3216 		}
3217 
3218 		printf("Press enter to exit\n");
3219 		rc = read(0, &c, 1);
3220 		pmd_test_exit();
3221 		if (rc < 0)
3222 			return 1;
3223 	}
3224 
3225 	return 0;
3226 }
3227