xref: /dpdk/app/test-pmd/testpmd.c (revision 97b5d8b54511440d1dea977442401dca59000cb8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 #include <stdbool.h>
16 
17 #include <sys/queue.h>
18 #include <sys/stat.h>
19 
20 #include <stdint.h>
21 #include <unistd.h>
22 #include <inttypes.h>
23 
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
27 #include <rte_log.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
34 #include <rte_eal.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
42 #include <rte_mbuf.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_dev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
52 #endif
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
55 #endif
56 #include <rte_flow.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
60 #endif
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
63 #endif
64 
65 #include "testpmd.h"
66 
67 #ifndef MAP_HUGETLB
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
70 #else
71 #define HUGE_FLAG MAP_HUGETLB
72 #endif
73 
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
77 #else
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
79 #endif
80 
81 #define EXTMEM_HEAP_NAME "extmem"
82 
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
85 
86 /* use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
89 uint8_t tx_first;
90 char cmdline_filename[PATH_MAX] = {0};
91 
92 /*
93  * NUMA support configuration.
94  * When set, the NUMA support attempts to dispatch the allocation of the
95  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96  * probed ports among the CPU sockets 0 and 1.
97  * Otherwise, all memory is allocated from CPU socket 0.
98  */
99 uint8_t numa_support = 1; /**< numa enabled by default */
100 
101 /*
102  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103  * not configured.
104  */
105 uint8_t socket_num = UMA_NO_CONFIG;
106 
107 /*
108  * Select mempool allocation type:
109  * - native: use regular DPDK memory
110  * - anon: use regular DPDK memory to create mempool, but populate using
111  *         anonymous memory (may not be IOVA-contiguous)
112  * - xmem: use externally allocated hugepage memory
113  */
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115 
116 /*
117  * Store the sockets on which the memory pools used by the ports
118  * are allocated.
119  */
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
121 
122 /*
123  * Store the sockets on which the RX rings used by the ports
124  * are allocated.
125  */
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
127 
128 /*
129  * Store the sockets on which the TX rings used by the ports
130  * are allocated.
131  */
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
133 
134 /*
135  * Record the Ethernet address of peer target ports to which packets are
136  * forwarded.
137  * Must be instantiated with the ethernet addresses of peer traffic generator
138  * ports.
139  */
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
142 
143 /*
144  * Probed Target Environment.
145  */
146 struct rte_port *ports;	       /**< For all probed ethernet ports. */
147 portid_t nb_ports;             /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150 
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
152 
153 /*
154  * Test Forwarding Configuration.
155  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157  */
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162 
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165 
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168 
169 /*
170  * Forwarding engines.
171  */
172 struct fwd_engine * fwd_engines[] = {
173 	&io_fwd_engine,
174 	&mac_fwd_engine,
175 	&mac_swap_engine,
176 	&flow_gen_engine,
177 	&rx_only_engine,
178 	&tx_only_engine,
179 	&csum_fwd_engine,
180 	&icmp_echo_engine,
181 	&noisy_vnf_engine,
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
183 	&softnic_fwd_engine,
184 #endif
185 #ifdef RTE_LIBRTE_IEEE1588
186 	&ieee1588_fwd_engine,
187 #endif
188 	NULL,
189 };
190 
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196 
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199                                       * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201 
202 /*
203  * In a container, the process running with the 'stats-period' option cannot
204  * be terminated. Set a flag to exit the stats period loop on SIGINT/SIGTERM.
205  */
206 uint8_t f_quit;
207 
208 /*
209  * Configuration of packet segments used by the "txonly" processing engine.
210  */
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 	TXONLY_DEF_PACKET_LEN,
214 };
215 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216 
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
219 
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222 
223 /* Whether the current configuration is in DCB mode; 0 means it is not. */
224 uint8_t dcb_config = 0;
225 
226 /* Whether DCB is in testing status */
227 uint8_t dcb_test = 0;
228 
229 /*
230  * Configurable number of RX/TX queues.
231  */
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
234 
235 /*
236  * Configurable number of RX/TX ring descriptors.
237  * Defaults are supplied by drivers via ethdev.
238  */
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
243 
244 #define RTE_PMD_PARAM_UNSET -1
245 /*
246  * Configurable values of RX and TX ring threshold registers.
247  */
248 
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252 
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
256 
257 /*
258  * Configurable value of RX free threshold.
259  */
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
261 
262 /*
263  * Configurable value of RX drop enable.
264  */
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
266 
267 /*
268  * Configurable value of TX free threshold.
269  */
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
271 
272 /*
273  * Configurable value of TX RS bit threshold.
274  */
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
276 
277 /*
278  * Configurable value of buffered packets before sending.
279  */
280 uint16_t noisy_tx_sw_bufsz;
281 
282 /*
283  * Configurable value of packet buffer timeout.
284  */
285 uint16_t noisy_tx_sw_buf_flush_time;
286 
287 /*
288  * Configurable value for size of VNF internal memory area
289  * used for simulating noisy neighbour behaviour
290  */
291 uint64_t noisy_lkup_mem_sz;
292 
293 /*
294  * Configurable value of number of random writes done in
295  * VNF simulation memory area.
296  */
297 uint64_t noisy_lkup_num_writes;
298 
299 /*
300  * Configurable value of number of random reads done in
301  * VNF simulation memory area.
302  */
303 uint64_t noisy_lkup_num_reads;
304 
305 /*
306  * Configurable value of number of random reads/writes done in
307  * VNF simulation memory area.
308  */
309 uint64_t noisy_lkup_num_reads_writes;
310 
311 /*
312  * Receive Side Scaling (RSS) configuration.
313  */
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
315 
316 /*
317  * Port topology configuration
318  */
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
320 
321 /*
322  * Avoids to flush all the RX streams before starts forwarding.
323  */
324 uint8_t no_flush_rx = 0; /* flush by default */
325 
326 /*
327  * Flow API isolated mode.
328  */
329 uint8_t flow_isolate_all;
330 
331 /*
332  * Avoids to check link status when starting/stopping a port.
333  */
334 uint8_t no_link_check = 0; /* check by default */
335 
336 /*
337  * Enable link status change notification
338  */
339 uint8_t lsc_interrupt = 1; /* enabled by default */
340 
341 /*
342  * Enable device removal notification.
343  */
344 uint8_t rmv_interrupt = 1; /* enabled by default */
345 
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
347 
348 /* Pretty printing of ethdev events */
349 static const char * const eth_event_desc[] = {
350 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
351 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
352 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
353 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
354 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
355 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
356 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
357 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
358 	[RTE_ETH_EVENT_NEW] = "device probed",
359 	[RTE_ETH_EVENT_DESTROY] = "device released",
360 	[RTE_ETH_EVENT_MAX] = NULL,
361 };
362 
363 /*
364  * Display or mask ether events
365  * Default to all events except VF_MBOX
366  */
367 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
368 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
369 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
370 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
371 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
372 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
373 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
374 /*
375  * Decide if all memory is locked for performance.
376  */
377 int do_mlockall = 0;
378 
379 /*
380  * NIC bypass mode configuration options.
381  */
382 
383 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
384 /* The NIC bypass watchdog timeout. */
385 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
386 #endif
387 
388 
389 #ifdef RTE_LIBRTE_LATENCY_STATS
390 
391 /*
392  * Set when latency stats is enabled in the commandline
393  */
394 uint8_t latencystats_enabled;
395 
396 /*
397  * Lcore ID to serve latency statistics.
398  */
399 lcoreid_t latencystats_lcore_id = -1;
400 
401 #endif
402 
403 /*
404  * Ethernet device configuration.
405  */
406 struct rte_eth_rxmode rx_mode = {
407 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
408 };
409 
410 struct rte_eth_txmode tx_mode = {
411 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
412 };
413 
414 struct rte_fdir_conf fdir_conf = {
415 	.mode = RTE_FDIR_MODE_NONE,
416 	.pballoc = RTE_FDIR_PBALLOC_64K,
417 	.status = RTE_FDIR_REPORT_STATUS,
418 	.mask = {
419 		.vlan_tci_mask = 0xFFEF,
420 		.ipv4_mask     = {
421 			.src_ip = 0xFFFFFFFF,
422 			.dst_ip = 0xFFFFFFFF,
423 		},
424 		.ipv6_mask     = {
425 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
426 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
427 		},
428 		.src_port_mask = 0xFFFF,
429 		.dst_port_mask = 0xFFFF,
430 		.mac_addr_byte_mask = 0xFF,
431 		.tunnel_type_mask = 1,
432 		.tunnel_id_mask = 0xFFFFFFFF,
433 	},
434 	.drop_queue = 127,
435 };
436 
437 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
438 
439 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
440 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
441 
442 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
443 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
444 
445 uint16_t nb_tx_queue_stats_mappings = 0;
446 uint16_t nb_rx_queue_stats_mappings = 0;
447 
448 /*
449  * Display zero values by default for xstats
450  */
451 uint8_t xstats_hide_zero;
452 
453 unsigned int num_sockets = 0;
454 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
455 
456 #ifdef RTE_LIBRTE_BITRATE
457 /* Bitrate statistics */
458 struct rte_stats_bitrates *bitrate_data;
459 lcoreid_t bitrate_lcore_id;
460 uint8_t bitrate_enabled;
461 #endif
462 
463 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
464 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
465 
466 struct vxlan_encap_conf vxlan_encap_conf = {
467 	.select_ipv4 = 1,
468 	.select_vlan = 0,
469 	.vni = "\x00\x00\x00",
470 	.udp_src = 0,
471 	.udp_dst = RTE_BE16(4789),
472 	.ipv4_src = IPv4(127, 0, 0, 1),
473 	.ipv4_dst = IPv4(255, 255, 255, 255),
474 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 		"\x00\x00\x00\x00\x00\x00\x00\x01",
476 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 		"\x00\x00\x00\x00\x00\x00\x11\x11",
478 	.vlan_tci = 0,
479 	.eth_src = "\x00\x00\x00\x00\x00\x00",
480 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
481 };
482 
483 struct nvgre_encap_conf nvgre_encap_conf = {
484 	.select_ipv4 = 1,
485 	.select_vlan = 0,
486 	.tni = "\x00\x00\x00",
487 	.ipv4_src = IPv4(127, 0, 0, 1),
488 	.ipv4_dst = IPv4(255, 255, 255, 255),
489 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
490 		"\x00\x00\x00\x00\x00\x00\x00\x01",
491 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
492 		"\x00\x00\x00\x00\x00\x00\x11\x11",
493 	.vlan_tci = 0,
494 	.eth_src = "\x00\x00\x00\x00\x00\x00",
495 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
496 };
497 
498 /* Forward function declarations */
499 static void setup_attached_port(portid_t pi);
500 static void map_port_queue_stats_mapping_registers(portid_t pi,
501 						   struct rte_port *port);
502 static void check_all_ports_link_status(uint32_t port_mask);
503 static int eth_event_callback(portid_t port_id,
504 			      enum rte_eth_event_type type,
505 			      void *param, void *ret_param);
506 static void eth_dev_event_callback(const char *device_name,
507 				enum rte_dev_event_type type,
508 				void *param);
509 
510 /*
511  * Check if all the ports are started.
512  * If yes, return positive value. If not, return zero.
513  */
514 static int all_ports_started(void);
515 
516 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
517 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
518 
519 /*
520  * Helper function to check if a socket has not been discovered yet.
521  * Return a positive value if it is new, zero if it was already seen.
522  */
523 int
524 new_socket_id(unsigned int socket_id)
525 {
526 	unsigned int i;
527 
528 	for (i = 0; i < num_sockets; i++) {
529 		if (socket_ids[i] == socket_id)
530 			return 0;
531 	}
532 	return 1;
533 }
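/*
 * Usage sketch (illustrative values only): with socket_ids = {0, 1} and
 * num_sockets = 2, new_socket_id(1) returns 0 (already known) while
 * new_socket_id(3) returns 1 (not seen yet).
 */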
534 
535 /*
536  * Setup default configuration.
537  */
538 static void
539 set_default_fwd_lcores_config(void)
540 {
541 	unsigned int i;
542 	unsigned int nb_lc;
543 	unsigned int sock_num;
544 
545 	nb_lc = 0;
546 	for (i = 0; i < RTE_MAX_LCORE; i++) {
547 		if (!rte_lcore_is_enabled(i))
548 			continue;
549 		sock_num = rte_lcore_to_socket_id(i);
550 		if (new_socket_id(sock_num)) {
551 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
552 				rte_exit(EXIT_FAILURE,
553 					 "Total sockets greater than %u\n",
554 					 RTE_MAX_NUMA_NODES);
555 			}
556 			socket_ids[num_sockets++] = sock_num;
557 		}
558 		if (i == rte_get_master_lcore())
559 			continue;
560 		fwd_lcores_cpuids[nb_lc++] = i;
561 	}
562 	nb_lcores = (lcoreid_t) nb_lc;
563 	nb_cfg_lcores = nb_lcores;
564 	nb_fwd_lcores = 1;
565 }
566 
567 static void
568 set_def_peer_eth_addrs(void)
569 {
570 	portid_t i;
571 
572 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
573 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
574 		peer_eth_addrs[i].addr_bytes[5] = i;
575 	}
576 }
577 
578 static void
579 set_default_fwd_ports_config(void)
580 {
581 	portid_t pt_id;
582 	int i = 0;
583 
584 	RTE_ETH_FOREACH_DEV(pt_id) {
585 		fwd_ports_ids[i++] = pt_id;
586 
587 		/* Update sockets info according to the attached device */
588 		int socket_id = rte_eth_dev_socket_id(pt_id);
589 		if (socket_id >= 0 && new_socket_id(socket_id)) {
590 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
591 				rte_exit(EXIT_FAILURE,
592 					 "Total sockets greater than %u\n",
593 					 RTE_MAX_NUMA_NODES);
594 			}
595 			socket_ids[num_sockets++] = socket_id;
596 		}
597 	}
598 
599 	nb_cfg_ports = nb_ports;
600 	nb_fwd_ports = nb_ports;
601 }
602 
603 void
604 set_def_fwd_config(void)
605 {
606 	set_default_fwd_lcores_config();
607 	set_def_peer_eth_addrs();
608 	set_default_fwd_ports_config();
609 }
610 
611 /* extremely pessimistic estimation of memory required to create a mempool */
612 static int
613 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
614 {
615 	unsigned int n_pages, mbuf_per_pg, leftover;
616 	uint64_t total_mem, mbuf_mem, obj_sz;
617 
618 	/* there is no good way to predict how much space the mempool will
619 	 * occupy because it will allocate chunks on the fly, and some of those
620 	 * will come from default DPDK memory while some will come from our
621 	 * external memory, so just assume 128MB will be enough for everyone.
622 	 */
623 	uint64_t hdr_mem = 128 << 20;
624 
625 	/* account for possible non-contiguousness */
626 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
627 	if (obj_sz > pgsz) {
628 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
629 		return -1;
630 	}
631 
632 	mbuf_per_pg = pgsz / obj_sz;
633 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
634 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
635 
636 	mbuf_mem = n_pages * pgsz;
637 
638 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
639 
640 	if (total_mem > SIZE_MAX) {
641 		TESTPMD_LOG(ERR, "Memory size too big\n");
642 		return -1;
643 	}
644 	*out = (size_t)total_mem;
645 
646 	return 0;
647 }
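/*
 * Worked example (hypothetical figures; the actual obj_sz depends on the
 * mempool header/trailer sizes): with pgsz = 2 MB and obj_sz = 2560 bytes,
 * mbuf_per_pg = 2097152 / 2560 = 819. For nb_mbufs = 16384 this gives
 * n_pages = 21 (20 full pages plus one partial), mbuf_mem = 42 MB, and
 * total_mem = RTE_ALIGN(128 MB + 42 MB, 2 MB) = 170 MB.
 */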
648 
649 static inline uint32_t
650 bsf64(uint64_t v)
651 {
652 	return (uint32_t)__builtin_ctzll(v);
653 }
654 
655 static inline uint32_t
656 log2_u64(uint64_t v)
657 {
658 	if (v == 0)
659 		return 0;
660 	v = rte_align64pow2(v);
661 	return bsf64(v);
662 }
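/*
 * e.g. log2_u64(4096) = 12, and log2_u64(6000) = 13 because the value is
 * first rounded up to the next power of two (8192) before taking the bit index.
 */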
663 
664 static int
665 pagesz_flags(uint64_t page_sz)
666 {
667 	/* as per mmap() manpage, all page sizes are log2 of page size
668 	 * shifted by MAP_HUGE_SHIFT
669 	 */
670 	int log2 = log2_u64(page_sz);
671 
672 	return (log2 << HUGE_SHIFT);
673 }
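/*
 * For instance, a 2 MB huge page gives log2 = 21, so the returned value is
 * 21 << MAP_HUGE_SHIFT, which on Linux corresponds to MAP_HUGE_2MB.
 */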
674 
675 static void *
676 alloc_mem(size_t memsz, size_t pgsz, bool huge)
677 {
678 	void *addr;
679 	int flags;
680 
681 	/* allocate anonymous hugepages */
682 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
683 	if (huge)
684 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
685 
686 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
687 	if (addr == MAP_FAILED)
688 		return NULL;
689 
690 	return addr;
691 }
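/*
 * Sketch of the resulting call for a 1 GB huge page (assuming huge == true):
 *   mmap(NULL, memsz, PROT_READ | PROT_WRITE,
 *        MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | (30 << MAP_HUGE_SHIFT),
 *        -1, 0);
 */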
692 
693 struct extmem_param {
694 	void *addr;
695 	size_t len;
696 	size_t pgsz;
697 	rte_iova_t *iova_table;
698 	unsigned int iova_table_len;
699 };
700 
701 static int
702 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
703 		bool huge)
704 {
705 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
706 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
707 	unsigned int cur_page, n_pages, pgsz_idx;
708 	size_t mem_sz, cur_pgsz;
709 	rte_iova_t *iovas = NULL;
710 	void *addr;
711 	int ret;
712 
713 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
714 		/* skip anything that is too big */
715 		if (pgsizes[pgsz_idx] > SIZE_MAX)
716 			continue;
717 
718 		cur_pgsz = pgsizes[pgsz_idx];
719 
720 		/* if we were told not to allocate hugepages, override */
721 		if (!huge)
722 			cur_pgsz = sysconf(_SC_PAGESIZE);
723 
724 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
725 		if (ret < 0) {
726 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
727 			return -1;
728 		}
729 
730 		/* allocate our memory */
731 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
732 
733 		/* if we couldn't allocate memory with a specified page size,
734 		 * that doesn't mean we can't do it with other page sizes, so
735 		 * try another one.
736 		 */
737 		if (addr == NULL)
738 			continue;
739 
740 		/* store IOVA addresses for every page in this memory area */
741 		n_pages = mem_sz / cur_pgsz;
742 
743 		iovas = malloc(sizeof(*iovas) * n_pages);
744 
745 		if (iovas == NULL) {
746 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
747 			goto fail;
748 		}
749 		/* lock memory if it's not huge pages */
750 		if (!huge)
751 			mlock(addr, mem_sz);
752 
753 		/* populate IOVA addresses */
754 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
755 			rte_iova_t iova;
756 			size_t offset;
757 			void *cur;
758 
759 			offset = cur_pgsz * cur_page;
760 			cur = RTE_PTR_ADD(addr, offset);
761 
762 			/* touch the page before getting its IOVA */
763 			*(volatile char *)cur = 0;
764 
765 			iova = rte_mem_virt2iova(cur);
766 
767 			iovas[cur_page] = iova;
768 		}
769 
770 		break;
771 	}
772 	/* if we couldn't allocate anything */
773 	if (iovas == NULL)
774 		return -1;
775 
776 	param->addr = addr;
777 	param->len = mem_sz;
778 	param->pgsz = cur_pgsz;
779 	param->iova_table = iovas;
780 	param->iova_table_len = n_pages;
781 
782 	return 0;
783 fail:
784 	if (iovas)
785 		free(iovas);
786 	if (addr)
787 		munmap(addr, mem_sz);
788 
789 	return -1;
790 }
791 
792 static int
793 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
794 {
795 	struct extmem_param param;
796 	int socket_id, ret;
797 
798 	memset(&param, 0, sizeof(param));
799 
800 	/* check if our heap exists */
801 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
802 	if (socket_id < 0) {
803 		/* create our heap */
804 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
805 		if (ret < 0) {
806 			TESTPMD_LOG(ERR, "Cannot create heap\n");
807 			return -1;
808 		}
809 	}
810 
811 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
812 	if (ret < 0) {
813 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
814 		return -1;
815 	}
816 
817 	/* we now have a valid memory area, so add it to heap */
818 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
819 			param.addr, param.len, param.iova_table,
820 			param.iova_table_len, param.pgsz);
821 
822 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
823 
824 	/* not needed any more */
825 	free(param.iova_table);
826 
827 	if (ret < 0) {
828 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
829 		munmap(param.addr, param.len);
830 		return -1;
831 	}
832 
833 	/* success */
834 
835 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
836 			param.len >> 20);
837 
838 	return 0;
839 }
840 
841 /*
842  * Configuration initialisation done once at init time.
843  */
844 static void
845 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
846 		 unsigned int socket_id)
847 {
848 	char pool_name[RTE_MEMPOOL_NAMESIZE];
849 	struct rte_mempool *rte_mp = NULL;
850 	uint32_t mb_size;
851 
852 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
853 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
854 
855 	TESTPMD_LOG(INFO,
856 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
857 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
858 
859 	switch (mp_alloc_type) {
860 	case MP_ALLOC_NATIVE:
861 		{
862 			/* wrapper to rte_mempool_create() */
863 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
864 					rte_mbuf_best_mempool_ops());
865 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
866 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
867 			break;
868 		}
869 	case MP_ALLOC_ANON:
870 		{
871 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
872 				mb_size, (unsigned int) mb_mempool_cache,
873 				sizeof(struct rte_pktmbuf_pool_private),
874 				socket_id, 0);
875 			if (rte_mp == NULL)
876 				goto err;
877 
878 			if (rte_mempool_populate_anon(rte_mp) == 0) {
879 				rte_mempool_free(rte_mp);
880 				rte_mp = NULL;
881 				goto err;
882 			}
883 			rte_pktmbuf_pool_init(rte_mp, NULL);
884 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
885 			break;
886 		}
887 	case MP_ALLOC_XMEM:
888 	case MP_ALLOC_XMEM_HUGE:
889 		{
890 			int heap_socket;
891 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
892 
893 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
894 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
895 
896 			heap_socket =
897 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
898 			if (heap_socket < 0)
899 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
900 
901 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
902 					rte_mbuf_best_mempool_ops());
903 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
904 					mb_mempool_cache, 0, mbuf_seg_size,
905 					heap_socket);
906 			break;
907 		}
908 	default:
909 		{
910 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
911 		}
912 	}
913 
914 err:
915 	if (rte_mp == NULL) {
916 		rte_exit(EXIT_FAILURE,
917 			"Creation of mbuf pool for socket %u failed: %s\n",
918 			socket_id, rte_strerror(rte_errno));
919 	} else if (verbose_level > 0) {
920 		rte_mempool_dump(stdout, rte_mp);
921 	}
922 }
923 
924 /*
925  * Check given socket id is valid or not with NUMA mode,
926  * if valid, return 0, else return -1
927  */
928 static int
929 check_socket_id(const unsigned int socket_id)
930 {
931 	static int warning_once = 0;
932 
933 	if (new_socket_id(socket_id)) {
934 		if (!warning_once && numa_support)
935 			printf("Warning: NUMA should be configured manually by"
936 			       " using --port-numa-config and"
937 			       " --ring-numa-config parameters along with"
938 			       " --numa.\n");
939 		warning_once = 1;
940 		return -1;
941 	}
942 	return 0;
943 }
944 
945 /*
946  * Get the allowed maximum number of RX queues.
947  * *pid return the port id which has minimal value of
948  * max_rx_queues in all ports.
949  */
950 queueid_t
951 get_allowed_max_nb_rxq(portid_t *pid)
952 {
953 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
954 	portid_t pi;
955 	struct rte_eth_dev_info dev_info;
956 
957 	RTE_ETH_FOREACH_DEV(pi) {
958 		rte_eth_dev_info_get(pi, &dev_info);
959 		if (dev_info.max_rx_queues < allowed_max_rxq) {
960 			allowed_max_rxq = dev_info.max_rx_queues;
961 			*pid = pi;
962 		}
963 	}
964 	return allowed_max_rxq;
965 }
966 
967 /*
968  * Check whether the input rxq is valid.
969  * It is valid if it does not exceed the maximum number
970  * of RX queues of any port.
971  * Return 0 if valid, -1 otherwise.
972  */
973 int
974 check_nb_rxq(queueid_t rxq)
975 {
976 	queueid_t allowed_max_rxq;
977 	portid_t pid = 0;
978 
979 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
980 	if (rxq > allowed_max_rxq) {
981 		printf("Fail: input rxq (%u) can't be greater "
982 		       "than max_rx_queues (%u) of port %u\n",
983 		       rxq,
984 		       allowed_max_rxq,
985 		       pid);
986 		return -1;
987 	}
988 	return 0;
989 }
990 
991 /*
992  * Get the allowed maximum number of TX queues.
993  * *pid return the port id which has minimal value of
994  * max_tx_queues in all ports.
995  */
996 queueid_t
997 get_allowed_max_nb_txq(portid_t *pid)
998 {
999 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
1000 	portid_t pi;
1001 	struct rte_eth_dev_info dev_info;
1002 
1003 	RTE_ETH_FOREACH_DEV(pi) {
1004 		rte_eth_dev_info_get(pi, &dev_info);
1005 		if (dev_info.max_tx_queues < allowed_max_txq) {
1006 			allowed_max_txq = dev_info.max_tx_queues;
1007 			*pid = pi;
1008 		}
1009 	}
1010 	return allowed_max_txq;
1011 }
1012 
1013 /*
1014  * Check whether the input txq is valid.
1015  * It is valid if it does not exceed the maximum number
1016  * of TX queues of any port.
1017  * Return 0 if valid, -1 otherwise.
1018  */
1019 int
1020 check_nb_txq(queueid_t txq)
1021 {
1022 	queueid_t allowed_max_txq;
1023 	portid_t pid = 0;
1024 
1025 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
1026 	if (txq > allowed_max_txq) {
1027 		printf("Fail: input txq (%u) can't be greater "
1028 		       "than max_tx_queues (%u) of port %u\n",
1029 		       txq,
1030 		       allowed_max_txq,
1031 		       pid);
1032 		return -1;
1033 	}
1034 	return 0;
1035 }
1036 
1037 static void
1038 init_config(void)
1039 {
1040 	portid_t pid;
1041 	struct rte_port *port;
1042 	struct rte_mempool *mbp;
1043 	unsigned int nb_mbuf_per_pool;
1044 	lcoreid_t  lc_id;
1045 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1046 	struct rte_gro_param gro_param;
1047 	uint32_t gso_types;
1048 	int k;
1049 
1050 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1051 
1052 	/* Configuration of logical cores. */
1053 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1054 				sizeof(struct fwd_lcore *) * nb_lcores,
1055 				RTE_CACHE_LINE_SIZE);
1056 	if (fwd_lcores == NULL) {
1057 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1058 							"failed\n", nb_lcores);
1059 	}
1060 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1061 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1062 					       sizeof(struct fwd_lcore),
1063 					       RTE_CACHE_LINE_SIZE);
1064 		if (fwd_lcores[lc_id] == NULL) {
1065 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1066 								"failed\n");
1067 		}
1068 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1069 	}
1070 
1071 	RTE_ETH_FOREACH_DEV(pid) {
1072 		port = &ports[pid];
1073 		/* Apply default TxRx configuration for all ports */
1074 		port->dev_conf.txmode = tx_mode;
1075 		port->dev_conf.rxmode = rx_mode;
1076 		rte_eth_dev_info_get(pid, &port->dev_info);
1077 
1078 		if (!(port->dev_info.tx_offload_capa &
1079 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1080 			port->dev_conf.txmode.offloads &=
1081 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1082 		if (!(port->dev_info.tx_offload_capa &
1083 			DEV_TX_OFFLOAD_MATCH_METADATA))
1084 			port->dev_conf.txmode.offloads &=
1085 				~DEV_TX_OFFLOAD_MATCH_METADATA;
1086 		if (numa_support) {
1087 			if (port_numa[pid] != NUMA_NO_CONFIG)
1088 				port_per_socket[port_numa[pid]]++;
1089 			else {
1090 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
1091 
1092 				/*
1093 				 * if socket_id is invalid,
1094 				 * set to the first available socket.
1095 				 */
1096 				if (check_socket_id(socket_id) < 0)
1097 					socket_id = socket_ids[0];
1098 				port_per_socket[socket_id]++;
1099 			}
1100 		}
1101 
1102 		/* Apply Rx offloads configuration */
1103 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1104 			port->rx_conf[k].offloads =
1105 				port->dev_conf.rxmode.offloads;
1106 		/* Apply Tx offloads configuration */
1107 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1108 			port->tx_conf[k].offloads =
1109 				port->dev_conf.txmode.offloads;
1110 
1111 		/* set flag to initialize port/queue */
1112 		port->need_reconfig = 1;
1113 		port->need_reconfig_queues = 1;
1114 		port->tx_metadata = 0;
1115 	}
1116 
1117 	/*
1118 	 * Create pools of mbuf.
1119 	 * If NUMA support is disabled, create a single pool of mbuf in
1120 	 * socket 0 memory by default.
1121 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1122 	 *
1123 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1124 	 * nb_txd can be configured at run time.
1125 	 */
1126 	if (param_total_num_mbufs)
1127 		nb_mbuf_per_pool = param_total_num_mbufs;
1128 	else {
1129 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1130 			(nb_lcores * mb_mempool_cache) +
1131 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1132 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1133 	}
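	/*
	 * Illustration with assumed build-time defaults (the real values come
	 * from testpmd.h): if RTE_TEST_RX_DESC_MAX and RTE_TEST_TX_DESC_MAX
	 * were 2048 each, with 4 lcores, a 250-mbuf cache and MAX_PKT_BURST of
	 * 512, each pool would hold (2048 + 4 * 250 + 2048 + 512) = 5608 mbufs,
	 * scaled by RTE_MAX_ETHPORTS.
	 */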
1134 
1135 	if (numa_support) {
1136 		uint8_t i;
1137 
1138 		for (i = 0; i < num_sockets; i++)
1139 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1140 					 socket_ids[i]);
1141 	} else {
1142 		if (socket_num == UMA_NO_CONFIG)
1143 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1144 		else
1145 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1146 						 socket_num);
1147 	}
1148 
1149 	init_port_config();
1150 
1151 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1152 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1153 	/*
1154 	 * Record which mbuf pool each logical core should use, if needed.
1155 	 */
1156 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1157 		mbp = mbuf_pool_find(
1158 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1159 
1160 		if (mbp == NULL)
1161 			mbp = mbuf_pool_find(0);
1162 		fwd_lcores[lc_id]->mbp = mbp;
1163 		/* initialize GSO context */
1164 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1165 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1166 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1167 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1168 			ETHER_CRC_LEN;
1169 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
1170 	}
1171 
1172 	/* Configuration of packet forwarding streams. */
1173 	if (init_fwd_streams() < 0)
1174 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1175 
1176 	fwd_config_setup();
1177 
1178 	/* create a gro context for each lcore */
1179 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1180 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1181 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1182 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1183 		gro_param.socket_id = rte_lcore_to_socket_id(
1184 				fwd_lcores_cpuids[lc_id]);
1185 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1186 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1187 			rte_exit(EXIT_FAILURE,
1188 					"rte_gro_ctx_create() failed\n");
1189 		}
1190 	}
1191 
1192 #if defined RTE_LIBRTE_PMD_SOFTNIC
1193 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1194 		RTE_ETH_FOREACH_DEV(pid) {
1195 			port = &ports[pid];
1196 			const char *driver = port->dev_info.driver_name;
1197 
1198 			if (strcmp(driver, "net_softnic") == 0)
1199 				port->softport.fwd_lcore_arg = fwd_lcores;
1200 		}
1201 	}
1202 #endif
1203 
1204 }
1205 
1206 
1207 void
1208 reconfig(portid_t new_port_id, unsigned socket_id)
1209 {
1210 	struct rte_port *port;
1211 
1212 	/* Reconfiguration of Ethernet ports. */
1213 	port = &ports[new_port_id];
1214 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
1215 
1216 	/* set flag to initialize port/queue */
1217 	port->need_reconfig = 1;
1218 	port->need_reconfig_queues = 1;
1219 	port->socket_id = socket_id;
1220 
1221 	init_port_config();
1222 }
1223 
1224 
1225 int
1226 init_fwd_streams(void)
1227 {
1228 	portid_t pid;
1229 	struct rte_port *port;
1230 	streamid_t sm_id, nb_fwd_streams_new;
1231 	queueid_t q;
1232 
1233 	/* set socket id according to numa or not */
1234 	RTE_ETH_FOREACH_DEV(pid) {
1235 		port = &ports[pid];
1236 		if (nb_rxq > port->dev_info.max_rx_queues) {
1237 			printf("Fail: nb_rxq(%d) is greater than "
1238 				"max_rx_queues(%d)\n", nb_rxq,
1239 				port->dev_info.max_rx_queues);
1240 			return -1;
1241 		}
1242 		if (nb_txq > port->dev_info.max_tx_queues) {
1243 			printf("Fail: nb_txq(%d) is greater than "
1244 				"max_tx_queues(%d)\n", nb_txq,
1245 				port->dev_info.max_tx_queues);
1246 			return -1;
1247 		}
1248 		if (numa_support) {
1249 			if (port_numa[pid] != NUMA_NO_CONFIG)
1250 				port->socket_id = port_numa[pid];
1251 			else {
1252 				port->socket_id = rte_eth_dev_socket_id(pid);
1253 
1254 				/*
1255 				 * if socket_id is invalid,
1256 				 * set to the first available socket.
1257 				 */
1258 				if (check_socket_id(port->socket_id) < 0)
1259 					port->socket_id = socket_ids[0];
1260 			}
1261 		}
1262 		else {
1263 			if (socket_num == UMA_NO_CONFIG)
1264 				port->socket_id = 0;
1265 			else
1266 				port->socket_id = socket_num;
1267 		}
1268 	}
1269 
1270 	q = RTE_MAX(nb_rxq, nb_txq);
1271 	if (q == 0) {
1272 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1273 		return -1;
1274 	}
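	/* e.g. 2 ports with nb_rxq = 4 and nb_txq = 2 yield RTE_MAX(4, 2) * 2 = 8 streams */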
1275 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1276 	if (nb_fwd_streams_new == nb_fwd_streams)
1277 		return 0;
1278 	/* clear the old */
1279 	if (fwd_streams != NULL) {
1280 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1281 			if (fwd_streams[sm_id] == NULL)
1282 				continue;
1283 			rte_free(fwd_streams[sm_id]);
1284 			fwd_streams[sm_id] = NULL;
1285 		}
1286 		rte_free(fwd_streams);
1287 		fwd_streams = NULL;
1288 	}
1289 
1290 	/* init new */
1291 	nb_fwd_streams = nb_fwd_streams_new;
1292 	if (nb_fwd_streams) {
1293 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1294 			sizeof(struct fwd_stream *) * nb_fwd_streams,
1295 			RTE_CACHE_LINE_SIZE);
1296 		if (fwd_streams == NULL)
1297 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1298 				 " (struct fwd_stream *)) failed\n",
1299 				 nb_fwd_streams);
1300 
1301 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1302 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1303 				" struct fwd_stream", sizeof(struct fwd_stream),
1304 				RTE_CACHE_LINE_SIZE);
1305 			if (fwd_streams[sm_id] == NULL)
1306 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
1307 					 "(struct fwd_stream) failed\n");
1308 		}
1309 	}
1310 
1311 	return 0;
1312 }
1313 
1314 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1315 static void
1316 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1317 {
1318 	unsigned int total_burst;
1319 	unsigned int nb_burst;
1320 	unsigned int burst_stats[3];
1321 	uint16_t pktnb_stats[3];
1322 	uint16_t nb_pkt;
1323 	int burst_percent[3];
1324 
1325 	/*
1326 	 * First compute the total number of packet bursts and the
1327 	 * two highest numbers of bursts of the same number of packets.
1328 	 */
1329 	total_burst = 0;
1330 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1331 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1332 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1333 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1334 		if (nb_burst == 0)
1335 			continue;
1336 		total_burst += nb_burst;
1337 		if (nb_burst > burst_stats[0]) {
1338 			burst_stats[1] = burst_stats[0];
1339 			pktnb_stats[1] = pktnb_stats[0];
1340 			burst_stats[0] = nb_burst;
1341 			pktnb_stats[0] = nb_pkt;
1342 		} else if (nb_burst > burst_stats[1]) {
1343 			burst_stats[1] = nb_burst;
1344 			pktnb_stats[1] = nb_pkt;
1345 		}
1346 	}
1347 	if (total_burst == 0)
1348 		return;
1349 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1350 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1351 	       burst_percent[0], (int) pktnb_stats[0]);
1352 	if (burst_stats[0] == total_burst) {
1353 		printf("]\n");
1354 		return;
1355 	}
1356 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1357 		printf(" + %d%% of %d pkts]\n",
1358 		       100 - burst_percent[0], pktnb_stats[1]);
1359 		return;
1360 	}
1361 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1362 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1363 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1364 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1365 		return;
1366 	}
1367 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1368 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1369 }
1370 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1371 
1372 static void
1373 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1374 {
1375 	struct rte_port *port;
1376 	uint8_t i;
1377 
1378 	static const char *fwd_stats_border = "----------------------";
1379 
1380 	port = &ports[port_id];
1381 	printf("\n  %s Forward statistics for port %-2d %s\n",
1382 	       fwd_stats_border, port_id, fwd_stats_border);
1383 
1384 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1385 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1386 		       "%-"PRIu64"\n",
1387 		       stats->ipackets, stats->imissed,
1388 		       (uint64_t) (stats->ipackets + stats->imissed));
1389 
1390 		if (cur_fwd_eng == &csum_fwd_engine)
1391 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1392 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1393 			       port->rx_bad_outer_l4_csum);
1394 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1395 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
1396 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1397 		}
1398 
1399 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1400 		       "%-"PRIu64"\n",
1401 		       stats->opackets, port->tx_dropped,
1402 		       (uint64_t) (stats->opackets + port->tx_dropped));
1403 	}
1404 	else {
1405 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1406 		       "%14"PRIu64"\n",
1407 		       stats->ipackets, stats->imissed,
1408 		       (uint64_t) (stats->ipackets + stats->imissed));
1409 
1410 		if (cur_fwd_eng == &csum_fwd_engine)
1411 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"    Bad-outer-l4csum: %-14"PRIu64"\n",
1412 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1413 			       port->rx_bad_outer_l4_csum);
1414 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
1415 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1416 			printf("  RX-nombufs:             %14"PRIu64"\n",
1417 			       stats->rx_nombuf);
1418 		}
1419 
1420 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1421 		       "%14"PRIu64"\n",
1422 		       stats->opackets, port->tx_dropped,
1423 		       (uint64_t) (stats->opackets + port->tx_dropped));
1424 	}
1425 
1426 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1427 	if (port->rx_stream)
1428 		pkt_burst_stats_display("RX",
1429 			&port->rx_stream->rx_burst_stats);
1430 	if (port->tx_stream)
1431 		pkt_burst_stats_display("TX",
1432 			&port->tx_stream->tx_burst_stats);
1433 #endif
1434 
1435 	if (port->rx_queue_stats_mapping_enabled) {
1436 		printf("\n");
1437 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1438 			printf("  Stats reg %2d RX-packets:%14"PRIu64
1439 			       "     RX-errors:%14"PRIu64
1440 			       "    RX-bytes:%14"PRIu64"\n",
1441 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1442 		}
1443 		printf("\n");
1444 	}
1445 	if (port->tx_queue_stats_mapping_enabled) {
1446 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1447 			printf("  Stats reg %2d TX-packets:%14"PRIu64
1448 			       "                                 TX-bytes:%14"PRIu64"\n",
1449 			       i, stats->q_opackets[i], stats->q_obytes[i]);
1450 		}
1451 	}
1452 
1453 	printf("  %s--------------------------------%s\n",
1454 	       fwd_stats_border, fwd_stats_border);
1455 }
1456 
1457 static void
1458 fwd_stream_stats_display(streamid_t stream_id)
1459 {
1460 	struct fwd_stream *fs;
1461 	static const char *fwd_top_stats_border = "-------";
1462 
1463 	fs = fwd_streams[stream_id];
1464 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1465 	    (fs->fwd_dropped == 0))
1466 		return;
1467 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1468 	       "TX Port=%2d/Queue=%2d %s\n",
1469 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1470 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1471 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1472 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1473 
1474 	/* if checksum mode */
1475 	if (cur_fwd_eng == &csum_fwd_engine) {
1476 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1477 			"%-14u Rx- bad outer L4 checksum: %-14u\n",
1478 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1479 			fs->rx_bad_outer_l4_csum);
1480 	}
1481 
1482 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1483 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1484 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1485 #endif
1486 }
1487 
1488 static void
1489 flush_fwd_rx_queues(void)
1490 {
1491 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1492 	portid_t  rxp;
1493 	portid_t port_id;
1494 	queueid_t rxq;
1495 	uint16_t  nb_rx;
1496 	uint16_t  i;
1497 	uint8_t   j;
1498 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1499 	uint64_t timer_period;
1500 
1501 	/* convert to number of cycles */
1502 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1503 
1504 	for (j = 0; j < 2; j++) {
1505 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1506 			for (rxq = 0; rxq < nb_rxq; rxq++) {
1507 				port_id = fwd_ports_ids[rxp];
1508 				/**
1509 				* testpmd can get stuck in the do-while loop below
1510 				* if rte_eth_rx_burst() always returns a nonzero
1511 				* number of packets, so a timer is added to exit
1512 				* this loop after the 1-second timer expires.
1513 				*/
1514 				prev_tsc = rte_rdtsc();
1515 				do {
1516 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1517 						pkts_burst, MAX_PKT_BURST);
1518 					for (i = 0; i < nb_rx; i++)
1519 						rte_pktmbuf_free(pkts_burst[i]);
1520 
1521 					cur_tsc = rte_rdtsc();
1522 					diff_tsc = cur_tsc - prev_tsc;
1523 					timer_tsc += diff_tsc;
1524 				} while ((nb_rx > 0) &&
1525 					(timer_tsc < timer_period));
1526 				timer_tsc = 0;
1527 			}
1528 		}
1529 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1530 	}
1531 }
1532 
1533 static void
1534 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1535 {
1536 	struct fwd_stream **fsm;
1537 	streamid_t nb_fs;
1538 	streamid_t sm_id;
1539 #ifdef RTE_LIBRTE_BITRATE
1540 	uint64_t tics_per_1sec;
1541 	uint64_t tics_datum;
1542 	uint64_t tics_current;
1543 	uint16_t i, cnt_ports;
1544 
1545 	cnt_ports = nb_ports;
1546 	tics_datum = rte_rdtsc();
1547 	tics_per_1sec = rte_get_timer_hz();
1548 #endif
1549 	fsm = &fwd_streams[fc->stream_idx];
1550 	nb_fs = fc->stream_nb;
1551 	do {
1552 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1553 			(*pkt_fwd)(fsm[sm_id]);
1554 #ifdef RTE_LIBRTE_BITRATE
1555 		if (bitrate_enabled != 0 &&
1556 				bitrate_lcore_id == rte_lcore_id()) {
1557 			tics_current = rte_rdtsc();
1558 			if (tics_current - tics_datum >= tics_per_1sec) {
1559 				/* Periodic bitrate calculation */
1560 				for (i = 0; i < cnt_ports; i++)
1561 					rte_stats_bitrate_calc(bitrate_data,
1562 						ports_ids[i]);
1563 				tics_datum = tics_current;
1564 			}
1565 		}
1566 #endif
1567 #ifdef RTE_LIBRTE_LATENCY_STATS
1568 		if (latencystats_enabled != 0 &&
1569 				latencystats_lcore_id == rte_lcore_id())
1570 			rte_latencystats_update();
1571 #endif
1572 
1573 	} while (! fc->stopped);
1574 }
1575 
1576 static int
1577 start_pkt_forward_on_core(void *fwd_arg)
1578 {
1579 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1580 			     cur_fwd_config.fwd_eng->packet_fwd);
1581 	return 0;
1582 }
1583 
1584 /*
1585  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1586  * Used to start communication flows in network loopback test configurations.
1587  */
1588 static int
1589 run_one_txonly_burst_on_core(void *fwd_arg)
1590 {
1591 	struct fwd_lcore *fwd_lc;
1592 	struct fwd_lcore tmp_lcore;
1593 
1594 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1595 	tmp_lcore = *fwd_lc;
1596 	tmp_lcore.stopped = 1;
1597 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1598 	return 0;
1599 }
1600 
1601 /*
1602  * Launch packet forwarding:
1603  *     - Setup per-port forwarding context.
1604  *     - launch logical cores with their forwarding configuration.
1605  */
1606 static void
1607 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1608 {
1609 	port_fwd_begin_t port_fwd_begin;
1610 	unsigned int i;
1611 	unsigned int lc_id;
1612 	int diag;
1613 
1614 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1615 	if (port_fwd_begin != NULL) {
1616 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1617 			(*port_fwd_begin)(fwd_ports_ids[i]);
1618 	}
1619 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1620 		lc_id = fwd_lcores_cpuids[i];
1621 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1622 			fwd_lcores[i]->stopped = 0;
1623 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1624 						     fwd_lcores[i], lc_id);
1625 			if (diag != 0)
1626 				printf("launch lcore %u failed - diag=%d\n",
1627 				       lc_id, diag);
1628 		}
1629 	}
1630 }
1631 
1632 /*
1633  * Launch packet forwarding configuration.
1634  */
1635 void
1636 start_packet_forwarding(int with_tx_first)
1637 {
1638 	port_fwd_begin_t port_fwd_begin;
1639 	port_fwd_end_t  port_fwd_end;
1640 	struct rte_port *port;
1641 	unsigned int i;
1642 	portid_t   pt_id;
1643 	streamid_t sm_id;
1644 
1645 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1646 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1647 
1648 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1649 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1650 
1651 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1652 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1653 		(!nb_rxq || !nb_txq))
1654 		rte_exit(EXIT_FAILURE,
1655 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1656 			cur_fwd_eng->fwd_mode_name);
1657 
1658 	if (all_ports_started() == 0) {
1659 		printf("Not all ports were started\n");
1660 		return;
1661 	}
1662 	if (test_done == 0) {
1663 		printf("Packet forwarding already started\n");
1664 		return;
1665 	}
1666 
1667 
1668 	if(dcb_test) {
1669 		for (i = 0; i < nb_fwd_ports; i++) {
1670 			pt_id = fwd_ports_ids[i];
1671 			port = &ports[pt_id];
1672 			if (!port->dcb_flag) {
1673 				printf("In DCB mode, all forwarding ports must "
1674                                        "be configured in this mode.\n");
1675 				return;
1676 			}
1677 		}
1678 		if (nb_fwd_lcores == 1) {
1679 			printf("In DCB mode, the number of forwarding cores "
1680                                "should be larger than 1.\n");
1681 			return;
1682 		}
1683 	}
1684 	test_done = 0;
1685 
1686 	fwd_config_setup();
1687 
1688 	if(!no_flush_rx)
1689 		flush_fwd_rx_queues();
1690 
1691 	pkt_fwd_config_display(&cur_fwd_config);
1692 	rxtx_config_display();
1693 
1694 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1695 		pt_id = fwd_ports_ids[i];
1696 		port = &ports[pt_id];
1697 		rte_eth_stats_get(pt_id, &port->stats);
1698 		port->tx_dropped = 0;
1699 
1700 		map_port_queue_stats_mapping_registers(pt_id, port);
1701 	}
1702 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1703 		fwd_streams[sm_id]->rx_packets = 0;
1704 		fwd_streams[sm_id]->tx_packets = 0;
1705 		fwd_streams[sm_id]->fwd_dropped = 0;
1706 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1707 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1708 		fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1709 
1710 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1711 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1712 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1713 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1714 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1715 #endif
1716 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1717 		fwd_streams[sm_id]->core_cycles = 0;
1718 #endif
1719 	}
1720 	if (with_tx_first) {
1721 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1722 		if (port_fwd_begin != NULL) {
1723 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1724 				(*port_fwd_begin)(fwd_ports_ids[i]);
1725 		}
1726 		while (with_tx_first--) {
1727 			launch_packet_forwarding(
1728 					run_one_txonly_burst_on_core);
1729 			rte_eal_mp_wait_lcore();
1730 		}
1731 		port_fwd_end = tx_only_engine.port_fwd_end;
1732 		if (port_fwd_end != NULL) {
1733 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1734 				(*port_fwd_end)(fwd_ports_ids[i]);
1735 		}
1736 	}
1737 	launch_packet_forwarding(start_pkt_forward_on_core);
1738 }
1739 
1740 void
1741 stop_packet_forwarding(void)
1742 {
1743 	struct rte_eth_stats stats;
1744 	struct rte_port *port;
1745 	port_fwd_end_t  port_fwd_end;
1746 	int i;
1747 	portid_t   pt_id;
1748 	streamid_t sm_id;
1749 	lcoreid_t  lc_id;
1750 	uint64_t total_recv;
1751 	uint64_t total_xmit;
1752 	uint64_t total_rx_dropped;
1753 	uint64_t total_tx_dropped;
1754 	uint64_t total_rx_nombuf;
1755 	uint64_t tx_dropped;
1756 	uint64_t rx_bad_ip_csum;
1757 	uint64_t rx_bad_l4_csum;
1758 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1759 	uint64_t fwd_cycles;
1760 #endif
1761 
1762 	static const char *acc_stats_border = "+++++++++++++++";
1763 
1764 	if (test_done) {
1765 		printf("Packet forwarding not started\n");
1766 		return;
1767 	}
1768 	printf("Telling cores to stop...");
1769 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1770 		fwd_lcores[lc_id]->stopped = 1;
1771 	printf("\nWaiting for lcores to finish...\n");
1772 	rte_eal_mp_wait_lcore();
1773 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1774 	if (port_fwd_end != NULL) {
1775 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1776 			pt_id = fwd_ports_ids[i];
1777 			(*port_fwd_end)(pt_id);
1778 		}
1779 	}
1780 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1781 	fwd_cycles = 0;
1782 #endif
1783 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1784 		if (cur_fwd_config.nb_fwd_streams >
1785 		    cur_fwd_config.nb_fwd_ports) {
1786 			fwd_stream_stats_display(sm_id);
1787 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1788 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1789 		} else {
1790 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1791 				fwd_streams[sm_id];
1792 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1793 				fwd_streams[sm_id];
1794 		}
1795 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1796 		tx_dropped = (uint64_t) (tx_dropped +
1797 					 fwd_streams[sm_id]->fwd_dropped);
1798 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1799 
1800 		rx_bad_ip_csum =
1801 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1802 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1803 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1804 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1805 							rx_bad_ip_csum;
1806 
1807 		rx_bad_l4_csum =
1808 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1809 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1810 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1811 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1812 							rx_bad_l4_csum;
1813 
1814 		ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1815 				fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1816 
1817 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1818 		fwd_cycles = (uint64_t) (fwd_cycles +
1819 					 fwd_streams[sm_id]->core_cycles);
1820 #endif
1821 	}
1822 	total_recv = 0;
1823 	total_xmit = 0;
1824 	total_rx_dropped = 0;
1825 	total_tx_dropped = 0;
1826 	total_rx_nombuf  = 0;
1827 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1828 		pt_id = fwd_ports_ids[i];
1829 
1830 		port = &ports[pt_id];
1831 		rte_eth_stats_get(pt_id, &stats);
1832 		stats.ipackets -= port->stats.ipackets;
1833 		port->stats.ipackets = 0;
1834 		stats.opackets -= port->stats.opackets;
1835 		port->stats.opackets = 0;
1836 		stats.ibytes   -= port->stats.ibytes;
1837 		port->stats.ibytes = 0;
1838 		stats.obytes   -= port->stats.obytes;
1839 		port->stats.obytes = 0;
1840 		stats.imissed  -= port->stats.imissed;
1841 		port->stats.imissed = 0;
1842 		stats.oerrors  -= port->stats.oerrors;
1843 		port->stats.oerrors = 0;
1844 		stats.rx_nombuf -= port->stats.rx_nombuf;
1845 		port->stats.rx_nombuf = 0;
1846 
1847 		total_recv += stats.ipackets;
1848 		total_xmit += stats.opackets;
1849 		total_rx_dropped += stats.imissed;
1850 		total_tx_dropped += port->tx_dropped;
1851 		total_rx_nombuf  += stats.rx_nombuf;
1852 
1853 		fwd_port_stats_display(pt_id, &stats);
1854 	}
1855 
1856 	printf("\n  %s Accumulated forward statistics for all ports"
1857 	       "%s\n",
1858 	       acc_stats_border, acc_stats_border);
1859 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1860 	       "%-"PRIu64"\n"
1861 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1862 	       "%-"PRIu64"\n",
1863 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1864 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1865 	if (total_rx_nombuf > 0)
1866 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1867 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1868 	       "%s\n",
1869 	       acc_stats_border, acc_stats_border);
1870 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1871 	if (total_recv > 0)
1872 		printf("\n  CPU cycles/packet=%u (total cycles="
1873 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1874 		       (unsigned int)(fwd_cycles / total_recv),
1875 		       fwd_cycles, total_recv);
1876 #endif
1877 	printf("\nDone.\n");
1878 	test_done = 1;
1879 }
1880 
1881 void
1882 dev_set_link_up(portid_t pid)
1883 {
1884 	if (rte_eth_dev_set_link_up(pid) < 0)
1885 		printf("\nSet link up failed.\n");
1886 }
1887 
1888 void
1889 dev_set_link_down(portid_t pid)
1890 {
1891 	if (rte_eth_dev_set_link_down(pid) < 0)
1892 		printf("\nSet link down failed.\n");
1893 }
1894 
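/* Return 1 when every port that is not a bonding slave has been started. */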
1895 static int
1896 all_ports_started(void)
1897 {
1898 	portid_t pi;
1899 	struct rte_port *port;
1900 
1901 	RTE_ETH_FOREACH_DEV(pi) {
1902 		port = &ports[pi];
1903 		/* Check if there is a port which is not started */
1904 		if ((port->port_status != RTE_PORT_STARTED) &&
1905 			(port->slave_flag == 0))
1906 			return 0;
1907 	}
1908 
1909 	/* All ports that are not bonding slaves are started */
1910 	return 1;
1911 }
1912 
1913 int
1914 port_is_stopped(portid_t port_id)
1915 {
1916 	struct rte_port *port = &ports[port_id];
1917 
1918 	if ((port->port_status != RTE_PORT_STOPPED) &&
1919 	    (port->slave_flag == 0))
1920 		return 0;
1921 	return 1;
1922 }
1923 
1924 int
1925 all_ports_stopped(void)
1926 {
1927 	portid_t pi;
1928 
1929 	RTE_ETH_FOREACH_DEV(pi) {
1930 		if (!port_is_stopped(pi))
1931 			return 0;
1932 	}
1933 
1934 	return 1;
1935 }
1936 
1937 int
1938 port_is_started(portid_t port_id)
1939 {
1940 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1941 		return 0;
1942 
1943 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1944 		return 0;
1945 
1946 	return 1;
1947 }
1948 
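/*
 * Start the given port, or all ports when pid == RTE_PORT_ALL.
 * Reconfigures the device and sets up its RX/TX queues when they are
 * flagged for reconfiguration, then starts the device and prints its
 * MAC address. Returns 0 on success, -1 on a configuration or queue
 * setup failure.
 */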
1949 int
1950 start_port(portid_t pid)
1951 {
1952 	int diag, need_check_link_status = -1;
1953 	portid_t pi;
1954 	queueid_t qi;
1955 	struct rte_port *port;
1956 	struct ether_addr mac_addr;
1957 
1958 	if (port_id_is_invalid(pid, ENABLED_WARN))
1959 		return 0;
1960 
1961 	if(dcb_config)
1962 		dcb_test = 1;
1963 	RTE_ETH_FOREACH_DEV(pi) {
1964 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1965 			continue;
1966 
1967 		need_check_link_status = 0;
1968 		port = &ports[pi];
1969 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1970 						 RTE_PORT_HANDLING) == 0) {
1971 			printf("Port %d is now not stopped\n", pi);
1972 			continue;
1973 		}
1974 
1975 		if (port->need_reconfig > 0) {
1976 			port->need_reconfig = 0;
1977 
1978 			if (flow_isolate_all) {
1979 				int ret = port_flow_isolate(pi, 1);
1980 				if (ret) {
1981 					printf("Failed to apply isolated"
1982 					       " mode on port %d\n", pi);
1983 					return -1;
1984 				}
1985 			}
1986 			configure_rxtx_dump_callbacks(0);
1987 			printf("Configuring Port %d (socket %u)\n", pi,
1988 					port->socket_id);
1989 			/* configure port */
1990 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1991 						&(port->dev_conf));
1992 			if (diag != 0) {
1993 				if (rte_atomic16_cmpset(&(port->port_status),
1994 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1995 					printf("Port %d can not be set back "
1996 							"to stopped\n", pi);
1997 				printf("Fail to configure port %d\n", pi);
1998 				/* try to reconfigure port next time */
1999 				port->need_reconfig = 1;
2000 				return -1;
2001 			}
2002 		}
2003 		if (port->need_reconfig_queues > 0) {
2004 			port->need_reconfig_queues = 0;
2005 			/* setup tx queues */
2006 			for (qi = 0; qi < nb_txq; qi++) {
2007 				if ((numa_support) &&
2008 					(txring_numa[pi] != NUMA_NO_CONFIG))
2009 					diag = rte_eth_tx_queue_setup(pi, qi,
2010 						port->nb_tx_desc[qi],
2011 						txring_numa[pi],
2012 						&(port->tx_conf[qi]));
2013 				else
2014 					diag = rte_eth_tx_queue_setup(pi, qi,
2015 						port->nb_tx_desc[qi],
2016 						port->socket_id,
2017 						&(port->tx_conf[qi]));
2018 
2019 				if (diag == 0)
2020 					continue;
2021 
2022 				/* Failed to set up a TX queue; return */
2023 				if (rte_atomic16_cmpset(&(port->port_status),
2024 							RTE_PORT_HANDLING,
2025 							RTE_PORT_STOPPED) == 0)
2026 					printf("Port %d can not be set back "
2027 							"to stopped\n", pi);
2028 				printf("Fail to configure port %d tx queues\n",
2029 				       pi);
2030 				/* try to reconfigure queues next time */
2031 				port->need_reconfig_queues = 1;
2032 				return -1;
2033 			}
2034 			for (qi = 0; qi < nb_rxq; qi++) {
2035 				/* setup rx queues */
2036 				if ((numa_support) &&
2037 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2038 					struct rte_mempool * mp =
2039 						mbuf_pool_find(rxring_numa[pi]);
2040 					if (mp == NULL) {
2041 						printf("Failed to setup RX queue:"
2042 							"No mempool allocation"
2043 							" on the socket %d\n",
2044 							rxring_numa[pi]);
2045 						return -1;
2046 					}
2047 
2048 					diag = rte_eth_rx_queue_setup(pi, qi,
2049 					     port->nb_rx_desc[qi],
2050 					     rxring_numa[pi],
2051 					     &(port->rx_conf[qi]),
2052 					     mp);
2053 				} else {
2054 					struct rte_mempool *mp =
2055 						mbuf_pool_find(port->socket_id);
2056 					if (mp == NULL) {
2057 						printf("Failed to setup RX queue:"
2058 							"No mempool allocation"
2059 							" on the socket %d\n",
2060 							port->socket_id);
2061 						return -1;
2062 					}
2063 					diag = rte_eth_rx_queue_setup(pi, qi,
2064 					     port->nb_rx_desc[qi],
2065 					     port->socket_id,
2066 					     &(port->rx_conf[qi]),
2067 					     mp);
2068 				}
2069 				if (diag == 0)
2070 					continue;
2071 
2072 				/* Failed to set up an RX queue; return */
2073 				if (rte_atomic16_cmpset(&(port->port_status),
2074 							RTE_PORT_HANDLING,
2075 							RTE_PORT_STOPPED) == 0)
2076 					printf("Port %d can not be set back "
2077 							"to stopped\n", pi);
2078 				printf("Fail to configure port %d rx queues\n",
2079 				       pi);
2080 				/* try to reconfigure queues next time */
2081 				port->need_reconfig_queues = 1;
2082 				return -1;
2083 			}
2084 		}
2085 		configure_rxtx_dump_callbacks(verbose_level);
2086 		/* start port */
2087 		if (rte_eth_dev_start(pi) < 0) {
2088 			printf("Fail to start port %d\n", pi);
2089 
2090 			/* Failed to start the port; roll the status back to stopped */
2091 			if (rte_atomic16_cmpset(&(port->port_status),
2092 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2093 				printf("Port %d can not be set back to "
2094 							"stopped\n", pi);
2095 			continue;
2096 		}
2097 
2098 		if (rte_atomic16_cmpset(&(port->port_status),
2099 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2100 			printf("Port %d can not be set into started\n", pi);
2101 
2102 		rte_eth_macaddr_get(pi, &mac_addr);
2103 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2104 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2105 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2106 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2107 
2108 		/* At least one port was started; the link status needs to be checked */
2109 		need_check_link_status = 1;
2110 	}
2111 
2112 	if (need_check_link_status == 1 && !no_link_check)
2113 		check_all_ports_link_status(RTE_PORT_ALL);
2114 	else if (need_check_link_status == 0)
2115 		printf("Please stop the ports first\n");
2116 
2117 	printf("Done\n");
2118 	return 0;
2119 }
2120 
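/*
 * Stop the given port, or all ports when pid == RTE_PORT_ALL.
 * Ports that are still in the forwarding configuration or that act as
 * bonding slaves are skipped.
 */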
2121 void
2122 stop_port(portid_t pid)
2123 {
2124 	portid_t pi;
2125 	struct rte_port *port;
2126 	int need_check_link_status = 0;
2127 
2128 	if (dcb_test) {
2129 		dcb_test = 0;
2130 		dcb_config = 0;
2131 	}
2132 
2133 	if (port_id_is_invalid(pid, ENABLED_WARN))
2134 		return;
2135 
2136 	printf("Stopping ports...\n");
2137 
2138 	RTE_ETH_FOREACH_DEV(pi) {
2139 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2140 			continue;
2141 
2142 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143 			printf("Please remove port %d from forwarding configuration.\n", pi);
2144 			continue;
2145 		}
2146 
2147 		if (port_is_bonding_slave(pi)) {
2148 			printf("Please remove port %d from bonded device.\n", pi);
2149 			continue;
2150 		}
2151 
2152 		port = &ports[pi];
2153 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154 						RTE_PORT_HANDLING) == 0)
2155 			continue;
2156 
2157 		rte_eth_dev_stop(pi);
2158 
2159 		if (rte_atomic16_cmpset(&(port->port_status),
2160 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161 			printf("Port %d can not be set into stopped\n", pi);
2162 		need_check_link_status = 1;
2163 	}
2164 	if (need_check_link_status && !no_link_check)
2165 		check_all_ports_link_status(RTE_PORT_ALL);
2166 
2167 	printf("Done\n");
2168 }
2169 
2170 static void
2171 remove_invalid_ports_in(portid_t *array, portid_t *total)
2172 {
2173 	portid_t i;
2174 	portid_t new_total = 0;
2175 
2176 	for (i = 0; i < *total; i++)
2177 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2178 			array[new_total] = array[i];
2179 			new_total++;
2180 		}
2181 	*total = new_total;
2182 }
2183 
2184 static void
2185 remove_invalid_ports(void)
2186 {
2187 	remove_invalid_ports_in(ports_ids, &nb_ports);
2188 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2189 	nb_cfg_ports = nb_fwd_ports;
2190 }
2191 
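/*
 * Close the given port, or all ports when pid == RTE_PORT_ALL.
 * Flow rules are flushed before rte_eth_dev_close() and invalid port ids
 * are pruned from the port lists afterwards.
 */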
2192 void
2193 close_port(portid_t pid)
2194 {
2195 	portid_t pi;
2196 	struct rte_port *port;
2197 
2198 	if (port_id_is_invalid(pid, ENABLED_WARN))
2199 		return;
2200 
2201 	printf("Closing ports...\n");
2202 
2203 	RTE_ETH_FOREACH_DEV(pi) {
2204 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2205 			continue;
2206 
2207 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208 			printf("Please remove port %d from forwarding configuration.\n", pi);
2209 			continue;
2210 		}
2211 
2212 		if (port_is_bonding_slave(pi)) {
2213 			printf("Please remove port %d from bonded device.\n", pi);
2214 			continue;
2215 		}
2216 
2217 		port = &ports[pi];
2218 		if (rte_atomic16_cmpset(&(port->port_status),
2219 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220 			printf("Port %d is already closed\n", pi);
2221 			continue;
2222 		}
2223 
2224 		if (rte_atomic16_cmpset(&(port->port_status),
2225 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226 			printf("Port %d is now not stopped\n", pi);
2227 			continue;
2228 		}
2229 
2230 		if (port->flow_list)
2231 			port_flow_flush(pi);
2232 		rte_eth_dev_close(pi);
2233 
2234 		remove_invalid_ports();
2235 
2236 		if (rte_atomic16_cmpset(&(port->port_status),
2237 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238 			printf("Port %d cannot be set to closed\n", pi);
2239 	}
2240 
2241 	printf("Done\n");
2242 }
2243 
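/*
 * Reset the given port, or all ports when pid == RTE_PORT_ALL, and mark
 * the port for a full reconfiguration on its next start.
 */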
2244 void
2245 reset_port(portid_t pid)
2246 {
2247 	int diag;
2248 	portid_t pi;
2249 	struct rte_port *port;
2250 
2251 	if (port_id_is_invalid(pid, ENABLED_WARN))
2252 		return;
2253 
2254 	printf("Resetting ports...\n");
2255 
2256 	RTE_ETH_FOREACH_DEV(pi) {
2257 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2258 			continue;
2259 
2260 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2261 			printf("Please remove port %d from forwarding "
2262 			       "configuration.\n", pi);
2263 			continue;
2264 		}
2265 
2266 		if (port_is_bonding_slave(pi)) {
2267 			printf("Please remove port %d from bonded device.\n",
2268 			       pi);
2269 			continue;
2270 		}
2271 
2272 		diag = rte_eth_dev_reset(pi);
2273 		if (diag == 0) {
2274 			port = &ports[pi];
2275 			port->need_reconfig = 1;
2276 			port->need_reconfig_queues = 1;
2277 		} else {
2278 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
2279 		}
2280 	}
2281 
2282 	printf("Done\n");
2283 }
2284 
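/*
 * Probe and attach a new port described by a devargs identifier, for
 * example a PCI address such as "0000:02:00.0" or a virtual device such
 * as "net_tap0,iface=tap0" (illustrative values; any identifier accepted
 * by rte_dev_probe() can be used).
 */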
2285 void
2286 attach_port(char *identifier)
2287 {
2288 	portid_t pi = 0;
2289 	struct rte_dev_iterator iterator;
2290 
2291 	printf("Attaching a new port...\n");
2292 
2293 	if (identifier == NULL) {
2294 		printf("Invalid parameters are specified\n");
2295 		return;
2296 	}
2297 
2298 	if (rte_dev_probe(identifier) != 0) {
2299 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2300 		return;
2301 	}
2302 
2303 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2304 		if (port_is_forwarding(pi))
2305 			continue; /* port was already attached before */
2306 		setup_attached_port(pi);
2307 	}
2308 }
2309 
2310 static void
2311 setup_attached_port(portid_t pi)
2312 {
2313 	unsigned int socket_id;
2314 
2315 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2316 	/* if socket_id is invalid, set to the first available socket. */
2317 	if (check_socket_id(socket_id) < 0)
2318 		socket_id = socket_ids[0];
2319 	reconfig(pi, socket_id);
2320 	rte_eth_promiscuous_enable(pi);
2321 
2322 	ports_ids[nb_ports++] = pi;
2323 	fwd_ports_ids[nb_fwd_ports++] = pi;
2324 	nb_cfg_ports = nb_fwd_ports;
2325 	ports[pi].port_status = RTE_PORT_STOPPED;
2326 
2327 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2328 	printf("Done\n");
2329 }
2330 
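/*
 * Remove the rte_device backing the given port. All sibling ports that
 * share the device are forced to the closed state and pruned from the
 * port lists.
 */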
2331 void
2332 detach_port_device(portid_t port_id)
2333 {
2334 	struct rte_device *dev;
2335 	portid_t sibling;
2336 
2337 	printf("Removing a device...\n");
2338 
2339 	dev = rte_eth_devices[port_id].device;
2340 	if (dev == NULL) {
2341 		printf("Device already removed\n");
2342 		return;
2343 	}
2344 
2345 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2346 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2347 			printf("Port not stopped\n");
2348 			return;
2349 		}
2350 		printf("Port was not closed\n");
2351 		if (ports[port_id].flow_list)
2352 			port_flow_flush(port_id);
2353 	}
2354 
2355 	if (rte_dev_remove(dev) != 0) {
2356 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2357 		return;
2358 	}
2359 
2360 	for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2361 		if (rte_eth_devices[sibling].device != dev)
2362 			continue;
2363 		/* reset mapping between old ports and removed device */
2364 		rte_eth_devices[sibling].device = NULL;
2365 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2366 			/* sibling ports are forced to be closed */
2367 			ports[sibling].port_status = RTE_PORT_CLOSED;
2368 			printf("Port %u is closed\n", sibling);
2369 		}
2370 	}
2371 
2372 	remove_invalid_ports();
2373 
2374 	printf("Device of port %u is detached\n", port_id);
2375 	printf("Now total ports is %d\n", nb_ports);
2376 	printf("Done\n");
2377 	return;
2378 }
2379 
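/*
 * Clean up on exit: stop packet forwarding if it is still running, stop
 * and close every port, and tear down hot-plug event handling when it
 * was enabled.
 */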
2380 void
2381 pmd_test_exit(void)
2382 {
2383 	struct rte_device *device;
2384 	portid_t pt_id;
2385 	int ret;
2386 
2387 	if (test_done == 0)
2388 		stop_packet_forwarding();
2389 
2390 	if (ports != NULL) {
2391 		no_link_check = 1;
2392 		RTE_ETH_FOREACH_DEV(pt_id) {
2393 			printf("\nShutting down port %d...\n", pt_id);
2394 			fflush(stdout);
2395 			stop_port(pt_id);
2396 			close_port(pt_id);
2397 
2398 			/*
2399 			 * This is a workaround for a virtio-user issue that
2400 			 * requires calling the clean-up routine to remove the
2401 			 * existing socket.
2402 			 * The workaround is valid only for testpmd; a fix that
2403 			 * works for all applications is still needed.
2404 			 * TODO: Implement proper resource cleanup
2405 			 */
2406 			device = rte_eth_devices[pt_id].device;
2407 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2408 				detach_port_device(pt_id);
2409 		}
2410 	}
2411 
2412 	if (hot_plug) {
2413 		ret = rte_dev_event_monitor_stop();
2414 		if (ret) {
2415 			RTE_LOG(ERR, EAL,
2416 				"fail to stop device event monitor.");
2417 			return;
2418 		}
2419 
2420 		ret = rte_dev_event_callback_unregister(NULL,
2421 			eth_dev_event_callback, NULL);
2422 		if (ret < 0) {
2423 			RTE_LOG(ERR, EAL,
2424 				"fail to unregister device event callback.\n");
2425 			return;
2426 		}
2427 
2428 		ret = rte_dev_hotplug_handle_disable();
2429 		if (ret) {
2430 			RTE_LOG(ERR, EAL,
2431 				"fail to disable hotplug handling.\n");
2432 			return;
2433 		}
2434 	}
2435 
2436 	printf("\nBye...\n");
2437 }
2438 
2439 typedef void (*cmd_func_t)(void);
2440 struct pmd_test_command {
2441 	const char *cmd_name;
2442 	cmd_func_t cmd_func;
2443 };
2444 
2445 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2446 
2447 /* Check the link status of all ports for up to 9 seconds, then print the final status */
2448 static void
2449 check_all_ports_link_status(uint32_t port_mask)
2450 {
2451 #define CHECK_INTERVAL 100 /* 100ms */
2452 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2453 	portid_t portid;
2454 	uint8_t count, all_ports_up, print_flag = 0;
2455 	struct rte_eth_link link;
2456 
2457 	printf("Checking link statuses...\n");
2458 	fflush(stdout);
2459 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2460 		all_ports_up = 1;
2461 		RTE_ETH_FOREACH_DEV(portid) {
2462 			if ((port_mask & (1 << portid)) == 0)
2463 				continue;
2464 			memset(&link, 0, sizeof(link));
2465 			rte_eth_link_get_nowait(portid, &link);
2466 			/* print link status if flag set */
2467 			if (print_flag == 1) {
2468 				if (link.link_status)
2469 					printf(
2470 					"Port%d Link Up. speed %u Mbps- %s\n",
2471 					portid, link.link_speed,
2472 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2473 					("full-duplex") : ("half-duplex\n"));
2474 				else
2475 					printf("Port %d Link Down\n", portid);
2476 				continue;
2477 			}
2478 			/* clear all_ports_up flag if any link down */
2479 			if (link.link_status == ETH_LINK_DOWN) {
2480 				all_ports_up = 0;
2481 				break;
2482 			}
2483 		}
2484 		/* after finally printing all link status, get out */
2485 		if (print_flag == 1)
2486 			break;
2487 
2488 		if (all_ports_up == 0) {
2489 			fflush(stdout);
2490 			rte_delay_ms(CHECK_INTERVAL);
2491 		}
2492 
2493 		/* set the print_flag if all ports up or timeout */
2494 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2495 			print_flag = 1;
2496 		}
2497 
2498 		if (lsc_interrupt)
2499 			break;
2500 	}
2501 }
2502 
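/*
 * Deferred handler for device removal (RMV) interrupts: stop forwarding
 * if the port is in use, stop and close the port, detach its device, and
 * restart forwarding if it had to be interrupted.
 */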
2503 static void
2504 rmv_event_callback(void *arg)
2505 {
2506 	int need_to_start = 0;
2507 	int org_no_link_check = no_link_check;
2508 	portid_t port_id = (intptr_t)arg;
2509 
2510 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2511 
2512 	if (!test_done && port_is_forwarding(port_id)) {
2513 		need_to_start = 1;
2514 		stop_packet_forwarding();
2515 	}
2516 	no_link_check = 1;
2517 	stop_port(port_id);
2518 	no_link_check = org_no_link_check;
2519 	close_port(port_id);
2520 	detach_port_device(port_id);
2521 	if (need_to_start)
2522 		start_packet_forwarding(0);
2523 }
2524 
2525 /* This function is used by the interrupt thread */
2526 static int
2527 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2528 		  void *ret_param)
2529 {
2530 	RTE_SET_USED(param);
2531 	RTE_SET_USED(ret_param);
2532 
2533 	if (type >= RTE_ETH_EVENT_MAX) {
2534 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2535 			port_id, __func__, type);
2536 		fflush(stderr);
2537 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2538 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2539 			eth_event_desc[type]);
2540 		fflush(stdout);
2541 	}
2542 
2543 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2544 		return 0;
2545 
2546 	switch (type) {
2547 	case RTE_ETH_EVENT_INTR_RMV:
2548 		if (rte_eal_alarm_set(100000,
2549 				rmv_event_callback, (void *)(intptr_t)port_id))
2550 			fprintf(stderr, "Could not set up deferred device removal\n");
2551 		break;
2552 	default:
2553 		break;
2554 	}
2555 	return 0;
2556 }
2557 
2558 static int
2559 register_eth_event_callback(void)
2560 {
2561 	int ret;
2562 	enum rte_eth_event_type event;
2563 
2564 	for (event = RTE_ETH_EVENT_UNKNOWN;
2565 			event < RTE_ETH_EVENT_MAX; event++) {
2566 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2567 				event,
2568 				eth_event_callback,
2569 				NULL);
2570 		if (ret != 0) {
2571 			TESTPMD_LOG(ERR, "Failed to register callback for "
2572 					"%s event\n", eth_event_desc[event]);
2573 			return -1;
2574 		}
2575 	}
2576 
2577 	return 0;
2578 }
2579 
2580 /* This function is used by the interrupt thread */
2581 static void
2582 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2583 			     __rte_unused void *arg)
2584 {
2585 	uint16_t port_id;
2586 	int ret;
2587 
2588 	if (type >= RTE_DEV_EVENT_MAX) {
2589 		fprintf(stderr, "%s called upon invalid event %d\n",
2590 			__func__, type);
2591 		fflush(stderr);
2592 	}
2593 
2594 	switch (type) {
2595 	case RTE_DEV_EVENT_REMOVE:
2596 		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2597 			device_name);
2598 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2599 		if (ret) {
2600 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2601 				device_name);
2602 			return;
2603 		}
2604 		rmv_event_callback((void *)(intptr_t)port_id);
2605 		break;
2606 	case RTE_DEV_EVENT_ADD:
2607 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2608 			device_name);
2609 		/* TODO: once kernel driver binding is finished,
2610 		 * begin to attach the port.
2611 		 */
2612 		break;
2613 	default:
2614 		break;
2615 	}
2616 }
2617 
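/*
 * Apply the configured TX queue to statistics counter mappings that match
 * this port, and flag the port when at least one mapping was programmed.
 */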
2618 static int
2619 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2620 {
2621 	uint16_t i;
2622 	int diag;
2623 	uint8_t mapping_found = 0;
2624 
2625 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2626 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2627 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2628 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2629 					tx_queue_stats_mappings[i].queue_id,
2630 					tx_queue_stats_mappings[i].stats_counter_id);
2631 			if (diag != 0)
2632 				return diag;
2633 			mapping_found = 1;
2634 		}
2635 	}
2636 	if (mapping_found)
2637 		port->tx_queue_stats_mapping_enabled = 1;
2638 	return 0;
2639 }
2640 
2641 static int
2642 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2643 {
2644 	uint16_t i;
2645 	int diag;
2646 	uint8_t mapping_found = 0;
2647 
2648 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2649 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2650 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2651 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2652 					rx_queue_stats_mappings[i].queue_id,
2653 					rx_queue_stats_mappings[i].stats_counter_id);
2654 			if (diag != 0)
2655 				return diag;
2656 			mapping_found = 1;
2657 		}
2658 	}
2659 	if (mapping_found)
2660 		port->rx_queue_stats_mapping_enabled = 1;
2661 	return 0;
2662 }
2663 
2664 static void
2665 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2666 {
2667 	int diag = 0;
2668 
2669 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2670 	if (diag != 0) {
2671 		if (diag == -ENOTSUP) {
2672 			port->tx_queue_stats_mapping_enabled = 0;
2673 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2674 		}
2675 		else
2676 			rte_exit(EXIT_FAILURE,
2677 					"set_tx_queue_stats_mapping_registers "
2678 					"failed for port id=%d diag=%d\n",
2679 					pi, diag);
2680 	}
2681 
2682 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2683 	if (diag != 0) {
2684 		if (diag == -ENOTSUP) {
2685 			port->rx_queue_stats_mapping_enabled = 0;
2686 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2687 		}
2688 		else
2689 			rte_exit(EXIT_FAILURE,
2690 					"set_rx_queue_stats_mapping_registers "
2691 					"failed for port id=%d diag=%d\n",
2692 					pi, diag);
2693 	}
2694 }
2695 
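/*
 * Initialize the per-queue RX/TX configuration of a port from the driver
 * defaults reported in dev_info, overriding each threshold that was
 * explicitly configured (RTE_PMD_PARAM_UNSET means keep the driver default).
 */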
2696 static void
2697 rxtx_port_config(struct rte_port *port)
2698 {
2699 	uint16_t qid;
2700 
2701 	for (qid = 0; qid < nb_rxq; qid++) {
2702 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2703 
2704 		/* Check if any Rx parameters have been passed */
2705 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2706 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2707 
2708 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2709 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2710 
2711 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2712 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2713 
2714 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2715 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2716 
2717 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2718 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2719 
2720 		port->nb_rx_desc[qid] = nb_rxd;
2721 	}
2722 
2723 	for (qid = 0; qid < nb_txq; qid++) {
2724 		port->tx_conf[qid] = port->dev_info.default_txconf;
2725 
2726 		/* Check if any Tx parameters have been passed */
2727 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2728 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2729 
2730 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2731 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2732 
2733 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2734 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2735 
2736 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2737 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2738 
2739 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2740 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2741 
2742 		port->nb_tx_desc[qid] = nb_txd;
2743 	}
2744 }
2745 
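/*
 * Build the default configuration of every probed port: RSS when more than
 * one RX queue is used, per-queue thresholds, statistics mappings and, when
 * requested and supported, LSC/RMV interrupts.
 */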
2746 void
2747 init_port_config(void)
2748 {
2749 	portid_t pid;
2750 	struct rte_port *port;
2751 
2752 	RTE_ETH_FOREACH_DEV(pid) {
2753 		port = &ports[pid];
2754 		port->dev_conf.fdir_conf = fdir_conf;
2755 		rte_eth_dev_info_get(pid, &port->dev_info);
2756 		if (nb_rxq > 1) {
2757 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2758 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2759 				rss_hf & port->dev_info.flow_type_rss_offloads;
2760 		} else {
2761 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2762 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2763 		}
2764 
2765 		if (port->dcb_flag == 0) {
2766 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2767 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2768 			else
2769 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2770 		}
2771 
2772 		rxtx_port_config(port);
2773 
2774 		rte_eth_macaddr_get(pid, &port->eth_addr);
2775 
2776 		map_port_queue_stats_mapping_registers(pid, port);
2777 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2778 		rte_pmd_ixgbe_bypass_init(pid);
2779 #endif
2780 
2781 		if (lsc_interrupt &&
2782 		    (rte_eth_devices[pid].data->dev_flags &
2783 		     RTE_ETH_DEV_INTR_LSC))
2784 			port->dev_conf.intr_conf.lsc = 1;
2785 		if (rmv_interrupt &&
2786 		    (rte_eth_devices[pid].data->dev_flags &
2787 		     RTE_ETH_DEV_INTR_RMV))
2788 			port->dev_conf.intr_conf.rmv = 1;
2789 	}
2790 }
2791 
2792 void set_port_slave_flag(portid_t slave_pid)
2793 {
2794 	struct rte_port *port;
2795 
2796 	port = &ports[slave_pid];
2797 	port->slave_flag = 1;
2798 }
2799 
2800 void clear_port_slave_flag(portid_t slave_pid)
2801 {
2802 	struct rte_port *port;
2803 
2804 	port = &ports[slave_pid];
2805 	port->slave_flag = 0;
2806 }
2807 
2808 uint8_t port_is_bonding_slave(portid_t slave_pid)
2809 {
2810 	struct rte_port *port;
2811 
2812 	port = &ports[slave_pid];
2813 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2814 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2815 		return 1;
2816 	return 0;
2817 }
2818 
2819 const uint16_t vlan_tags[] = {
2820 		0,  1,  2,  3,  4,  5,  6,  7,
2821 		8,  9, 10, 11,  12, 13, 14, 15,
2822 		16, 17, 18, 19, 20, 21, 22, 23,
2823 		24, 25, 26, 27, 28, 29, 30, 31
2824 };
2825 
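/*
 * Fill eth_conf for DCB operation: with DCB_VT_ENABLED the VMDq+DCB pool
 * mapping is built from the vlan_tags[] array above, otherwise plain DCB
 * (with RSS on RX) is configured for num_tcs traffic classes.
 */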
2826 static  int
2827 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2828 		 enum dcb_mode_enable dcb_mode,
2829 		 enum rte_eth_nb_tcs num_tcs,
2830 		 uint8_t pfc_en)
2831 {
2832 	uint8_t i;
2833 	int32_t rc;
2834 	struct rte_eth_rss_conf rss_conf;
2835 
2836 	/*
2837 	 * Build up the correct configuration for DCB+VT based on the vlan_tags array
2838 	 * given above, and the number of traffic classes available for use.
2839 	 */
2840 	if (dcb_mode == DCB_VT_ENABLED) {
2841 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2842 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2843 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2844 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2845 
2846 		/* VMDQ+DCB RX and TX configurations */
2847 		vmdq_rx_conf->enable_default_pool = 0;
2848 		vmdq_rx_conf->default_pool = 0;
2849 		vmdq_rx_conf->nb_queue_pools =
2850 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2851 		vmdq_tx_conf->nb_queue_pools =
2852 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2853 
2854 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2855 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2856 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2857 			vmdq_rx_conf->pool_map[i].pools =
2858 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2859 		}
2860 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2861 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2862 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2863 		}
2864 
2865 		/* set DCB mode of RX and TX of multiple queues */
2866 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2867 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2868 	} else {
2869 		struct rte_eth_dcb_rx_conf *rx_conf =
2870 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2871 		struct rte_eth_dcb_tx_conf *tx_conf =
2872 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2873 
2874 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2875 		if (rc != 0)
2876 			return rc;
2877 
2878 		rx_conf->nb_tcs = num_tcs;
2879 		tx_conf->nb_tcs = num_tcs;
2880 
2881 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2882 			rx_conf->dcb_tc[i] = i % num_tcs;
2883 			tx_conf->dcb_tc[i] = i % num_tcs;
2884 		}
2885 
2886 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2887 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
2888 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2889 	}
2890 
2891 	if (pfc_en)
2892 		eth_conf->dcb_capability_en =
2893 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2894 	else
2895 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2896 
2897 	return 0;
2898 }
2899 
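/*
 * Reconfigure a port for DCB testing: build the DCB configuration, adjust
 * the global RX/TX queue counts to the device capabilities, and enable VLAN
 * filtering for all entries of vlan_tags[].
 */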
2900 int
2901 init_port_dcb_config(portid_t pid,
2902 		     enum dcb_mode_enable dcb_mode,
2903 		     enum rte_eth_nb_tcs num_tcs,
2904 		     uint8_t pfc_en)
2905 {
2906 	struct rte_eth_conf port_conf;
2907 	struct rte_port *rte_port;
2908 	int retval;
2909 	uint16_t i;
2910 
2911 	rte_port = &ports[pid];
2912 
2913 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2914 	/* Enter DCB configuration status */
2915 	dcb_config = 1;
2916 
2917 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2918 	port_conf.txmode = rte_port->dev_conf.txmode;
2919 
2920 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2921 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2922 	if (retval < 0)
2923 		return retval;
2924 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2925 
2926 	/* Re-configure the device. */
2927 	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2928 
2929 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2930 
2931 	/* If dev_info.vmdq_pool_base is greater than 0,
2932 	 * the queue ids of the VMDq pools start after the PF queues.
2933 	 */
2934 	if (dcb_mode == DCB_VT_ENABLED &&
2935 	    rte_port->dev_info.vmdq_pool_base > 0) {
2936 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2937 			" for port %d.", pid);
2938 		return -1;
2939 	}
2940 
2941 	/* Assume the ports in testpmd have the same DCB capability
2942 	 * and the same number of rxq and txq in DCB mode.
2943 	 */
2944 	if (dcb_mode == DCB_VT_ENABLED) {
2945 		if (rte_port->dev_info.max_vfs > 0) {
2946 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2947 			nb_txq = rte_port->dev_info.nb_tx_queues;
2948 		} else {
2949 			nb_rxq = rte_port->dev_info.max_rx_queues;
2950 			nb_txq = rte_port->dev_info.max_tx_queues;
2951 		}
2952 	} else {
2953 		/* If VT is disabled, use all PF queues */
2954 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2955 			nb_rxq = rte_port->dev_info.max_rx_queues;
2956 			nb_txq = rte_port->dev_info.max_tx_queues;
2957 		} else {
2958 			nb_rxq = (queueid_t)num_tcs;
2959 			nb_txq = (queueid_t)num_tcs;
2960 
2961 		}
2962 	}
2963 	rx_free_thresh = 64;
2964 
2965 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2966 
2967 	rxtx_port_config(rte_port);
2968 	/* VLAN filter */
2969 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2970 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2971 		rx_vft_set(pid, vlan_tags[i], 1);
2972 
2973 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2974 	map_port_queue_stats_mapping_registers(pid, rte_port);
2975 
2976 	rte_port->dcb_flag = 1;
2977 
2978 	return 0;
2979 }
2980 
2981 static void
2982 init_port(void)
2983 {
2984 	/* Configuration of Ethernet ports. */
2985 	ports = rte_zmalloc("testpmd: ports",
2986 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2987 			    RTE_CACHE_LINE_SIZE);
2988 	if (ports == NULL) {
2989 		rte_exit(EXIT_FAILURE,
2990 				"rte_zmalloc(%d struct rte_port) failed\n",
2991 				RTE_MAX_ETHPORTS);
2992 	}
2993 
2994 	/* Initialize ports NUMA structures */
2995 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2996 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2997 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2998 }
2999 
3000 static void
3001 force_quit(void)
3002 {
3003 	pmd_test_exit();
3004 	prompt_exit();
3005 }
3006 
3007 static void
3008 print_stats(void)
3009 {
3010 	uint8_t i;
3011 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3012 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3013 
3014 	/* Clear screen and move to top left */
3015 	printf("%s%s", clr, top_left);
3016 
3017 	printf("\nPort statistics ====================================");
3018 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3019 		nic_stats_display(fwd_ports_ids[i]);
3020 }
3021 
3022 static void
3023 signal_handler(int signum)
3024 {
3025 	if (signum == SIGINT || signum == SIGTERM) {
3026 		printf("\nSignal %d received, preparing to exit...\n",
3027 				signum);
3028 #ifdef RTE_LIBRTE_PDUMP
3029 		/* uninitialize packet capture framework */
3030 		rte_pdump_uninit();
3031 #endif
3032 #ifdef RTE_LIBRTE_LATENCY_STATS
3033 		rte_latencystats_uninit();
3034 #endif
3035 		force_quit();
3036 		/* Set flag to indicate forced termination. */
3037 		f_quit = 1;
3038 		/* exit with the expected status */
3039 		signal(signum, SIG_DFL);
3040 		kill(getpid(), signum);
3041 	}
3042 }
3043 
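/*
 * testpmd entry point: initialize the EAL, discover the ports, parse the
 * application arguments, start all ports and then either run the
 * interactive prompt or forward packets until the user quits.
 */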
3044 int
3045 main(int argc, char** argv)
3046 {
3047 	int diag;
3048 	portid_t port_id;
3049 	uint16_t count;
3050 	int ret;
3051 
3052 	signal(SIGINT, signal_handler);
3053 	signal(SIGTERM, signal_handler);
3054 
3055 	diag = rte_eal_init(argc, argv);
3056 	if (diag < 0)
3057 		rte_panic("Cannot init EAL\n");
3058 
3059 	testpmd_logtype = rte_log_register("testpmd");
3060 	if (testpmd_logtype < 0)
3061 		rte_panic("Cannot register log type");
3062 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3063 
3064 	ret = register_eth_event_callback();
3065 	if (ret != 0)
3066 		rte_panic("Cannot register for ethdev events");
3067 
3068 #ifdef RTE_LIBRTE_PDUMP
3069 	/* initialize packet capture framework */
3070 	rte_pdump_init(NULL);
3071 #endif
3072 
3073 	count = 0;
3074 	RTE_ETH_FOREACH_DEV(port_id) {
3075 		ports_ids[count] = port_id;
3076 		count++;
3077 	}
3078 	nb_ports = (portid_t) count;
3079 	if (nb_ports == 0)
3080 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3081 
3082 	/* allocate port structures, and init them */
3083 	init_port();
3084 
3085 	set_def_fwd_config();
3086 	if (nb_lcores == 0)
3087 		rte_panic("Empty set of forwarding logical cores - check the "
3088 			  "core mask supplied in the command parameters\n");
3089 
3090 	/* Bitrate/latency stats disabled by default */
3091 #ifdef RTE_LIBRTE_BITRATE
3092 	bitrate_enabled = 0;
3093 #endif
3094 #ifdef RTE_LIBRTE_LATENCY_STATS
3095 	latencystats_enabled = 0;
3096 #endif
3097 
3098 	/* on FreeBSD, mlockall() is disabled by default */
3099 #ifdef RTE_EXEC_ENV_BSDAPP
3100 	do_mlockall = 0;
3101 #else
3102 	do_mlockall = 1;
3103 #endif
3104 
3105 	argc -= diag;
3106 	argv += diag;
3107 	if (argc > 1)
3108 		launch_args_parse(argc, argv);
3109 
3110 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3111 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3112 			strerror(errno));
3113 	}
3114 
3115 	if (tx_first && interactive)
3116 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3117 				"interactive mode.\n");
3118 
3119 	if (tx_first && lsc_interrupt) {
3120 		printf("Warning: lsc_interrupt needs to be off when "
3121 				" using tx_first. Disabling.\n");
3122 		lsc_interrupt = 0;
3123 	}
3124 
3125 	if (!nb_rxq && !nb_txq)
3126 		printf("Warning: Either rx or tx queues should be non-zero\n");
3127 
3128 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3129 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3130 		       "but nb_txq=%d will prevent to fully test it.\n",
3131 		       nb_rxq, nb_txq);
3132 
3133 	init_config();
3134 
3135 	if (hot_plug) {
3136 		ret = rte_dev_hotplug_handle_enable();
3137 		if (ret) {
3138 			RTE_LOG(ERR, EAL,
3139 				"fail to enable hotplug handling.");
3140 			return -1;
3141 		}
3142 
3143 		ret = rte_dev_event_monitor_start();
3144 		if (ret) {
3145 			RTE_LOG(ERR, EAL,
3146 				"fail to start device event monitoring.");
3147 			return -1;
3148 		}
3149 
3150 		ret = rte_dev_event_callback_register(NULL,
3151 			eth_dev_event_callback, NULL);
3152 		if (ret) {
3153 			RTE_LOG(ERR, EAL,
3154 				"fail  to register device event callback\n");
3155 			return -1;
3156 		}
3157 	}
3158 
3159 	if (start_port(RTE_PORT_ALL) != 0)
3160 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3161 
3162 	/* set all ports to promiscuous mode by default */
3163 	RTE_ETH_FOREACH_DEV(port_id)
3164 		rte_eth_promiscuous_enable(port_id);
3165 
3166 	/* Init metrics library */
3167 	rte_metrics_init(rte_socket_id());
3168 
3169 #ifdef RTE_LIBRTE_LATENCY_STATS
3170 	if (latencystats_enabled != 0) {
3171 		int ret = rte_latencystats_init(1, NULL);
3172 		if (ret)
3173 			printf("Warning: latencystats init()"
3174 				" returned error %d\n",	ret);
3175 		printf("Latencystats running on lcore %d\n",
3176 			latencystats_lcore_id);
3177 	}
3178 #endif
3179 
3180 	/* Setup bitrate stats */
3181 #ifdef RTE_LIBRTE_BITRATE
3182 	if (bitrate_enabled != 0) {
3183 		bitrate_data = rte_stats_bitrate_create();
3184 		if (bitrate_data == NULL)
3185 			rte_exit(EXIT_FAILURE,
3186 				"Could not allocate bitrate data.\n");
3187 		rte_stats_bitrate_reg(bitrate_data);
3188 	}
3189 #endif
3190 
3191 #ifdef RTE_LIBRTE_CMDLINE
3192 	if (strlen(cmdline_filename) != 0)
3193 		cmdline_read_from_file(cmdline_filename);
3194 
3195 	if (interactive == 1) {
3196 		if (auto_start) {
3197 			printf("Start automatic packet forwarding\n");
3198 			start_packet_forwarding(0);
3199 		}
3200 		prompt();
3201 		pmd_test_exit();
3202 	} else
3203 #endif
3204 	{
3205 		char c;
3206 		int rc;
3207 
3208 		f_quit = 0;
3209 
3210 		printf("No commandline core given, start packet forwarding\n");
3211 		start_packet_forwarding(tx_first);
3212 		if (stats_period != 0) {
3213 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3214 			uint64_t timer_period;
3215 
3216 			/* Convert to number of cycles */
3217 			timer_period = stats_period * rte_get_timer_hz();
3218 
3219 			while (f_quit == 0) {
3220 				cur_time = rte_get_timer_cycles();
3221 				diff_time += cur_time - prev_time;
3222 
3223 				if (diff_time >= timer_period) {
3224 					print_stats();
3225 					/* Reset the timer */
3226 					diff_time = 0;
3227 				}
3228 				/* Sleep to avoid unnecessary checks */
3229 				prev_time = cur_time;
3230 				sleep(1);
3231 			}
3232 		}
3233 
3234 		printf("Press enter to exit\n");
3235 		rc = read(0, &c, 1);
3236 		pmd_test_exit();
3237 		if (rc < 0)
3238 			return 1;
3239 	}
3240 
3241 	return 0;
3242 }
3243