xref: /dpdk/app/test-pmd/testpmd.c (revision 3889a3220c9b93f07dfdf95e30b2661e6e3f698f)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
12761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
131c036b16SEelco Chaudron #include <sys/mman.h>
14761f7ae1SJie Zhou #endif
15af75078fSIntel #include <sys/types.h>
16af75078fSIntel #include <errno.h>
17fb73e096SJeff Guo #include <stdbool.h>
18af75078fSIntel 
19af75078fSIntel #include <sys/queue.h>
20af75078fSIntel #include <sys/stat.h>
21af75078fSIntel 
22af75078fSIntel #include <stdint.h>
23af75078fSIntel #include <unistd.h>
24af75078fSIntel #include <inttypes.h>
25af75078fSIntel 
26af75078fSIntel #include <rte_common.h>
27d1eb542eSOlivier Matz #include <rte_errno.h>
28af75078fSIntel #include <rte_byteorder.h>
29af75078fSIntel #include <rte_log.h>
30af75078fSIntel #include <rte_debug.h>
31af75078fSIntel #include <rte_cycles.h>
32af75078fSIntel #include <rte_memory.h>
33af75078fSIntel #include <rte_memcpy.h>
34af75078fSIntel #include <rte_launch.h>
35af75078fSIntel #include <rte_eal.h>
36284c908cSGaetan Rivet #include <rte_alarm.h>
37af75078fSIntel #include <rte_per_lcore.h>
38af75078fSIntel #include <rte_lcore.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50a8d0d473SBruce Richardson #ifdef RTE_NET_IXGBE
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
57bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
587e4441c8SRemy Horton #include <rte_metrics.h>
59bb9be9a4SDavid Marchand #endif
60a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
617e4441c8SRemy Horton #include <rte_bitrate.h>
627e4441c8SRemy Horton #endif
63a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
6462d3216dSReshma Pattan #include <rte_latencystats.h>
6562d3216dSReshma Pattan #endif
66761f7ae1SJie Zhou #ifdef RTE_EXEC_ENV_WINDOWS
67761f7ae1SJie Zhou #include <process.h>
68761f7ae1SJie Zhou #endif
69e46372d7SHuisong Li #ifdef RTE_NET_BOND
70e46372d7SHuisong Li #include <rte_eth_bond.h>
71e46372d7SHuisong Li #endif
72af75078fSIntel 
73af75078fSIntel #include "testpmd.h"
74af75078fSIntel 
75c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
76c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
77c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
78c7f5dba7SAnatoly Burakov #else
79c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
80c7f5dba7SAnatoly Burakov #endif
81c7f5dba7SAnatoly Burakov 
82c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
83c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
84c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
85c7f5dba7SAnatoly Burakov #else
86c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
87c7f5dba7SAnatoly Burakov #endif
88c7f5dba7SAnatoly Burakov 
89c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
9013b19642SDmitry Kozlyuk /*
9113b19642SDmitry Kozlyuk  * Zone size with the malloc overhead (max of debug and release variants)
9213b19642SDmitry Kozlyuk  * must fit into the smallest supported hugepage size (2M),
9313b19642SDmitry Kozlyuk  * so that an IOVA-contiguous zone of this size can always be allocated
9413b19642SDmitry Kozlyuk  * if there are free 2M hugepages.
9513b19642SDmitry Kozlyuk  */
9613b19642SDmitry Kozlyuk #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
97c7f5dba7SAnatoly Burakov 
98af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
99285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
100af75078fSIntel 
101cb056611SStephen Hemminger /* use main core for command line ? */
102af75078fSIntel uint8_t interactive = 0;
103ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
10499cabef0SPablo de Lara uint8_t tx_first;
10581ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
106af75078fSIntel 
107af75078fSIntel /*
108af75078fSIntel  * NUMA support configuration.
109af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
110af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
111af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
112af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
113af75078fSIntel  */
114999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
115af75078fSIntel 
116af75078fSIntel /*
117b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
118b6ea6408SIntel  * not configured.
119b6ea6408SIntel  */
120b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
121b6ea6408SIntel 
122b6ea6408SIntel /*
123c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
124c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
125c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
126c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
127c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
128148f963fSBruce Richardson  */
129c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
130148f963fSBruce Richardson 
131148f963fSBruce Richardson /*
13263531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
13363531389SGeorgios Katsikas  * is allocated.
13463531389SGeorgios Katsikas  */
13563531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
13663531389SGeorgios Katsikas 
13763531389SGeorgios Katsikas /*
13863531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
13963531389SGeorgios Katsikas  * is allocated.
14063531389SGeorgios Katsikas  */
14163531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
14263531389SGeorgios Katsikas 
14363531389SGeorgios Katsikas /*
14463531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
14563531389SGeorgios Katsikas  * is allocated.
14663531389SGeorgios Katsikas  */
14763531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
14863531389SGeorgios Katsikas 
14963531389SGeorgios Katsikas /*
150af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
151af75078fSIntel  * forwarded.
152547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
153af75078fSIntel  * ports.
154af75078fSIntel  */
1556d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
156af75078fSIntel portid_t nb_peer_eth_addrs = 0;
157af75078fSIntel 
158af75078fSIntel /*
159af75078fSIntel  * Probed Target Environment.
160af75078fSIntel  */
161af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
162af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
163af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
164af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
165af75078fSIntel 
1664918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1674918a357SXiaoyun Li 
168af75078fSIntel /*
169af75078fSIntel  * Test Forwarding Configuration.
170af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
171af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
172af75078fSIntel  */
173af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
174af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
175af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
176af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
177af75078fSIntel 
178af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
179af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
180af75078fSIntel 
181af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
182af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
183af75078fSIntel 
184af75078fSIntel /*
185af75078fSIntel  * Forwarding engines.
186af75078fSIntel  */
/*
 * NULL-terminated table of every forwarding engine built into testpmd.
 * io_fwd_engine is the default (cur_fwd_config starts with cur_fwd_eng
 * pointing at it); the IEEE 1588 engine is only compiled in when
 * RTE_LIBRTE_IEEE1588 is defined.
 */
187af75078fSIntel struct fwd_engine * fwd_engines[] = {
188af75078fSIntel 	&io_fwd_engine,
189af75078fSIntel 	&mac_fwd_engine,
190d47388f1SCyril Chemparathy 	&mac_swap_engine,
191e9e23a61SCyril Chemparathy 	&flow_gen_engine,
192af75078fSIntel 	&rx_only_engine,
193af75078fSIntel 	&tx_only_engine,
194af75078fSIntel 	&csum_fwd_engine,
195168dfa61SIvan Boule 	&icmp_echo_engine,
1963c156061SJens Freimann 	&noisy_vnf_engine,
1972564abdaSShiri Kuzin 	&five_tuple_swap_fwd_engine,
198af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
199af75078fSIntel 	&ieee1588_fwd_engine,
200af75078fSIntel #endif
20159840375SXueming Li 	&shared_rxq_engine,
	/* Sentinel: iteration over this table stops at NULL. */
202af75078fSIntel 	NULL,
203af75078fSIntel };
204af75078fSIntel 
20526cbb419SViacheslav Ovsiienko struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
20659fcf854SShahaf Shuler uint16_t mempool_flags;
207401b744dSShahaf Shuler 
208af75078fSIntel struct fwd_config cur_fwd_config;
209af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
210bf56fce1SZhihong Wang uint32_t retry_enabled;
211bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
212bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
213af75078fSIntel 
21426cbb419SViacheslav Ovsiienko uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
21526cbb419SViacheslav Ovsiienko uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
21626cbb419SViacheslav Ovsiienko 	DEFAULT_MBUF_DATA_SIZE
21726cbb419SViacheslav Ovsiienko }; /**< Mbuf data space size. */
218c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
219c8798818SIntel                                       * specified on command-line. */
220cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
221d9a191a0SPhil Yang 
22263b72657SIvan Ilchenko /** Extended statistics to show. */
22363b72657SIvan Ilchenko struct rte_eth_xstat_name *xstats_display;
22463b72657SIvan Ilchenko 
22563b72657SIvan Ilchenko unsigned int xstats_display_num; /**< Size of extended statistics to show */
22663b72657SIvan Ilchenko 
227d9a191a0SPhil Yang /*
228d9a191a0SPhil Yang  * In a container, a process running with the 'stats-period' option cannot be
229d9a191a0SPhil Yang  * terminated. Set a flag to exit the stats period loop after receiving SIGINT/SIGTERM.
230d9a191a0SPhil Yang  */
231d9a191a0SPhil Yang uint8_t f_quit;
232*3889a322SHuisong Li uint8_t cl_quit; /* Quit testpmd from cmdline. */
233d9a191a0SPhil Yang 
234af75078fSIntel /*
2351bb4a528SFerruh Yigit  * Max Rx frame size, set by '--max-pkt-len' parameter.
2361bb4a528SFerruh Yigit  */
2371bb4a528SFerruh Yigit uint32_t max_rx_pkt_len;
2381bb4a528SFerruh Yigit 
2391bb4a528SFerruh Yigit /*
2400f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2410f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2420f2096d7SViacheslav Ovsiienko  */
2430f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2440f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
24591c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
24691c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2470f2096d7SViacheslav Ovsiienko 
2480f2096d7SViacheslav Ovsiienko /*
249af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
250af75078fSIntel  */
251af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
252af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
253af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
254af75078fSIntel };
255af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
256af75078fSIntel 
25779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
25879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
25979bec05bSKonstantin Ananyev 
26082010ef5SYongseok Koh uint8_t txonly_multi_flow;
26182010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
26282010ef5SYongseok Koh 
2634940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
2644940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
2654940344dSViacheslav Ovsiienko 
2664940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
2674940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
2684940344dSViacheslav Ovsiienko 
269af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
2706c02043eSIgor Russkikh uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
271861e7684SZhihong Wang int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
272e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
273af75078fSIntel 
274900550deSIntel /* whether the current configuration is in DCB mode; 0 means it is not in DCB mode */
275900550deSIntel uint8_t dcb_config = 0;
276900550deSIntel 
277af75078fSIntel /*
278af75078fSIntel  * Configurable number of RX/TX queues.
279af75078fSIntel  */
2801c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
281af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
282af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
283af75078fSIntel 
284af75078fSIntel /*
285af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2868599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
287af75078fSIntel  */
2888599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2898599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
290af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
291af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
292af75078fSIntel 
293f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
294af75078fSIntel /*
295af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
296af75078fSIntel  */
297af75078fSIntel 
298f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
299f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
300f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
301af75078fSIntel 
302f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
303f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
304f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
305af75078fSIntel 
306af75078fSIntel /*
307af75078fSIntel  * Configurable value of RX free threshold.
308af75078fSIntel  */
309f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
310af75078fSIntel 
311af75078fSIntel /*
312ce8d5614SIntel  * Configurable value of RX drop enable.
313ce8d5614SIntel  */
314f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
315ce8d5614SIntel 
316ce8d5614SIntel /*
317af75078fSIntel  * Configurable value of TX free threshold.
318af75078fSIntel  */
319f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
320af75078fSIntel 
321af75078fSIntel /*
322af75078fSIntel  * Configurable value of TX RS bit threshold.
323af75078fSIntel  */
324f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
325af75078fSIntel 
326af75078fSIntel /*
3273c156061SJens Freimann  * Configurable value of buffered packets before sending.
3283c156061SJens Freimann  */
3293c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
3303c156061SJens Freimann 
3313c156061SJens Freimann /*
3323c156061SJens Freimann  * Configurable value of packet buffer timeout.
3333c156061SJens Freimann  */
3343c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
3353c156061SJens Freimann 
3363c156061SJens Freimann /*
3373c156061SJens Freimann  * Configurable value for size of VNF internal memory area
3383c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3393c156061SJens Freimann  */
3403c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3413c156061SJens Freimann 
3423c156061SJens Freimann /*
3433c156061SJens Freimann  * Configurable value of number of random writes done in
3443c156061SJens Freimann  * VNF simulation memory area.
3453c156061SJens Freimann  */
3463c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3473c156061SJens Freimann 
3483c156061SJens Freimann /*
3493c156061SJens Freimann  * Configurable value of number of random reads done in
3503c156061SJens Freimann  * VNF simulation memory area.
3513c156061SJens Freimann  */
3523c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3533c156061SJens Freimann 
3543c156061SJens Freimann /*
3553c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3563c156061SJens Freimann  * VNF simulation memory area.
3573c156061SJens Freimann  */
3583c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3593c156061SJens Freimann 
3603c156061SJens Freimann /*
361af75078fSIntel  * Receive Side Scaling (RSS) configuration.
362af75078fSIntel  */
363295968d1SFerruh Yigit uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
364af75078fSIntel 
365af75078fSIntel /*
366af75078fSIntel  * Port topology configuration
367af75078fSIntel  */
368af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
369af75078fSIntel 
3707741e4cfSIntel /*
3717741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
3727741e4cfSIntel  */
3737741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3747741e4cfSIntel 
375af75078fSIntel /*
3767ee3e944SVasily Philipov  * Flow API isolated mode.
3777ee3e944SVasily Philipov  */
3787ee3e944SVasily Philipov uint8_t flow_isolate_all;
3797ee3e944SVasily Philipov 
3807ee3e944SVasily Philipov /*
381bc202406SDavid Marchand  * Avoid checking link status when starting/stopping a port.
382bc202406SDavid Marchand  */
383bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
384bc202406SDavid Marchand 
385bc202406SDavid Marchand /*
3866937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3876937d210SStephen Hemminger  */
3886937d210SStephen Hemminger uint8_t no_device_start = 0;
3896937d210SStephen Hemminger 
3906937d210SStephen Hemminger /*
3918ea656f8SGaetan Rivet  * Enable link status change notification
3928ea656f8SGaetan Rivet  */
3938ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3948ea656f8SGaetan Rivet 
3958ea656f8SGaetan Rivet /*
396284c908cSGaetan Rivet  * Enable device removal notification.
397284c908cSGaetan Rivet  */
398284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
399284c908cSGaetan Rivet 
400fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
401fb73e096SJeff Guo 
4024f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
4034f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
4044f1ed78eSThomas Monjalon 
405b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
406b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
407b0a9354aSPavan Nikhilesh 
40801817b10SBing Zhao /* Hairpin ports configuration mode. */
40901817b10SBing Zhao uint16_t hairpin_mode;
41001817b10SBing Zhao 
41197b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
41297b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
/*
 * Human-readable name for each RTE_ETH_EVENT_* value, indexed by the
 * event enum; the RTE_ETH_EVENT_MAX slot is NULL so lookups past the
 * last known event yield no description.
 */
41397b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
41497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
41597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
41697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
41797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
41897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
41997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
42097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
42197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
42297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
42397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
4240e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
424bc70e559SSpike Du 	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
42597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
42697b5d8b5SThomas Monjalon };
42797b5d8b5SThomas Monjalon 
428284c908cSGaetan Rivet /*
4293af72783SGaetan Rivet  * Display or mask ether events
4303af72783SGaetan Rivet  * Default to all events except VF_MBOX
4313af72783SGaetan Rivet  */
4323af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
4333af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
4343af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
4353af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
436badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
4373af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
4380e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
4390e459ffaSDong Zhou 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
440e505d84cSAnatoly Burakov /*
441e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
442e505d84cSAnatoly Burakov  */
443e505d84cSAnatoly Burakov int do_mlockall = 0;
4443af72783SGaetan Rivet 
4453af72783SGaetan Rivet /*
4467b7e5ba7SIntel  * NIC bypass mode configuration options.
4477b7e5ba7SIntel  */
4487b7e5ba7SIntel 
449a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
4507b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
451e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4527b7e5ba7SIntel #endif
4537b7e5ba7SIntel 
454e261265eSRadu Nicolau 
455a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
45662d3216dSReshma Pattan 
45762d3216dSReshma Pattan /*
45862d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
45962d3216dSReshma Pattan  */
46062d3216dSReshma Pattan uint8_t latencystats_enabled;
46162d3216dSReshma Pattan 
46262d3216dSReshma Pattan /*
4637be78d02SJosh Soref  * Lcore ID to service latency statistics.
46462d3216dSReshma Pattan  */
46562d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
46662d3216dSReshma Pattan 
46762d3216dSReshma Pattan #endif
46862d3216dSReshma Pattan 
4697b7e5ba7SIntel /*
470af75078fSIntel  * Ethernet device configuration.
471af75078fSIntel  */
4721bb4a528SFerruh Yigit struct rte_eth_rxmode rx_mode;
473af75078fSIntel 
47407e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
475295968d1SFerruh Yigit 	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
47607e5f7bdSShahaf Shuler };
477fd8c20aaSShahaf Shuler 
/*
 * Default flow-director configuration: the feature starts disabled
 * (RTE_FDIR_MODE_NONE) with status reporting enabled, and the masks
 * below match (almost) all bits of the relevant header fields.
 */
478295968d1SFerruh Yigit struct rte_eth_fdir_conf fdir_conf = {
479af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
480295968d1SFerruh Yigit 	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
481af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
482d9d5e6f2SJingjing Wu 	.mask = {
		/* NOTE(review): 0xFFEF leaves one TCI bit unmatched —
		 * presumably intentional; confirm against consuming PMDs. */
48326f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
484d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
485d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
486d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
487d9d5e6f2SJingjing Wu 		},
488d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
489d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
490d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
491d9d5e6f2SJingjing Wu 		},
492d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
493d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
49447b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
49547b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
49647b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
497d9d5e6f2SJingjing Wu 	},
	/* NOTE(review): 127 looks like a driver-specific default drop
	 * queue id — confirm per PMD. */
498af75078fSIntel 	.drop_queue = 127,
499af75078fSIntel };
500af75078fSIntel 
5012950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
502af75078fSIntel 
503a4fd5eeeSElza Mathew /*
504a4fd5eeeSElza Mathew  * Display zero values by default for xstats
505a4fd5eeeSElza Mathew  */
506a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
507a4fd5eeeSElza Mathew 
508bc700b67SDharmik Thakkar /*
509bc700b67SDharmik Thakkar  * Measure of CPU cycles disabled by default
510bc700b67SDharmik Thakkar  */
511bc700b67SDharmik Thakkar uint8_t record_core_cycles;
512bc700b67SDharmik Thakkar 
5130e4b1963SDharmik Thakkar /*
5140e4b1963SDharmik Thakkar  * Display of RX and TX bursts disabled by default
5150e4b1963SDharmik Thakkar  */
5160e4b1963SDharmik Thakkar uint8_t record_burst_stats;
5170e4b1963SDharmik Thakkar 
518f4d178c1SXueming Li /*
519f4d178c1SXueming Li  * Number of ports per shared Rx queue group, 0 disable.
520f4d178c1SXueming Li  */
521f4d178c1SXueming Li uint32_t rxq_share;
522f4d178c1SXueming Li 
523c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
524c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
5257acf894dSStephen Hurd 
526a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
5277e4441c8SRemy Horton /* Bitrate statistics */
5287e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
529e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
530e25e6c70SRemy Horton uint8_t bitrate_enabled;
531e25e6c70SRemy Horton #endif
5327e4441c8SRemy Horton 
5336970401eSDavid Marchand #ifdef RTE_LIB_GRO
534b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
535b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
5366970401eSDavid Marchand #endif
537b40f8d78SJiayu Hu 
538f9295aa2SXiaoyu Min /*
539f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
540f9295aa2SXiaoyu Min  */
541295968d1SFerruh Yigit enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
542f9295aa2SXiaoyu Min 
543b7b78a08SAjit Khaparde /*
544b7b78a08SAjit Khaparde  * Used to set forced link speed
545b7b78a08SAjit Khaparde  */
546b7b78a08SAjit Khaparde uint32_t eth_link_speed;
547b7b78a08SAjit Khaparde 
548a550baf2SMin Hu (Connor) /*
549a550baf2SMin Hu (Connor)  * ID of the current process in multi-process, used to
550a550baf2SMin Hu (Connor)  * configure the queues to be polled.
551a550baf2SMin Hu (Connor)  */
552a550baf2SMin Hu (Connor) int proc_id;
553a550baf2SMin Hu (Connor) 
554a550baf2SMin Hu (Connor) /*
555a550baf2SMin Hu (Connor)  * Number of processes in multi-process, used to
556a550baf2SMin Hu (Connor)  * configure the queues to be polled.
557a550baf2SMin Hu (Connor)  */
558a550baf2SMin Hu (Connor) unsigned int num_procs = 1;
559a550baf2SMin Hu (Connor) 
560f6d8a6d3SIvan Malov static void
561f6d8a6d3SIvan Malov eth_rx_metadata_negotiate_mp(uint16_t port_id)
562f6d8a6d3SIvan Malov {
563f6d8a6d3SIvan Malov 	uint64_t rx_meta_features = 0;
564f6d8a6d3SIvan Malov 	int ret;
565f6d8a6d3SIvan Malov 
566f6d8a6d3SIvan Malov 	if (!is_proc_primary())
567f6d8a6d3SIvan Malov 		return;
568f6d8a6d3SIvan Malov 
569f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
570f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
571f6d8a6d3SIvan Malov 	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
572f6d8a6d3SIvan Malov 
573f6d8a6d3SIvan Malov 	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
574f6d8a6d3SIvan Malov 	if (ret == 0) {
575f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
576f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
577f6d8a6d3SIvan Malov 				    port_id);
578f6d8a6d3SIvan Malov 		}
579f6d8a6d3SIvan Malov 
580f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
581f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
582f6d8a6d3SIvan Malov 				    port_id);
583f6d8a6d3SIvan Malov 		}
584f6d8a6d3SIvan Malov 
585f6d8a6d3SIvan Malov 		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
586f6d8a6d3SIvan Malov 			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
587f6d8a6d3SIvan Malov 				    port_id);
588f6d8a6d3SIvan Malov 		}
589f6d8a6d3SIvan Malov 	} else if (ret != -ENOTSUP) {
590f6d8a6d3SIvan Malov 		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
591f6d8a6d3SIvan Malov 			 port_id, rte_strerror(-ret));
592f6d8a6d3SIvan Malov 	}
593f6d8a6d3SIvan Malov }
594f6d8a6d3SIvan Malov 
/*
 * Multi-process wrapper for rte_eth_dev_configure(): only the primary
 * process actually configures the device; secondary processes succeed
 * without touching it.  Returns 0 or the rte_eth_dev_configure() result.
 */
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (!is_proc_primary())
		return 0;

	return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, dev_conf);
}
604a550baf2SMin Hu (Connor) 
605a550baf2SMin Hu (Connor) static int
606e46372d7SHuisong Li change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
607e46372d7SHuisong Li {
608e46372d7SHuisong Li #ifdef RTE_NET_BOND
609e46372d7SHuisong Li 
610e46372d7SHuisong Li 	portid_t slave_pids[RTE_MAX_ETHPORTS];
611e46372d7SHuisong Li 	struct rte_port *port;
612e46372d7SHuisong Li 	int num_slaves;
613e46372d7SHuisong Li 	portid_t slave_pid;
614e46372d7SHuisong Li 	int i;
615e46372d7SHuisong Li 
616e46372d7SHuisong Li 	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
617e46372d7SHuisong Li 						RTE_MAX_ETHPORTS);
618e46372d7SHuisong Li 	if (num_slaves < 0) {
619e46372d7SHuisong Li 		fprintf(stderr, "Failed to get slave list for port = %u\n",
620e46372d7SHuisong Li 			bond_pid);
621e46372d7SHuisong Li 		return num_slaves;
622e46372d7SHuisong Li 	}
623e46372d7SHuisong Li 
624e46372d7SHuisong Li 	for (i = 0; i < num_slaves; i++) {
625e46372d7SHuisong Li 		slave_pid = slave_pids[i];
626e46372d7SHuisong Li 		port = &ports[slave_pid];
627e46372d7SHuisong Li 		port->port_status =
628e46372d7SHuisong Li 			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
629e46372d7SHuisong Li 	}
630e46372d7SHuisong Li #else
631e46372d7SHuisong Li 	RTE_SET_USED(bond_pid);
632e46372d7SHuisong Li 	RTE_SET_USED(is_stop);
633e46372d7SHuisong Li #endif
634e46372d7SHuisong Li 	return 0;
635e46372d7SHuisong Li }
636e46372d7SHuisong Li 
637e46372d7SHuisong Li static int
638a550baf2SMin Hu (Connor) eth_dev_start_mp(uint16_t port_id)
639a550baf2SMin Hu (Connor) {
640e46372d7SHuisong Li 	int ret;
641e46372d7SHuisong Li 
642e46372d7SHuisong Li 	if (is_proc_primary()) {
643e46372d7SHuisong Li 		ret = rte_eth_dev_start(port_id);
644e46372d7SHuisong Li 		if (ret != 0)
645e46372d7SHuisong Li 			return ret;
646e46372d7SHuisong Li 
647e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
648e46372d7SHuisong Li 
649e46372d7SHuisong Li 		/*
650e46372d7SHuisong Li 		 * Starting a bonded port also starts all slaves under the bonded
651e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
652e46372d7SHuisong Li 		 * port status of these slaves.
653e46372d7SHuisong Li 		 */
654e46372d7SHuisong Li 		if (port->bond_flag == 1)
655e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, false);
656e46372d7SHuisong Li 	}
657a550baf2SMin Hu (Connor) 
658a550baf2SMin Hu (Connor) 	return 0;
659a550baf2SMin Hu (Connor) }
660a550baf2SMin Hu (Connor) 
661a550baf2SMin Hu (Connor) static int
662a550baf2SMin Hu (Connor) eth_dev_stop_mp(uint16_t port_id)
663a550baf2SMin Hu (Connor) {
664e46372d7SHuisong Li 	int ret;
665e46372d7SHuisong Li 
666e46372d7SHuisong Li 	if (is_proc_primary()) {
667e46372d7SHuisong Li 		ret = rte_eth_dev_stop(port_id);
668e46372d7SHuisong Li 		if (ret != 0)
669e46372d7SHuisong Li 			return ret;
670e46372d7SHuisong Li 
671e46372d7SHuisong Li 		struct rte_port *port = &ports[port_id];
672e46372d7SHuisong Li 
673e46372d7SHuisong Li 		/*
674e46372d7SHuisong Li 		 * Stopping a bonded port also stops all slaves under the bonded
675e46372d7SHuisong Li 		 * device. So if this port is bond device, we need to modify the
676e46372d7SHuisong Li 		 * port status of these slaves.
677e46372d7SHuisong Li 		 */
678e46372d7SHuisong Li 		if (port->bond_flag == 1)
679e46372d7SHuisong Li 			return change_bonding_slave_port_status(port_id, true);
680e46372d7SHuisong Li 	}
681a550baf2SMin Hu (Connor) 
682a550baf2SMin Hu (Connor) 	return 0;
683a550baf2SMin Hu (Connor) }
684a550baf2SMin Hu (Connor) 
/* Free a mempool; only the primary process owns pools, so secondaries
 * silently skip the release.
 */
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (!is_proc_primary())
		return;
	rte_mempool_free(mp);
}
691a550baf2SMin Hu (Connor) 
/* Apply an MTU change from the primary process only; secondaries report
 * success without touching the device.
 */
static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	return is_proc_primary() ? rte_eth_dev_set_mtu(port_id, mtu) : 0;
}
700a550baf2SMin Hu (Connor) 
701ed30d9b6SIntel /* Forward function declarations */
702c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
703edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
704f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
70576ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
706d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
707cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
708fb73e096SJeff Guo 				enum rte_dev_event_type type,
709fb73e096SJeff Guo 				void *param);
71063b72657SIvan Ilchenko static void fill_xstats_display_info(void);
711ce8d5614SIntel 
712ce8d5614SIntel /*
713ce8d5614SIntel  * Check if all the ports are started.
714ce8d5614SIntel  * If yes, return positive value. If not, return zero.
715ce8d5614SIntel  */
716ce8d5614SIntel static int all_ports_started(void);
717ed30d9b6SIntel 
7186970401eSDavid Marchand #ifdef RTE_LIB_GSO
71952f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
72035b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
7216970401eSDavid Marchand #endif
72252f38a20SJiayu Hu 
723b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
724b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
725b57b66a9SOri Kam 
72663b72657SIvan Ilchenko 
727af75078fSIntel /*
72898a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
729c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
730c9cafcc8SShahaf Shuler  */
731c9cafcc8SShahaf Shuler int
732c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
733c9cafcc8SShahaf Shuler {
734c9cafcc8SShahaf Shuler 	unsigned int i;
735c9cafcc8SShahaf Shuler 
736c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
737c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
738c9cafcc8SShahaf Shuler 			return 0;
739c9cafcc8SShahaf Shuler 	}
740c9cafcc8SShahaf Shuler 	return 1;
741c9cafcc8SShahaf Shuler }
742c9cafcc8SShahaf Shuler 
743c9cafcc8SShahaf Shuler /*
744af75078fSIntel  * Setup default configuration.
745af75078fSIntel  */
746af75078fSIntel static void
747af75078fSIntel set_default_fwd_lcores_config(void)
748af75078fSIntel {
749af75078fSIntel 	unsigned int i;
750af75078fSIntel 	unsigned int nb_lc;
7517acf894dSStephen Hurd 	unsigned int sock_num;
752af75078fSIntel 
753af75078fSIntel 	nb_lc = 0;
754af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
755dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
756dbfb8ec7SPhil Yang 			continue;
757c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
758c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
759c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
760c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
761c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
762c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
763c9cafcc8SShahaf Shuler 			}
764c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
7657acf894dSStephen Hurd 		}
766cb056611SStephen Hemminger 		if (i == rte_get_main_lcore())
767f54fe5eeSStephen Hurd 			continue;
768f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
769af75078fSIntel 	}
770af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
771af75078fSIntel 	nb_cfg_lcores = nb_lcores;
772af75078fSIntel 	nb_fwd_lcores = 1;
773af75078fSIntel }
774af75078fSIntel 
775af75078fSIntel static void
776af75078fSIntel set_def_peer_eth_addrs(void)
777af75078fSIntel {
778af75078fSIntel 	portid_t i;
779af75078fSIntel 
780af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
78135b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
782af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
783af75078fSIntel 	}
784af75078fSIntel }
785af75078fSIntel 
786af75078fSIntel static void
787af75078fSIntel set_default_fwd_ports_config(void)
788af75078fSIntel {
789af75078fSIntel 	portid_t pt_id;
79065a7360cSMatan Azrad 	int i = 0;
791af75078fSIntel 
792effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
79365a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
794af75078fSIntel 
795effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
796effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
797effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
798effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
799effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
800effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
801effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
802effdb8bbSPhil Yang 			}
803effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
804effdb8bbSPhil Yang 		}
805effdb8bbSPhil Yang 	}
806effdb8bbSPhil Yang 
807af75078fSIntel 	nb_cfg_ports = nb_ports;
808af75078fSIntel 	nb_fwd_ports = nb_ports;
809af75078fSIntel }
810af75078fSIntel 
/* Install the full default forwarding configuration: lcores, peer MAC
 * addresses, then ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
818af75078fSIntel 
819761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
820c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
821c7f5dba7SAnatoly Burakov static int
822c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
823c7f5dba7SAnatoly Burakov {
824c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
825c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
826c7f5dba7SAnatoly Burakov 
827c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
828c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
829c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
830c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
831c7f5dba7SAnatoly Burakov 	 */
832c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
833c7f5dba7SAnatoly Burakov 
834c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
835c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
836c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
837c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
838c7f5dba7SAnatoly Burakov 		return -1;
839c7f5dba7SAnatoly Burakov 	}
840c7f5dba7SAnatoly Burakov 
841c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
842c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
843c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
844c7f5dba7SAnatoly Burakov 
845c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
846c7f5dba7SAnatoly Burakov 
847c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
848c7f5dba7SAnatoly Burakov 
849c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
850c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
851c7f5dba7SAnatoly Burakov 		return -1;
852c7f5dba7SAnatoly Burakov 	}
853c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
854c7f5dba7SAnatoly Burakov 
855c7f5dba7SAnatoly Burakov 	return 0;
856c7f5dba7SAnatoly Burakov }
857c7f5dba7SAnatoly Burakov 
858c7f5dba7SAnatoly Burakov static int
859c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
860c7f5dba7SAnatoly Burakov {
861c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
862c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
863c7f5dba7SAnatoly Burakov 	 */
8649d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
865c7f5dba7SAnatoly Burakov 
866c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
867c7f5dba7SAnatoly Burakov }
868c7f5dba7SAnatoly Burakov 
869c7f5dba7SAnatoly Burakov static void *
870c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
871c7f5dba7SAnatoly Burakov {
872c7f5dba7SAnatoly Burakov 	void *addr;
873c7f5dba7SAnatoly Burakov 	int flags;
874c7f5dba7SAnatoly Burakov 
875c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
876c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
877c7f5dba7SAnatoly Burakov 	if (huge)
878c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
879c7f5dba7SAnatoly Burakov 
880c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
881c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
882c7f5dba7SAnatoly Burakov 		return NULL;
883c7f5dba7SAnatoly Burakov 
884c7f5dba7SAnatoly Burakov 	return addr;
885c7f5dba7SAnatoly Burakov }
886c7f5dba7SAnatoly Burakov 
/* Describes an externally allocated memory area to be handed to the DPDK
 * external malloc heap (see setup_extmem()).
 */
struct extmem_param {
	void *addr;	/* base virtual address of the mapped area */
	size_t len;	/* total length of the area, in bytes */
	size_t pgsz;	/* page size the area was allocated with */
	rte_iova_t *iova_table;	/* per-page IOVA addresses (malloc'd) */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
894c7f5dba7SAnatoly Burakov 
/*
 * Allocate an external memory area big enough for nb_mbufs objects of
 * mbuf_sz bytes, trying progressively different page sizes, and record
 * its geometry and per-page IOVA table in *param. The IOVA table is
 * malloc'd; ownership passes to the caller on success.
 * Returns 0 on success, -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success with this page size; stop trying others */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* reached only after a successful mapping, so unmap it */
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
984c7f5dba7SAnatoly Burakov 
/*
 * Create an external memory area sized for nb_mbufs objects and attach it
 * to the testpmd external malloc heap (EXTMEM_HEAP_NAME), creating the
 * heap on first use. Returns 0 on success, -1 on failure.
 */
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more: the heap keeps its own copy */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
10333a0968c8SShahaf Shuler static void
10343a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
10353a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
10363a0968c8SShahaf Shuler {
10373a0968c8SShahaf Shuler 	uint16_t pid = 0;
10383a0968c8SShahaf Shuler 	int ret;
10393a0968c8SShahaf Shuler 
10403a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
10410a0821bcSPaulis Gributs 		struct rte_eth_dev_info dev_info;
10423a0968c8SShahaf Shuler 
10430a0821bcSPaulis Gributs 		ret = eth_dev_info_get_print_err(pid, &dev_info);
10440a0821bcSPaulis Gributs 		if (ret != 0) {
10450a0821bcSPaulis Gributs 			TESTPMD_LOG(DEBUG,
10460a0821bcSPaulis Gributs 				    "unable to get device info for port %d on addr 0x%p,"
10470a0821bcSPaulis Gributs 				    "mempool unmapping will not be performed\n",
10480a0821bcSPaulis Gributs 				    pid, memhdr->addr);
10490a0821bcSPaulis Gributs 			continue;
10500a0821bcSPaulis Gributs 		}
10510a0821bcSPaulis Gributs 
10520a0821bcSPaulis Gributs 		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
10533a0968c8SShahaf Shuler 		if (ret) {
10543a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
10553a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
10563a0968c8SShahaf Shuler 				    "for device %s\n",
10570a0821bcSPaulis Gributs 				    memhdr->addr, dev_info.device->name);
10583a0968c8SShahaf Shuler 		}
10593a0968c8SShahaf Shuler 	}
10603a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
10613a0968c8SShahaf Shuler 	if (ret) {
10623a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
10633a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
10643a0968c8SShahaf Shuler 	}
10653a0968c8SShahaf Shuler }
10663a0968c8SShahaf Shuler 
10673a0968c8SShahaf Shuler static void
10683a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
10693a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
10703a0968c8SShahaf Shuler {
10713a0968c8SShahaf Shuler 	uint16_t pid = 0;
10723a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
10733a0968c8SShahaf Shuler 	int ret;
10743a0968c8SShahaf Shuler 
10753a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
10763a0968c8SShahaf Shuler 				  page_size);
10773a0968c8SShahaf Shuler 	if (ret) {
10783a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
10793a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
10803a0968c8SShahaf Shuler 		return;
10813a0968c8SShahaf Shuler 	}
10823a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
10830a0821bcSPaulis Gributs 		struct rte_eth_dev_info dev_info;
10843a0968c8SShahaf Shuler 
10850a0821bcSPaulis Gributs 		ret = eth_dev_info_get_print_err(pid, &dev_info);
10860a0821bcSPaulis Gributs 		if (ret != 0) {
10870a0821bcSPaulis Gributs 			TESTPMD_LOG(DEBUG,
10880a0821bcSPaulis Gributs 				    "unable to get device info for port %d on addr 0x%p,"
10890a0821bcSPaulis Gributs 				    "mempool mapping will not be performed\n",
10900a0821bcSPaulis Gributs 				    pid, memhdr->addr);
10910a0821bcSPaulis Gributs 			continue;
10920a0821bcSPaulis Gributs 		}
10930a0821bcSPaulis Gributs 		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
10943a0968c8SShahaf Shuler 		if (ret) {
10953a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
10963a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
10973a0968c8SShahaf Shuler 				    "for device %s\n",
10980a0821bcSPaulis Gributs 				    memhdr->addr, dev_info.device->name);
10993a0968c8SShahaf Shuler 		}
11003a0968c8SShahaf Shuler 	}
11013a0968c8SShahaf Shuler }
1102761f7ae1SJie Zhou #endif
1103c7f5dba7SAnatoly Burakov 
/*
 * Reserve IOVA-contiguous memzones to serve as pinned external buffers
 * for mbufs (MP_ALLOC_XBUF mode). On success *ext_mem points to a
 * malloc'd array of descriptors (caller frees) and the descriptor count
 * is returned; on failure *ext_mem is NULL, 0 is returned, and errno is
 * set (ENAMETOOLONG or ENOMEM).
 */
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	/* compute how many cache-line aligned elements fit per fixed-size
	 * zone, then how many zones cover all requested mbufs (round up)
	 */
	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			/* ext_num = 0 signals failure to the cleanup below */
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
116272512e18SViacheslav Ovsiienko 
/*
 * Create (primary process) or look up (secondary process) the mbuf pool
 * for a given socket and segment-size index, honouring the configured
 * mp_alloc_type allocation mode. Exits the application on any failure;
 * never returns NULL.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	/* secondaries reuse the pool the primary created */
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				"Get mbuf pool for socket %u failed: %s\n",
				socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
		{
			/* anonymous mmap'd memory; pool is built empty then
			 * populated and DMA-mapped chunk by chunk
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			/* externally allocated memory attached as a heap */
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
#endif
	case MP_ALLOC_XBUF:
		{
			/* mbuf data pinned in dedicated memzone buffers */
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
1281af75078fSIntel 
128220a0286fSLiu Xiaofeng /*
128320a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
128420a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
128520a0286fSLiu Xiaofeng  */
128620a0286fSLiu Xiaofeng static int
128720a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
128820a0286fSLiu Xiaofeng {
128920a0286fSLiu Xiaofeng 	static int warning_once = 0;
129020a0286fSLiu Xiaofeng 
1291c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
129220a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
129361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
129461a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
129520a0286fSLiu Xiaofeng 		warning_once = 1;
129620a0286fSLiu Xiaofeng 		return -1;
129720a0286fSLiu Xiaofeng 	}
129820a0286fSLiu Xiaofeng 	return 0;
129920a0286fSLiu Xiaofeng }
130020a0286fSLiu Xiaofeng 
13013f7311baSWei Dai /*
13023f7311baSWei Dai  * Get the allowed maximum number of RX queues.
13033f7311baSWei Dai  * *pid return the port id which has minimal value of
13043f7311baSWei Dai  * max_rx_queues in all ports.
13053f7311baSWei Dai  */
13063f7311baSWei Dai queueid_t
13073f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
13083f7311baSWei Dai {
13099e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
13106f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
13113f7311baSWei Dai 	portid_t pi;
13123f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
13133f7311baSWei Dai 
13143f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13156f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13166f51deb9SIvan Ilchenko 			continue;
13176f51deb9SIvan Ilchenko 
13186f51deb9SIvan Ilchenko 		max_rxq_valid = true;
13193f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
13203f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13213f7311baSWei Dai 			*pid = pi;
13223f7311baSWei Dai 		}
13233f7311baSWei Dai 	}
13246f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13253f7311baSWei Dai }
13263f7311baSWei Dai 
13273f7311baSWei Dai /*
13283f7311baSWei Dai  * Check input rxq is valid or not.
13293f7311baSWei Dai  * If input rxq is not greater than any of maximum number
13303f7311baSWei Dai  * of RX queues of all ports, it is valid.
13313f7311baSWei Dai  * if valid, return 0, else return -1
13323f7311baSWei Dai  */
13333f7311baSWei Dai int
13343f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13353f7311baSWei Dai {
13363f7311baSWei Dai 	queueid_t allowed_max_rxq;
13373f7311baSWei Dai 	portid_t pid = 0;
13383f7311baSWei Dai 
13393f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13403f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
134161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
134261a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
134361a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13443f7311baSWei Dai 		return -1;
13453f7311baSWei Dai 	}
13463f7311baSWei Dai 	return 0;
13473f7311baSWei Dai }
13483f7311baSWei Dai 
134936db4f6cSWei Dai /*
135036db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
135136db4f6cSWei Dai  * *pid return the port id which has minimal value of
135236db4f6cSWei Dai  * max_tx_queues in all ports.
135336db4f6cSWei Dai  */
135436db4f6cSWei Dai queueid_t
135536db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
135636db4f6cSWei Dai {
13579e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13586f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
135936db4f6cSWei Dai 	portid_t pi;
136036db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
136136db4f6cSWei Dai 
136236db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13636f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13646f51deb9SIvan Ilchenko 			continue;
13656f51deb9SIvan Ilchenko 
13666f51deb9SIvan Ilchenko 		max_txq_valid = true;
136736db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
136836db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
136936db4f6cSWei Dai 			*pid = pi;
137036db4f6cSWei Dai 		}
137136db4f6cSWei Dai 	}
13726f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
137336db4f6cSWei Dai }
137436db4f6cSWei Dai 
137536db4f6cSWei Dai /*
137636db4f6cSWei Dai  * Check input txq is valid or not.
137736db4f6cSWei Dai  * If input txq is not greater than any of maximum number
137836db4f6cSWei Dai  * of TX queues of all ports, it is valid.
137936db4f6cSWei Dai  * if valid, return 0, else return -1
138036db4f6cSWei Dai  */
138136db4f6cSWei Dai int
138236db4f6cSWei Dai check_nb_txq(queueid_t txq)
138336db4f6cSWei Dai {
138436db4f6cSWei Dai 	queueid_t allowed_max_txq;
138536db4f6cSWei Dai 	portid_t pid = 0;
138636db4f6cSWei Dai 
138736db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
138836db4f6cSWei Dai 	if (txq > allowed_max_txq) {
138961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
139061a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
139161a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
139236db4f6cSWei Dai 		return -1;
139336db4f6cSWei Dai 	}
139436db4f6cSWei Dai 	return 0;
139536db4f6cSWei Dai }
139636db4f6cSWei Dai 
13971c69df45SOri Kam /*
139899e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
139999e040d3SLijun Ou  * *pid return the port id which has minimal value of
140099e040d3SLijun Ou  * max_rxd in all queues of all ports.
140199e040d3SLijun Ou  */
140299e040d3SLijun Ou static uint16_t
140399e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
140499e040d3SLijun Ou {
140599e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
140699e040d3SLijun Ou 	portid_t pi;
140799e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
140899e040d3SLijun Ou 
140999e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
141099e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141199e040d3SLijun Ou 			continue;
141299e040d3SLijun Ou 
141399e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
141499e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
141599e040d3SLijun Ou 			*pid = pi;
141699e040d3SLijun Ou 		}
141799e040d3SLijun Ou 	}
141899e040d3SLijun Ou 	return allowed_max_rxd;
141999e040d3SLijun Ou }
142099e040d3SLijun Ou 
142199e040d3SLijun Ou /*
142299e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
142399e040d3SLijun Ou  * *pid return the port id which has minimal value of
142499e040d3SLijun Ou  * min_rxd in all queues of all ports.
142599e040d3SLijun Ou  */
142699e040d3SLijun Ou static uint16_t
142799e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
142899e040d3SLijun Ou {
142999e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
143099e040d3SLijun Ou 	portid_t pi;
143199e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
143299e040d3SLijun Ou 
143399e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
143499e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143599e040d3SLijun Ou 			continue;
143699e040d3SLijun Ou 
143799e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
143899e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
143999e040d3SLijun Ou 			*pid = pi;
144099e040d3SLijun Ou 		}
144199e040d3SLijun Ou 	}
144299e040d3SLijun Ou 
144399e040d3SLijun Ou 	return allowed_min_rxd;
144499e040d3SLijun Ou }
144599e040d3SLijun Ou 
144699e040d3SLijun Ou /*
144799e040d3SLijun Ou  * Check input rxd is valid or not.
144899e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
144999e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
145099e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
145199e040d3SLijun Ou  * if valid, return 0, else return -1
145299e040d3SLijun Ou  */
145399e040d3SLijun Ou int
145499e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
145599e040d3SLijun Ou {
145699e040d3SLijun Ou 	uint16_t allowed_max_rxd;
145799e040d3SLijun Ou 	uint16_t allowed_min_rxd;
145899e040d3SLijun Ou 	portid_t pid = 0;
145999e040d3SLijun Ou 
146099e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
146199e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
146261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
146361a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
146461a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
146599e040d3SLijun Ou 		return -1;
146699e040d3SLijun Ou 	}
146799e040d3SLijun Ou 
146899e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
146999e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
147061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
147161a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
147261a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
147399e040d3SLijun Ou 		return -1;
147499e040d3SLijun Ou 	}
147599e040d3SLijun Ou 
147699e040d3SLijun Ou 	return 0;
147799e040d3SLijun Ou }
147899e040d3SLijun Ou 
147999e040d3SLijun Ou /*
148099e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
148199e040d3SLijun Ou  * *pid return the port id which has minimal value of
148299e040d3SLijun Ou  * max_txd in every tx queue.
148399e040d3SLijun Ou  */
148499e040d3SLijun Ou static uint16_t
148599e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
148699e040d3SLijun Ou {
148799e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
148899e040d3SLijun Ou 	portid_t pi;
148999e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
149099e040d3SLijun Ou 
149199e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
149299e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149399e040d3SLijun Ou 			continue;
149499e040d3SLijun Ou 
149599e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
149699e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
149799e040d3SLijun Ou 			*pid = pi;
149899e040d3SLijun Ou 		}
149999e040d3SLijun Ou 	}
150099e040d3SLijun Ou 	return allowed_max_txd;
150199e040d3SLijun Ou }
150299e040d3SLijun Ou 
150399e040d3SLijun Ou /*
150499e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queues.
150599e040d3SLijun Ou  * *pid return the port id which has minimal value of
150699e040d3SLijun Ou  * min_txd in every tx queue.
150799e040d3SLijun Ou  */
150899e040d3SLijun Ou static uint16_t
150999e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
151099e040d3SLijun Ou {
151199e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
151299e040d3SLijun Ou 	portid_t pi;
151399e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
151499e040d3SLijun Ou 
151599e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
151699e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
151799e040d3SLijun Ou 			continue;
151899e040d3SLijun Ou 
151999e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
152099e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
152199e040d3SLijun Ou 			*pid = pi;
152299e040d3SLijun Ou 		}
152399e040d3SLijun Ou 	}
152499e040d3SLijun Ou 
152599e040d3SLijun Ou 	return allowed_min_txd;
152699e040d3SLijun Ou }
152799e040d3SLijun Ou 
152899e040d3SLijun Ou /*
152999e040d3SLijun Ou  * Check input txd is valid or not.
153099e040d3SLijun Ou  * If input txd is not greater than any of maximum number
153199e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
153299e040d3SLijun Ou  * if valid, return 0, else return -1
153399e040d3SLijun Ou  */
153499e040d3SLijun Ou int
153599e040d3SLijun Ou check_nb_txd(queueid_t txd)
153699e040d3SLijun Ou {
153799e040d3SLijun Ou 	uint16_t allowed_max_txd;
153899e040d3SLijun Ou 	uint16_t allowed_min_txd;
153999e040d3SLijun Ou 	portid_t pid = 0;
154099e040d3SLijun Ou 
154199e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
154299e040d3SLijun Ou 	if (txd > allowed_max_txd) {
154361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
154461a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
154561a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
154699e040d3SLijun Ou 		return -1;
154799e040d3SLijun Ou 	}
154899e040d3SLijun Ou 
154999e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
155099e040d3SLijun Ou 	if (txd < allowed_min_txd) {
155161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
155261a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
155361a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
155499e040d3SLijun Ou 		return -1;
155599e040d3SLijun Ou 	}
155699e040d3SLijun Ou 	return 0;
155799e040d3SLijun Ou }
155899e040d3SLijun Ou 
155999e040d3SLijun Ou 
156099e040d3SLijun Ou /*
15611c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15621c69df45SOri Kam  * *pid return the port id which has minimal value of
15631c69df45SOri Kam  * max_hairpin_queues in all ports.
15641c69df45SOri Kam  */
15651c69df45SOri Kam queueid_t
15661c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15671c69df45SOri Kam {
15689e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15691c69df45SOri Kam 	portid_t pi;
15701c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15711c69df45SOri Kam 
15721c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15731c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15741c69df45SOri Kam 			*pid = pi;
15751c69df45SOri Kam 			return 0;
15761c69df45SOri Kam 		}
15771c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15781c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15791c69df45SOri Kam 			*pid = pi;
15801c69df45SOri Kam 		}
15811c69df45SOri Kam 	}
15821c69df45SOri Kam 	return allowed_max_hairpinq;
15831c69df45SOri Kam }
15841c69df45SOri Kam 
15851c69df45SOri Kam /*
15861c69df45SOri Kam  * Check input hairpin is valid or not.
15871c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
15881c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
15891c69df45SOri Kam  * if valid, return 0, else return -1
15901c69df45SOri Kam  */
15911c69df45SOri Kam int
15921c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15931c69df45SOri Kam {
15941c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15951c69df45SOri Kam 	portid_t pid = 0;
15961c69df45SOri Kam 
15971c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15981c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
159961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
160061a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
16011c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
16021c69df45SOri Kam 		return -1;
16031c69df45SOri Kam 	}
16041c69df45SOri Kam 	return 0;
16051c69df45SOri Kam }
16061c69df45SOri Kam 
16071bb4a528SFerruh Yigit static int
16081bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
16091bb4a528SFerruh Yigit {
16101bb4a528SFerruh Yigit 	uint32_t eth_overhead;
16111bb4a528SFerruh Yigit 
16121bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
16131bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
16141bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
16151bb4a528SFerruh Yigit 	else
16161bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
16171bb4a528SFerruh Yigit 
16181bb4a528SFerruh Yigit 	return eth_overhead;
16191bb4a528SFerruh Yigit }
16201bb4a528SFerruh Yigit 
/*
 * Apply the default Tx/Rx configuration and offload settings to one port.
 *
 * pid       - port to configure (indexes the global ports[] array)
 * socket_id - NUMA socket recorded on the port for later allocations
 *
 * Fills the port's dev_conf from the global tx_mode/rx_mode templates,
 * queries device capabilities, propagates offloads to every queue config,
 * and marks the port for (re)configuration.  Exits the process if device
 * info cannot be obtained.
 */
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);

	/* Start from the global default Tx/Rx mode templates. */
	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	/* Drop fast-free from the requested offloads if unsupported. */
	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	/* Translate the legacy max-pkt-len option into an MTU. */
	if (max_rx_pkt_len)
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		uint16_t mtu;

		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			/* Smallest segment that still fits the MTU split. */
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

			/* Grow the first-segment mbuf size if needed. */
			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					"Configured mbuf size of the first segment %hu\n",
					mbuf_data_size[0]);
			}
		}
	}
}
1684b6b8a1ebSViacheslav Ovsiienko 
/*
 * One-time global initialization of testpmd forwarding state:
 * allocates per-lcore contexts, applies default port configuration,
 * creates mbuf pools (per NUMA socket when enabled), assigns an mbuf
 * pool (and GSO context) to each forwarding lcore, builds the initial
 * forwarding configuration, and creates per-lcore GRO contexts.
 * Exits the process on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
#ifdef RTE_LIB_GRO
	struct rte_gro_param gro_param;
#endif
#ifdef RTE_LIB_GSO
	uint32_t gso_types;
#endif

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		uint32_t socket_id;

		if (numa_support) {
			/* Prefer an explicit --port-numa-config mapping. */
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			}
		} else {
			socket_id = (socket_num == UMA_NO_CONFIG) ?
				    0 : socket_num;
		}
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: descriptors + caches + one burst,
		 * scaled by the maximum number of ports. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		/* One pool per (socket, segment-size) pair. */
		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							  nb_mbuf_per_pool,
							  socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	init_port_config();

#ifdef RTE_LIB_GSO
	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
#endif
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		/* Fall back to the socket-0 pool when no local pool exists. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
#ifdef RTE_LIB_GSO
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
#endif
	}

	fwd_config_setup();

#ifdef RTE_LIB_GRO
	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
#endif
}
1823ce8d5614SIntel 
18242950a769SDeclan Doherty 
/*
 * Re-apply the default configuration to a single port.
 * NOTE(review): appears intended for ports that show up after startup
 * (e.g. attach at runtime) — confirm against callers.
 *
 * new_port_id - port to reconfigure
 * socket_id   - NUMA socket to record on the port
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
	init_port_config();
}
/*
 * (Re)allocate the global forwarding stream array.
 *
 * Validates the configured nb_rxq/nb_txq against every port's limits,
 * fixes up each port's socket id, then sizes fwd_streams[] as
 * nb_ports * max(nb_rxq, nb_txq).  Existing streams are freed and
 * reallocated only when the required count changes.
 *
 * Returns 0 on success, -1 on invalid queue counts; exits the process
 * on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			fprintf(stderr,
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			fprintf(stderr,
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Prefer an explicit --port-numa-config mapping. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		fprintf(stderr,
			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1922af75078fSIntel 
/*
 * Display the spread of burst sizes recorded for one direction (RX or TX):
 * the share of empty (0-packet) bursts plus the two most frequent non-zero
 * burst sizes, everything else folded into "other".
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];	/* occurrence counts of tracked burst sizes */
	uint16_t pktnb_stats[4];	/* the burst sizes those counts belong to */
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		/* Slot 1 holds the most frequent non-zero burst size,
		 * slot 2 the runner-up; shift 1 down to 2 on insertion. */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		/* Whatever was not covered by the tracked sizes is "other";
		 * use 100 minus the printed percentages so they sum to 100. */
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		/* If the tracked sizes already account for every burst,
		 * close the line here with the remaining percentage. */
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
1989af75078fSIntel 
1990af75078fSIntel static void
1991af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1992af75078fSIntel {
1993af75078fSIntel 	struct fwd_stream *fs;
1994af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1995af75078fSIntel 
1996af75078fSIntel 	fs = fwd_streams[stream_id];
1997af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1998af75078fSIntel 	    (fs->fwd_dropped == 0))
1999af75078fSIntel 		return;
2000af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
2001af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
2002af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
2003af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
2004c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
2005c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
2006af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
2007af75078fSIntel 
2008af75078fSIntel 	/* if checksum mode */
2009af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
2010c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
2011c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
2012c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
201358d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
201458d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
2015d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2016d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
201794d65546SDavid Marchand 	} else {
201894d65546SDavid Marchand 		printf("\n");
2019af75078fSIntel 	}
2020af75078fSIntel 
20210e4b1963SDharmik Thakkar 	if (record_burst_stats) {
2022af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2023af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
20240e4b1963SDharmik Thakkar 	}
2025af75078fSIntel }
2026af75078fSIntel 
/*
 * Display forwarding statistics: per-stream stats (when there are more
 * streams than ports), then per-port counters relative to the baseline
 * snapshot taken by fwd_stats_reset(), and finally accumulated totals and
 * optional CPU cycles/packet figures.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int ret;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	/* Aggregate per-stream software counters into per-port buckets. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			/* One stream per port direction: remember it so its
			 * burst spread can be shown with the port below. */
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
				fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		ret = rte_eth_stats_get(pt_id, &stats);
		if (ret != 0) {
			fprintf(stderr,
				"%s: Error: failed to get stats (port %u): %d",
				__func__, pt_id, ret);
			continue;
		}
		/* Subtract the snapshot taken at fwd_stats_reset() so only
		 * traffic of the current forwarding run is reported. */
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		/* Checksum engine keeps extra per-port error counters. */
		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		}
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			/* TX-generating engines account cycles against the
			 * transmitted packet count, others against RX. */
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
217853324971SDavid Marchand 
217953324971SDavid Marchand void
218053324971SDavid Marchand fwd_stats_reset(void)
218153324971SDavid Marchand {
218253324971SDavid Marchand 	streamid_t sm_id;
218353324971SDavid Marchand 	portid_t pt_id;
2184baef6bbfSMin Hu (Connor) 	int ret;
218553324971SDavid Marchand 	int i;
218653324971SDavid Marchand 
218753324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
218853324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2189baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2190baef6bbfSMin Hu (Connor) 		if (ret != 0)
2191baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2192baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2193baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
219453324971SDavid Marchand 	}
219553324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
219653324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
219753324971SDavid Marchand 
219853324971SDavid Marchand 		fs->rx_packets = 0;
219953324971SDavid Marchand 		fs->tx_packets = 0;
220053324971SDavid Marchand 		fs->fwd_dropped = 0;
220153324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
220253324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
220353324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2204d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
220553324971SDavid Marchand 
220653324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
220753324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
220853324971SDavid Marchand 		fs->core_cycles = 0;
220953324971SDavid Marchand 	}
221053324971SDavid Marchand }
221153324971SDavid Marchand 
/*
 * Drain any stale packets sitting in the Rx queues of all forwarding ports
 * before a new forwarding run starts, so statistics reflect only fresh
 * traffic. Each queue is polled until empty or a 1-second timeout expires;
 * two passes with a 10 ms pause in between catch late arrivals.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* In multi-process mode queues may be owned by another process;
	 * polling them here is not supported, so skip flushing entirely. */
	if (num_procs > 1) {
		printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
		return;
	}

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];

				/* Polling stopped queues is prohibited. */
				if (ports[port_id].rxq[rxq].state ==
				    RTE_ETH_QUEUE_STATE_STOPPED)
					continue;

				/**
				* testpmd can stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
2267af75078fSIntel 
/*
 * Main forwarding loop of one logical core: repeatedly invoke pkt_fwd on
 * every enabled stream assigned to this lcore until fc->stopped is set.
 * When compiled in, bitrate statistics are recalculated once per second on
 * the designated lcore, and latency statistics are updated on theirs.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	/* Snapshot the port count once; hotplug during the run is ignored. */
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore handles the contiguous slice of fwd_streams starting
	 * at fc->stream_idx, fc->stream_nb entries long. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			if (!fsm[sm_id]->disabled)
				(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
2311af75078fSIntel 
2312af75078fSIntel static int
2313af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2314af75078fSIntel {
2315af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2316af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2317af75078fSIntel 	return 0;
2318af75078fSIntel }
2319af75078fSIntel 
2320af75078fSIntel /*
2321af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2322af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2323af75078fSIntel  */
2324af75078fSIntel static int
2325af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2326af75078fSIntel {
2327af75078fSIntel 	struct fwd_lcore *fwd_lc;
2328af75078fSIntel 	struct fwd_lcore tmp_lcore;
2329af75078fSIntel 
2330af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2331af75078fSIntel 	tmp_lcore = *fwd_lc;
2332af75078fSIntel 	tmp_lcore.stopped = 1;
2333af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2334af75078fSIntel 	return 0;
2335af75078fSIntel }
2336af75078fSIntel 
2337af75078fSIntel /*
2338af75078fSIntel  * Launch packet forwarding:
2339af75078fSIntel  *     - Setup per-port forwarding context.
2340af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2341af75078fSIntel  */
2342af75078fSIntel static void
2343af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2344af75078fSIntel {
2345af75078fSIntel 	unsigned int i;
2346af75078fSIntel 	unsigned int lc_id;
2347af75078fSIntel 	int diag;
2348af75078fSIntel 
2349af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2350af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2351af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2352af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2353af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2354af75078fSIntel 						     fwd_lcores[i], lc_id);
2355af75078fSIntel 			if (diag != 0)
235661a3b0e5SAndrew Rybchenko 				fprintf(stderr,
235761a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2358af75078fSIntel 					lc_id, diag);
2359af75078fSIntel 		}
2360af75078fSIntel 	}
2361af75078fSIntel }
2362af75078fSIntel 
2363af75078fSIntel /*
2364af75078fSIntel  * Launch packet forwarding configuration.
2365af75078fSIntel  */
2366af75078fSIntel void
2367af75078fSIntel start_packet_forwarding(int with_tx_first)
2368af75078fSIntel {
2369af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2370af75078fSIntel 	port_fwd_end_t  port_fwd_end;
23713c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2372af75078fSIntel 	unsigned int i;
2373af75078fSIntel 
23745a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
23755a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
23765a8fb55cSReshma Pattan 
23775a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
23785a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
23795a8fb55cSReshma Pattan 
23805a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
23815a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
23825a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
23835a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
23845a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
23855a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
23865a8fb55cSReshma Pattan 
2387ce8d5614SIntel 	if (all_ports_started() == 0) {
238861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2389ce8d5614SIntel 		return;
2390ce8d5614SIntel 	}
2391af75078fSIntel 	if (test_done == 0) {
239261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2393af75078fSIntel 		return;
2394af75078fSIntel 	}
23957741e4cfSIntel 
239647a767b2SMatan Azrad 	fwd_config_setup();
239747a767b2SMatan Azrad 
239865744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
239965744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
240065744833SXueming Li 		return;
240165744833SXueming Li 
24023c4426dbSDmitry Kozlyuk 	if (stream_init != NULL)
24033c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
24043c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
24053c4426dbSDmitry Kozlyuk 
2406a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2407a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2408a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2409a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2410a78040c9SAlvin Zhang 				fprintf(stderr,
2411a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2412a78040c9SAlvin Zhang 				return;
2413a78040c9SAlvin Zhang 			}
2414a78040c9SAlvin Zhang 		}
2415a78040c9SAlvin Zhang 	}
2416a78040c9SAlvin Zhang 
2417a78040c9SAlvin Zhang 	if (with_tx_first) {
2418a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2419a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2420a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2421a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2422a78040c9SAlvin Zhang 					fprintf(stderr,
2423a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2424a78040c9SAlvin Zhang 					return;
2425a78040c9SAlvin Zhang 				}
2426a78040c9SAlvin Zhang 			}
2427a78040c9SAlvin Zhang 		}
2428a78040c9SAlvin Zhang 	}
2429a78040c9SAlvin Zhang 
2430a78040c9SAlvin Zhang 	test_done = 0;
2431a78040c9SAlvin Zhang 
24327741e4cfSIntel 	if(!no_flush_rx)
24337741e4cfSIntel 		flush_fwd_rx_queues();
24347741e4cfSIntel 
2435af75078fSIntel 	rxtx_config_display();
2436af75078fSIntel 
243753324971SDavid Marchand 	fwd_stats_reset();
2438af75078fSIntel 	if (with_tx_first) {
2439acbf77a6SZhihong Wang 		while (with_tx_first--) {
2440acbf77a6SZhihong Wang 			launch_packet_forwarding(
2441acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2442af75078fSIntel 			rte_eal_mp_wait_lcore();
2443acbf77a6SZhihong Wang 		}
2444af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2445af75078fSIntel 		if (port_fwd_end != NULL) {
2446af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2447af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2448af75078fSIntel 		}
2449af75078fSIntel 	}
2450af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2451af75078fSIntel }
2452af75078fSIntel 
2453af75078fSIntel void
2454af75078fSIntel stop_packet_forwarding(void)
2455af75078fSIntel {
2456af75078fSIntel 	port_fwd_end_t port_fwd_end;
2457af75078fSIntel 	lcoreid_t lc_id;
245853324971SDavid Marchand 	portid_t pt_id;
245953324971SDavid Marchand 	int i;
2460af75078fSIntel 
2461af75078fSIntel 	if (test_done) {
246261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2463af75078fSIntel 		return;
2464af75078fSIntel 	}
2465af75078fSIntel 	printf("Telling cores to stop...");
2466af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2467af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2468af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2469af75078fSIntel 	rte_eal_mp_wait_lcore();
2470af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2471af75078fSIntel 	if (port_fwd_end != NULL) {
2472af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2473af75078fSIntel 			pt_id = fwd_ports_ids[i];
2474af75078fSIntel 			(*port_fwd_end)(pt_id);
2475af75078fSIntel 		}
2476af75078fSIntel 	}
2477c185d42cSDavid Marchand 
247853324971SDavid Marchand 	fwd_stats_display();
247958d475b7SJerin Jacob 
2480af75078fSIntel 	printf("\nDone.\n");
2481af75078fSIntel 	test_done = 1;
2482af75078fSIntel }
2483af75078fSIntel 
2484cfae07fdSOuyang Changchun void
2485cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2486cfae07fdSOuyang Changchun {
2487492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
248861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up fail.\n");
2489cfae07fdSOuyang Changchun }
2490cfae07fdSOuyang Changchun 
2491cfae07fdSOuyang Changchun void
2492cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2493cfae07fdSOuyang Changchun {
2494492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
249561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down fail.\n");
2496cfae07fdSOuyang Changchun }
2497cfae07fdSOuyang Changchun 
2498ce8d5614SIntel static int
2499ce8d5614SIntel all_ports_started(void)
2500ce8d5614SIntel {
2501ce8d5614SIntel 	portid_t pi;
2502ce8d5614SIntel 	struct rte_port *port;
2503ce8d5614SIntel 
25047d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2505ce8d5614SIntel 		port = &ports[pi];
2506ce8d5614SIntel 		/* Check if there is a port which is not started */
250741b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
250841b05095SBernard Iremonger 			(port->slave_flag == 0))
2509ce8d5614SIntel 			return 0;
2510ce8d5614SIntel 	}
2511ce8d5614SIntel 
2512ce8d5614SIntel 	/* No port is not started */
2513ce8d5614SIntel 	return 1;
2514ce8d5614SIntel }
2515ce8d5614SIntel 
2516148f963fSBruce Richardson int
25176018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
25186018eb8cSShahaf Shuler {
25196018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
25206018eb8cSShahaf Shuler 
25216018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
25226018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
25236018eb8cSShahaf Shuler 		return 0;
25246018eb8cSShahaf Shuler 	return 1;
25256018eb8cSShahaf Shuler }
25266018eb8cSShahaf Shuler 
25276018eb8cSShahaf Shuler int
2528edab33b1STetsuya Mukawa all_ports_stopped(void)
2529edab33b1STetsuya Mukawa {
2530edab33b1STetsuya Mukawa 	portid_t pi;
2531edab33b1STetsuya Mukawa 
25327d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
25336018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2534edab33b1STetsuya Mukawa 			return 0;
2535edab33b1STetsuya Mukawa 	}
2536edab33b1STetsuya Mukawa 
2537edab33b1STetsuya Mukawa 	return 1;
2538edab33b1STetsuya Mukawa }
2539edab33b1STetsuya Mukawa 
2540edab33b1STetsuya Mukawa int
2541edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2542edab33b1STetsuya Mukawa {
2543edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2544edab33b1STetsuya Mukawa 		return 0;
2545edab33b1STetsuya Mukawa 
2546edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2547edab33b1STetsuya Mukawa 		return 0;
2548edab33b1STetsuya Mukawa 
2549edab33b1STetsuya Mukawa 	return 1;
2550edab33b1STetsuya Mukawa }
2551edab33b1STetsuya Mukawa 
25521c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
25531c69df45SOri Kam static int
255401817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
25551c69df45SOri Kam {
25561c69df45SOri Kam 	queueid_t qi;
25571c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
25581c69df45SOri Kam 		.peer_count = 1,
25591c69df45SOri Kam 	};
25601c69df45SOri Kam 	int i;
25611c69df45SOri Kam 	int diag;
25621c69df45SOri Kam 	struct rte_port *port = &ports[pi];
256301817b10SBing Zhao 	uint16_t peer_rx_port = pi;
256401817b10SBing Zhao 	uint16_t peer_tx_port = pi;
256501817b10SBing Zhao 	uint32_t manual = 1;
256601817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
256701817b10SBing Zhao 
256801817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
256901817b10SBing Zhao 		peer_rx_port = pi;
257001817b10SBing Zhao 		peer_tx_port = pi;
257101817b10SBing Zhao 		manual = 0;
257201817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
257301817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
257401817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
257501817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
257601817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
257701817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
257801817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
257901817b10SBing Zhao 			peer_rx_port = p_pi;
258001817b10SBing Zhao 		} else {
258101817b10SBing Zhao 			uint16_t next_pi;
258201817b10SBing Zhao 
258301817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
258401817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
258501817b10SBing Zhao 				peer_rx_port = next_pi;
258601817b10SBing Zhao 		}
258701817b10SBing Zhao 		manual = 1;
258801817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
258901817b10SBing Zhao 		if (cnt_pi & 0x1) {
259001817b10SBing Zhao 			peer_rx_port = p_pi;
259101817b10SBing Zhao 		} else {
259201817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
259301817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
259401817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
259501817b10SBing Zhao 				peer_rx_port = pi;
259601817b10SBing Zhao 		}
259701817b10SBing Zhao 		peer_tx_port = peer_rx_port;
259801817b10SBing Zhao 		manual = 1;
259901817b10SBing Zhao 	}
26001c69df45SOri Kam 
26011c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
260201817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
26031c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
260401817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
260501817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
26061c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
26071c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
26081c69df45SOri Kam 		i++;
26091c69df45SOri Kam 		if (diag == 0)
26101c69df45SOri Kam 			continue;
26111c69df45SOri Kam 
26121c69df45SOri Kam 		/* Fail to setup rx queue, return */
2613eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2614eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2615eac341d3SJoyce Kong 		else
261661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
261761a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
261861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
261961a3b0e5SAndrew Rybchenko 			pi);
26201c69df45SOri Kam 		/* try to reconfigure queues next time */
26211c69df45SOri Kam 		port->need_reconfig_queues = 1;
26221c69df45SOri Kam 		return -1;
26231c69df45SOri Kam 	}
26241c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
262501817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
26261c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
262701817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
262801817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
26291c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
26301c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
26311c69df45SOri Kam 		i++;
26321c69df45SOri Kam 		if (diag == 0)
26331c69df45SOri Kam 			continue;
26341c69df45SOri Kam 
26351c69df45SOri Kam 		/* Fail to setup rx queue, return */
2636eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2637eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2638eac341d3SJoyce Kong 		else
263961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
264061a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
264161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
264261a3b0e5SAndrew Rybchenko 			pi);
26431c69df45SOri Kam 		/* try to reconfigure queues next time */
26441c69df45SOri Kam 		port->need_reconfig_queues = 1;
26451c69df45SOri Kam 		return -1;
26461c69df45SOri Kam 	}
26471c69df45SOri Kam 	return 0;
26481c69df45SOri Kam }
26491c69df45SOri Kam 
26502befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
26512befc67fSViacheslav Ovsiienko int
26522befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
26532befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
26542befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
26552befc67fSViacheslav Ovsiienko {
26562befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
26572befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
26582befc67fSViacheslav Ovsiienko 	int ret;
26592befc67fSViacheslav Ovsiienko 
26602befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
26612befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
26622befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
26632befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
26642befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
26652befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
26662befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
26673c4426dbSDmitry Kozlyuk 		goto exit;
26682befc67fSViacheslav Ovsiienko 	}
26692befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
26702befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
26712befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
26722befc67fSViacheslav Ovsiienko 		/*
26732befc67fSViacheslav Ovsiienko 		 * Use last valid pool for the segments with number
26742befc67fSViacheslav Ovsiienko 		 * exceeding the pool index.
26752befc67fSViacheslav Ovsiienko 		 */
26761108c33eSRaja Zidane 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
26772befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
26782befc67fSViacheslav Ovsiienko 		/* Handle zero as mbuf data buffer size. */
26792befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
26802befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_lengths[i] :
26812befc67fSViacheslav Ovsiienko 				   mbuf_data_size[mp_n];
26822befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
26832befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_offsets[i] : 0;
26842befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ? mpx : mp;
26852befc67fSViacheslav Ovsiienko 	}
26862befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = rx_pkt_nb_segs;
26872befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = rx_useg;
26882befc67fSViacheslav Ovsiienko 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
26892befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
26902befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = NULL;
26912befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = 0;
26923c4426dbSDmitry Kozlyuk exit:
26933c4426dbSDmitry Kozlyuk 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
26943c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
26953c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
26962befc67fSViacheslav Ovsiienko 	return ret;
26972befc67fSViacheslav Ovsiienko }
26982befc67fSViacheslav Ovsiienko 
269963b72657SIvan Ilchenko static int
270063b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
270163b72657SIvan Ilchenko {
270263b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
270363b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
270463b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
270563b72657SIvan Ilchenko 
270663b72657SIvan Ilchenko 	if (xstats_display_num == 0)
270763b72657SIvan Ilchenko 		return 0;
270863b72657SIvan Ilchenko 
270963b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
271063b72657SIvan Ilchenko 	if (*ids_supp == NULL)
271163b72657SIvan Ilchenko 		goto fail_ids_supp;
271263b72657SIvan Ilchenko 
271363b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
271463b72657SIvan Ilchenko 			      sizeof(**prev_values));
271563b72657SIvan Ilchenko 	if (*prev_values == NULL)
271663b72657SIvan Ilchenko 		goto fail_prev_values;
271763b72657SIvan Ilchenko 
271863b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
271963b72657SIvan Ilchenko 			      sizeof(**curr_values));
272063b72657SIvan Ilchenko 	if (*curr_values == NULL)
272163b72657SIvan Ilchenko 		goto fail_curr_values;
272263b72657SIvan Ilchenko 
272363b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
272463b72657SIvan Ilchenko 
272563b72657SIvan Ilchenko 	return 0;
272663b72657SIvan Ilchenko 
272763b72657SIvan Ilchenko fail_curr_values:
272863b72657SIvan Ilchenko 	free(*prev_values);
272963b72657SIvan Ilchenko fail_prev_values:
273063b72657SIvan Ilchenko 	free(*ids_supp);
273163b72657SIvan Ilchenko fail_ids_supp:
273263b72657SIvan Ilchenko 	return -ENOMEM;
273363b72657SIvan Ilchenko }
273463b72657SIvan Ilchenko 
273563b72657SIvan Ilchenko static void
273663b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
273763b72657SIvan Ilchenko {
273863b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
273963b72657SIvan Ilchenko 		return;
274063b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
274163b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
274263b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
274363b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
274463b72657SIvan Ilchenko }
274563b72657SIvan Ilchenko 
274663b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
274763b72657SIvan Ilchenko static void
274863b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
274963b72657SIvan Ilchenko {
275063b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
275163b72657SIvan Ilchenko 	const char *xstat_name;
275263b72657SIvan Ilchenko 	struct rte_port *port;
275363b72657SIvan Ilchenko 	uint64_t *ids_supp;
275463b72657SIvan Ilchenko 	int rc;
275563b72657SIvan Ilchenko 
275663b72657SIvan Ilchenko 	if (xstats_display_num == 0)
275763b72657SIvan Ilchenko 		return;
275863b72657SIvan Ilchenko 
275963b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
276063b72657SIvan Ilchenko 		fill_xstats_display_info();
276163b72657SIvan Ilchenko 		return;
276263b72657SIvan Ilchenko 	}
276363b72657SIvan Ilchenko 
276463b72657SIvan Ilchenko 	port = &ports[pi];
276563b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
276663b72657SIvan Ilchenko 		return;
276763b72657SIvan Ilchenko 
276863b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
276963b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
277063b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
277163b72657SIvan Ilchenko 
277263b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
277363b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
277463b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
277563b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
277663b72657SIvan Ilchenko 						   ids_supp + stat_supp);
277763b72657SIvan Ilchenko 		if (rc != 0) {
277863b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
277963b72657SIvan Ilchenko 				xstat_name, pi, stat);
278063b72657SIvan Ilchenko 			continue;
278163b72657SIvan Ilchenko 		}
278263b72657SIvan Ilchenko 		stat_supp++;
278363b72657SIvan Ilchenko 	}
278463b72657SIvan Ilchenko 
278563b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
278663b72657SIvan Ilchenko }
278763b72657SIvan Ilchenko 
278863b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
278963b72657SIvan Ilchenko static void
279063b72657SIvan Ilchenko fill_xstats_display_info(void)
279163b72657SIvan Ilchenko {
279263b72657SIvan Ilchenko 	portid_t pi;
279363b72657SIvan Ilchenko 
279463b72657SIvan Ilchenko 	if (xstats_display_num == 0)
279563b72657SIvan Ilchenko 		return;
279663b72657SIvan Ilchenko 
279763b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
279863b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
279963b72657SIvan Ilchenko }
280063b72657SIvan Ilchenko 
2801edab33b1STetsuya Mukawa int
2802ce8d5614SIntel start_port(portid_t pid)
2803ce8d5614SIntel {
280492d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2805ce8d5614SIntel 	portid_t pi;
280601817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
280701817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
280801817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
280901817b10SBing Zhao 	uint16_t cnt_pi = 0;
281001817b10SBing Zhao 	uint16_t cfg_pi = 0;
281101817b10SBing Zhao 	int peer_pi;
2812ce8d5614SIntel 	queueid_t qi;
2813ce8d5614SIntel 	struct rte_port *port;
28141c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2815ce8d5614SIntel 
28164468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
28174468635fSMichael Qiu 		return 0;
28184468635fSMichael Qiu 
28197d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2820edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2821ce8d5614SIntel 			continue;
2822ce8d5614SIntel 
2823d8c079a5SMin Hu (Connor) 		if (port_is_bonding_slave(pi)) {
2824d8c079a5SMin Hu (Connor) 			fprintf(stderr,
2825d8c079a5SMin Hu (Connor) 				"Please remove port %d from bonded device.\n",
2826d8c079a5SMin Hu (Connor) 				pi);
2827d8c079a5SMin Hu (Connor) 			continue;
2828d8c079a5SMin Hu (Connor) 		}
2829d8c079a5SMin Hu (Connor) 
283092d2703eSMichael Qiu 		need_check_link_status = 0;
2831ce8d5614SIntel 		port = &ports[pi];
2832eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STOPPED)
2833eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
2834eac341d3SJoyce Kong 		else {
283561a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is now not stopped\n", pi);
2836ce8d5614SIntel 			continue;
2837ce8d5614SIntel 		}
2838ce8d5614SIntel 
2839ce8d5614SIntel 		if (port->need_reconfig > 0) {
2840655eae01SJie Wang 			struct rte_eth_conf dev_conf;
2841655eae01SJie Wang 			int k;
2842655eae01SJie Wang 
2843ce8d5614SIntel 			port->need_reconfig = 0;
2844ce8d5614SIntel 
28457ee3e944SVasily Philipov 			if (flow_isolate_all) {
28467ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
28477ee3e944SVasily Philipov 				if (ret) {
284861a3b0e5SAndrew Rybchenko 					fprintf(stderr,
284961a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
285061a3b0e5SAndrew Rybchenko 						pi);
28517ee3e944SVasily Philipov 					return -1;
28527ee3e944SVasily Philipov 				}
28537ee3e944SVasily Philipov 			}
2854b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
28555706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
285620a0286fSLiu Xiaofeng 					port->socket_id);
28571c69df45SOri Kam 			if (nb_hairpinq > 0 &&
28581c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
285961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
286061a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
286161a3b0e5SAndrew Rybchenko 					pi);
28621c69df45SOri Kam 				return -1;
28631c69df45SOri Kam 			}
28641bb4a528SFerruh Yigit 
2865ce8d5614SIntel 			/* configure port */
2866a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
28671c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2868ce8d5614SIntel 						     &(port->dev_conf));
2869ce8d5614SIntel 			if (diag != 0) {
2870eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
2871eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
2872eac341d3SJoyce Kong 				else
287361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
287461a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
287561a3b0e5SAndrew Rybchenko 						pi);
287661a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Fail to configure port %d\n",
287761a3b0e5SAndrew Rybchenko 					pi);
2878ce8d5614SIntel 				/* try to reconfigure port next time */
2879ce8d5614SIntel 				port->need_reconfig = 1;
2880148f963fSBruce Richardson 				return -1;
2881ce8d5614SIntel 			}
2882655eae01SJie Wang 			/* get device configuration*/
2883655eae01SJie Wang 			if (0 !=
2884655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
2885655eae01SJie Wang 				fprintf(stderr,
2886655eae01SJie Wang 					"port %d can not get device configuration\n",
2887655eae01SJie Wang 					pi);
2888655eae01SJie Wang 				return -1;
2889655eae01SJie Wang 			}
2890655eae01SJie Wang 			/* Apply Rx offloads configuration */
2891655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
2892655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
2893655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
2894655eae01SJie Wang 					dev_conf.rxmode.offloads;
2895655eae01SJie Wang 				for (k = 0;
2896655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
2897655eae01SJie Wang 				     k++)
28983c4426dbSDmitry Kozlyuk 					port->rxq[k].conf.offloads |=
2899655eae01SJie Wang 						dev_conf.rxmode.offloads;
2900655eae01SJie Wang 			}
2901655eae01SJie Wang 			/* Apply Tx offloads configuration */
2902655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
2903655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
2904655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
2905655eae01SJie Wang 					dev_conf.txmode.offloads;
2906655eae01SJie Wang 				for (k = 0;
2907655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
2908655eae01SJie Wang 				     k++)
29093c4426dbSDmitry Kozlyuk 					port->txq[k].conf.offloads |=
2910655eae01SJie Wang 						dev_conf.txmode.offloads;
2911655eae01SJie Wang 			}
2912ce8d5614SIntel 		}
2913a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2914ce8d5614SIntel 			port->need_reconfig_queues = 0;
2915ce8d5614SIntel 			/* setup tx queues */
2916ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
29173c4426dbSDmitry Kozlyuk 				struct rte_eth_txconf *conf =
29183c4426dbSDmitry Kozlyuk 							&port->txq[qi].conf;
29193c4426dbSDmitry Kozlyuk 
2920b6ea6408SIntel 				if ((numa_support) &&
2921b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2922b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2923d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2924d44f8a48SQi Zhang 						txring_numa[pi],
29253c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
2926b6ea6408SIntel 				else
2927b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2928d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2929d44f8a48SQi Zhang 						port->socket_id,
29303c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
2931b6ea6408SIntel 
29323c4426dbSDmitry Kozlyuk 				if (diag == 0) {
29333c4426dbSDmitry Kozlyuk 					port->txq[qi].state =
29343c4426dbSDmitry Kozlyuk 						conf->tx_deferred_start ?
29353c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
29363c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
2937ce8d5614SIntel 					continue;
29383c4426dbSDmitry Kozlyuk 				}
2939ce8d5614SIntel 
2940ce8d5614SIntel 				/* Fail to setup tx queue, return */
2941eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
2942eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
2943eac341d3SJoyce Kong 				else
294461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
294561a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
294661a3b0e5SAndrew Rybchenko 						pi);
294761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
294861a3b0e5SAndrew Rybchenko 					"Fail to configure port %d tx queues\n",
2949d44f8a48SQi Zhang 					pi);
2950ce8d5614SIntel 				/* try to reconfigure queues next time */
2951ce8d5614SIntel 				port->need_reconfig_queues = 1;
2952148f963fSBruce Richardson 				return -1;
2953ce8d5614SIntel 			}
2954ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2955d44f8a48SQi Zhang 				/* setup rx queues */
2956b6ea6408SIntel 				if ((numa_support) &&
2957b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2958b6ea6408SIntel 					struct rte_mempool * mp =
295926cbb419SViacheslav Ovsiienko 						mbuf_pool_find
296026cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
2961b6ea6408SIntel 					if (mp == NULL) {
296261a3b0e5SAndrew Rybchenko 						fprintf(stderr,
296361a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
2964b6ea6408SIntel 							rxring_numa[pi]);
2965148f963fSBruce Richardson 						return -1;
2966b6ea6408SIntel 					}
2967b6ea6408SIntel 
29682befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2969d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2970d44f8a48SQi Zhang 					     rxring_numa[pi],
29713c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
2972d44f8a48SQi Zhang 					     mp);
29731e1d6bddSBernard Iremonger 				} else {
29741e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
297526cbb419SViacheslav Ovsiienko 						mbuf_pool_find
297626cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
29771e1d6bddSBernard Iremonger 					if (mp == NULL) {
297861a3b0e5SAndrew Rybchenko 						fprintf(stderr,
297961a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
29801e1d6bddSBernard Iremonger 							port->socket_id);
29811e1d6bddSBernard Iremonger 						return -1;
2982b6ea6408SIntel 					}
29832befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2984d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2985d44f8a48SQi Zhang 					     port->socket_id,
29863c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
2987d44f8a48SQi Zhang 					     mp);
29881e1d6bddSBernard Iremonger 				}
2989ce8d5614SIntel 				if (diag == 0)
2990ce8d5614SIntel 					continue;
2991ce8d5614SIntel 
2992ce8d5614SIntel 				/* Fail to setup rx queue, return */
2993eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
2994eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
2995eac341d3SJoyce Kong 				else
299661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
299761a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
299861a3b0e5SAndrew Rybchenko 						pi);
299961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
300061a3b0e5SAndrew Rybchenko 					"Fail to configure port %d rx queues\n",
3001d44f8a48SQi Zhang 					pi);
3002ce8d5614SIntel 				/* try to reconfigure queues next time */
3003ce8d5614SIntel 				port->need_reconfig_queues = 1;
3004148f963fSBruce Richardson 				return -1;
3005ce8d5614SIntel 			}
30061c69df45SOri Kam 			/* setup hairpin queues */
300701817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
30081c69df45SOri Kam 				return -1;
3009ce8d5614SIntel 		}
3010b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
3011b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3012b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3013b0a9354aSPavan Nikhilesh 					NULL, 0);
3014b0a9354aSPavan Nikhilesh 			if (diag < 0)
301561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3016b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3017b0a9354aSPavan Nikhilesh 					pi);
3018b0a9354aSPavan Nikhilesh 		}
3019b0a9354aSPavan Nikhilesh 
302001817b10SBing Zhao 		p_pi = pi;
302101817b10SBing Zhao 		cnt_pi++;
302201817b10SBing Zhao 
3023ce8d5614SIntel 		/* start port */
3024a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
302552f2c6f2SAndrew Rybchenko 		if (diag < 0) {
302661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
302761a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3028ce8d5614SIntel 
3029ce8d5614SIntel 			/* Fail to setup rx queue, return */
3030eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3031eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3032eac341d3SJoyce Kong 			else
303361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
303461a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
303561a3b0e5SAndrew Rybchenko 					pi);
3036ce8d5614SIntel 			continue;
3037ce8d5614SIntel 		}
3038ce8d5614SIntel 
3039eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3040eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3041eac341d3SJoyce Kong 		else
304261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
304361a3b0e5SAndrew Rybchenko 				pi);
3044ce8d5614SIntel 
30455ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3046c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3047a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3048d8c89163SZijie Pan 
3049ce8d5614SIntel 		/* at least one port started, need checking link status */
3050ce8d5614SIntel 		need_check_link_status = 1;
305101817b10SBing Zhao 
305201817b10SBing Zhao 		pl[cfg_pi++] = pi;
3053ce8d5614SIntel 	}
3054ce8d5614SIntel 
305592d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
3056edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
305792d2703eSMichael Qiu 	else if (need_check_link_status == 0)
305861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3059ce8d5614SIntel 
306001817b10SBing Zhao 	if (hairpin_mode & 0xf) {
306101817b10SBing Zhao 		uint16_t i;
306201817b10SBing Zhao 		int j;
306301817b10SBing Zhao 
306401817b10SBing Zhao 		/* bind all started hairpin ports */
306501817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
306601817b10SBing Zhao 			pi = pl[i];
306701817b10SBing Zhao 			/* bind current Tx to all peer Rx */
306801817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
306901817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
307001817b10SBing Zhao 			if (peer_pi < 0)
307101817b10SBing Zhao 				return peer_pi;
307201817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
307301817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
307401817b10SBing Zhao 					continue;
307501817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
307601817b10SBing Zhao 				if (diag < 0) {
307761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
307861a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
307901817b10SBing Zhao 						pi, peer_pl[j],
308001817b10SBing Zhao 						rte_strerror(-diag));
308101817b10SBing Zhao 					return -1;
308201817b10SBing Zhao 				}
308301817b10SBing Zhao 			}
308401817b10SBing Zhao 			/* bind all peer Tx to current Rx */
308501817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
308601817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
308701817b10SBing Zhao 			if (peer_pi < 0)
308801817b10SBing Zhao 				return peer_pi;
308901817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
309001817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
309101817b10SBing Zhao 					continue;
309201817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
309301817b10SBing Zhao 				if (diag < 0) {
309461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
309561a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
309601817b10SBing Zhao 						peer_pl[j], pi,
309701817b10SBing Zhao 						rte_strerror(-diag));
309801817b10SBing Zhao 					return -1;
309901817b10SBing Zhao 				}
310001817b10SBing Zhao 			}
310101817b10SBing Zhao 		}
310201817b10SBing Zhao 	}
310301817b10SBing Zhao 
310463b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
310563b72657SIvan Ilchenko 
3106ce8d5614SIntel 	printf("Done\n");
3107148f963fSBruce Richardson 	return 0;
3108ce8d5614SIntel }
3109ce8d5614SIntel 
3110ce8d5614SIntel void
3111ce8d5614SIntel stop_port(portid_t pid)
3112ce8d5614SIntel {
3113ce8d5614SIntel 	portid_t pi;
3114ce8d5614SIntel 	struct rte_port *port;
3115ce8d5614SIntel 	int need_check_link_status = 0;
311601817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
311701817b10SBing Zhao 	int peer_pi;
3118ce8d5614SIntel 
31194468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
31204468635fSMichael Qiu 		return;
31214468635fSMichael Qiu 
3122ce8d5614SIntel 	printf("Stopping ports...\n");
3123ce8d5614SIntel 
31247d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
31254468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3126ce8d5614SIntel 			continue;
3127ce8d5614SIntel 
3128a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
312961a3b0e5SAndrew Rybchenko 			fprintf(stderr,
313061a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
313161a3b0e5SAndrew Rybchenko 				pi);
3132a8ef3e3aSBernard Iremonger 			continue;
3133a8ef3e3aSBernard Iremonger 		}
3134a8ef3e3aSBernard Iremonger 
31350e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
313661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
313761a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
313861a3b0e5SAndrew Rybchenko 				pi);
31390e545d30SBernard Iremonger 			continue;
31400e545d30SBernard Iremonger 		}
31410e545d30SBernard Iremonger 
3142ce8d5614SIntel 		port = &ports[pi];
3143eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STARTED)
3144eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3145eac341d3SJoyce Kong 		else
3146ce8d5614SIntel 			continue;
3147ce8d5614SIntel 
314801817b10SBing Zhao 		if (hairpin_mode & 0xf) {
314901817b10SBing Zhao 			int j;
315001817b10SBing Zhao 
315101817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
315201817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
315301817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
315401817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
315501817b10SBing Zhao 			if (peer_pi < 0)
315601817b10SBing Zhao 				continue;
315701817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
315801817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
315901817b10SBing Zhao 					continue;
316001817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
316101817b10SBing Zhao 			}
316201817b10SBing Zhao 		}
316301817b10SBing Zhao 
31640f93edbfSGregory Etelson 		if (port->flow_list)
31650f93edbfSGregory Etelson 			port_flow_flush(pi);
31660f93edbfSGregory Etelson 
3167a550baf2SMin Hu (Connor) 		if (eth_dev_stop_mp(pi) != 0)
3168e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3169e62c5a12SIvan Ilchenko 				pi);
3170ce8d5614SIntel 
3171eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3172eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
3173eac341d3SJoyce Kong 		else
317461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into stopped\n",
317561a3b0e5SAndrew Rybchenko 				pi);
3176ce8d5614SIntel 		need_check_link_status = 1;
3177ce8d5614SIntel 	}
3178bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3179edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3180ce8d5614SIntel 
3181ce8d5614SIntel 	printf("Done\n");
3182ce8d5614SIntel }
3183ce8d5614SIntel 
3184ce6959bfSWisam Jaddo static void
31854f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3186ce6959bfSWisam Jaddo {
31874f1de450SThomas Monjalon 	portid_t i;
31884f1de450SThomas Monjalon 	portid_t new_total = 0;
3189ce6959bfSWisam Jaddo 
31904f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
31914f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
31924f1de450SThomas Monjalon 			array[new_total] = array[i];
31934f1de450SThomas Monjalon 			new_total++;
3194ce6959bfSWisam Jaddo 		}
31954f1de450SThomas Monjalon 	*total = new_total;
31964f1de450SThomas Monjalon }
31974f1de450SThomas Monjalon 
31984f1de450SThomas Monjalon static void
31994f1de450SThomas Monjalon remove_invalid_ports(void)
32004f1de450SThomas Monjalon {
32014f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
32024f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
32034f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3204ce6959bfSWisam Jaddo }
3205ce6959bfSWisam Jaddo 
/*
 * Release the bonding slave ports listed in slave_pids.
 *
 * For each slave: stop it if it is still started (marking it
 * RTE_PORT_STOPPED), clear its bonding-slave flag, and - only when testpmd
 * is quitting (cl_quit or f_quit set) - close the underlying device too.
 *
 * @param slave_pids  array of slave port ids to process
 * @param num_slaves  number of valid entries in slave_pids
 */
static void
clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
{
	struct rte_port *port;
	portid_t slave_pid;
	uint16_t i;

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		if (port_is_started(slave_pid) == 1) {
			if (rte_eth_dev_stop(slave_pid) != 0)
				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
					slave_pid);

			/* reflect the stop in testpmd's own port state */
			port = &ports[slave_pid];
			port->port_status = RTE_PORT_STOPPED;
		}

		clear_port_slave_flag(slave_pid);

		/* Close slave device when testpmd quit or is killed. */
		if (cl_quit == 1 || f_quit == 1)
			rte_eth_dev_close(slave_pid);
	}
}
3231*3889a322SHuisong Li 
/*
 * Close one port, or every port when pid == RTE_PORT_ALL.
 *
 * A port is skipped if it is still part of an active forwarding session,
 * if it is a bonding slave (the bonded master owns it), or if it is
 * already closed. On the primary process the port's multicast pool, flow
 * rules, flex items and indirect actions are flushed before the device is
 * closed; if the port is a bonded master, its slaves are released
 * afterwards. Finally, detached ports are purged from the global lists.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	portid_t slave_pids[RTE_MAX_ETHPORTS];
	int num_slaves = 0;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		/* refuse to close a port still used for forwarding */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		/* slaves must be removed from the bonded device first */
		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_CLOSED) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		/* only the primary process may actually close the device */
		if (is_proc_primary()) {
			mcast_addr_pool_destroy(pi);
			port_flow_flush(pi);
			port_flex_item_flush(pi);
			port_action_handle_flush(pi);
#ifdef RTE_NET_BOND
			/* fetch the slave list before the master disappears */
			if (port->bond_flag == 1)
				num_slaves = rte_eth_bond_slaves_get(pi,
						slave_pids, RTE_MAX_ETHPORTS);
#endif
			rte_eth_dev_close(pi);
			/*
			 * If this port is bonded device, all slaves under the
			 * device need to be removed or closed.
			 */
			if (port->bond_flag == 1 && num_slaves > 0)
				clear_bonding_slave_device(slave_pids,
							num_slaves);
		}

		free_xstats_display_info(pi);
	}

	remove_invalid_ports();
	printf("Done\n");
}
3295ce8d5614SIntel 
/*
 * Reset one port, or every port when pid == RTE_PORT_ALL.
 *
 * All targeted ports must already be stopped. Ports still used for
 * forwarding or acting as bonding slaves are skipped with a message.
 * A successful rte_eth_dev_reset() flags the port for full
 * reconfiguration (device and queues) on the next start.
 */
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	/* a reset is only valid on stopped ports */
	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		fprintf(stderr,
			"Can not reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			/* force reconfiguration before the next start */
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
				pi, diag);
		}
	}

	printf("Done\n");
}
334697f1e196SWei Dai 
/*
 * Hot-plug a new port described by a devargs identifier string.
 *
 * After probing succeeds, newly created ports are set up in one of two
 * modes: when setup_on_probe_event is set, ports flagged by the
 * RTE_ETH_EVENT_NEW handler (need_setup) are configured here; otherwise
 * an ethdev iterator walks the ports matching the identifier and sets up
 * each one not already attached.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		fprintf(stderr, "Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
3383c9cce428SThomas Monjalon 
/*
 * Finish bringing a freshly attached port into testpmd: reconfigure it on
 * its (validated) NUMA socket, enable promiscuous mode (failure is only
 * reported, not fatal), register it in the global and forwarding port
 * lists, and mark it stopped and set up.
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		fprintf(stderr,
			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	/* register the port in both global and forwarding lists */
	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
3410edab33b1STetsuya Mukawa 
/*
 * Detach (hot-unplug) a whole rte_device.
 *
 * Refuses to proceed if any sibling port on the device is neither closed
 * nor stopped; flow rules of non-closed siblings are flushed first. On
 * successful removal, stale port ids are purged from the global lists.
 */
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		fprintf(stderr, "Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	/* every port of the device must be quiescent before removal */
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
34455f4ec54fSChen Jing D(Mark) 
/*
 * Detach the rte_device that backs the given port id.
 *
 * The port must be stopped (a not-yet-closed port is accepted with a
 * warning). The device handle is obtained from the port's dev_info and
 * handed to detach_device(), which removes every port of that device.
 */
void
detach_port_device(portid_t port_id)
{
	int ret;
	struct rte_eth_dev_info dev_info;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			fprintf(stderr, "Port not stopped\n");
			return;
		}
		/* proceed anyway; detach_device() flushes flows of open ports */
		fprintf(stderr, "Port was not closed\n");
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
		return;
	}
	detach_device(dev_info.device);
}
34720654d4a8SThomas Monjalon 
/*
 * Detach a device identified by a devargs string (e.g. "0000:03:00.0").
 *
 * The identifier is parsed into rte_devargs, every matching port is
 * checked to be stopped (flow rules of non-closed ports are flushed),
 * then the device is removed via the hotplug API. The devargs structure
 * (and, on early exit, the ethdev iterator) is always released.
 */
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				/* early break out of the FOREACH: release the iterator */
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
351555e51c96SNithin Dabilpuram 
/*
 * Orderly shutdown of testpmd: stop forwarding, unmap anonymous mempool
 * memory (non-Windows), stop and close every port (with link checks
 * suppressed), tear down hot-plug monitoring if it was enabled, free the
 * mbuf mempools and the xstats display buffer.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	/* undo the DMA mappings done for anonymous-memory mempools */
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
#endif
	if (ports != NULL) {
		/* skip link-status polling while tearing ports down */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i])
			mempool_free_mp(mempools[i]);
	}
	free(xstats_display);

	printf("\nBye...\n");
}
3580af75078fSIntel 
/* Handler type for a parameterless testpmd command. */
typedef void (*cmd_func_t)(void);
/* Association of a command name with its handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* function invoked for that command */
};
3586af75078fSIntel 
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	/* poll until all masked ports are up, or the 9s budget expires */
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		/* with LSC interrupts enabled, events will report changes */
		if (lsc_interrupt)
			break;
	}
}
3647af75078fSIntel 
/*
 * Deferred handler (scheduled via rte_eal_alarm_set) for a device removal
 * event on one port: pause forwarding if the port takes part in it, stop
 * and close the port with link checks suppressed, detach its device, and
 * resume forwarding if it had been paused. arg carries the port id cast
 * to a pointer.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* the removed port would never report link up; skip the check */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;
		close_port(port_id);
		detach_device(device); /* might be already removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}
3680284c908cSGaetan Rivet 
/* This function is used by the interrupt thread */
/*
 * Generic ethdev event handler registered for all ports and all event
 * types. Prints events enabled in event_print_mask and reacts to a few:
 * NEW flags the port for setup, INTR_RMV schedules a deferred removal,
 * DESTROY marks the port closed, RX_AVAIL_THRESH drains and reports the
 * pending per-queue threshold events. Always returns 0.
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* defer: removal cannot be handled from the interrupt context */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
		uint16_t rxq_id;
		int ret;

		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
		for (rxq_id = 0; ; rxq_id++) {
			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
							    NULL);
			if (ret <= 0)
				break;
			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
			       port_id, rxq_id);
		}
		break;
	}
	default:
		break;
	}
	return 0;
}
373776ad4a2dSGaetan Rivet 
373897b5d8b5SThomas Monjalon static int
373997b5d8b5SThomas Monjalon register_eth_event_callback(void)
374097b5d8b5SThomas Monjalon {
374197b5d8b5SThomas Monjalon 	int ret;
374297b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
374397b5d8b5SThomas Monjalon 
374497b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
374597b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
374697b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
374797b5d8b5SThomas Monjalon 				event,
374897b5d8b5SThomas Monjalon 				eth_event_callback,
374997b5d8b5SThomas Monjalon 				NULL);
375097b5d8b5SThomas Monjalon 		if (ret != 0) {
375197b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
375297b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
375397b5d8b5SThomas Monjalon 			return -1;
375497b5d8b5SThomas Monjalon 		}
375597b5d8b5SThomas Monjalon 	}
375697b5d8b5SThomas Monjalon 
375797b5d8b5SThomas Monjalon 	return 0;
375897b5d8b5SThomas Monjalon }
375997b5d8b5SThomas Monjalon 
3760fb73e096SJeff Guo /* This function is used by the interrupt thread */
3761fb73e096SJeff Guo static void
3762cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3763fb73e096SJeff Guo 			     __rte_unused void *arg)
3764fb73e096SJeff Guo {
37652049c511SJeff Guo 	uint16_t port_id;
37662049c511SJeff Guo 	int ret;
37672049c511SJeff Guo 
3768fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3769fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3770fb73e096SJeff Guo 			__func__, type);
3771fb73e096SJeff Guo 		fflush(stderr);
3772fb73e096SJeff Guo 	}
3773fb73e096SJeff Guo 
3774fb73e096SJeff Guo 	switch (type) {
3775fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3776cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3777fb73e096SJeff Guo 			device_name);
37782049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
37792049c511SJeff Guo 		if (ret) {
37802049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
37812049c511SJeff Guo 				device_name);
37822049c511SJeff Guo 			return;
37832049c511SJeff Guo 		}
3784cc1bf307SJeff Guo 		/*
3785cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
3786cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
3787cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
3788cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
3789cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
3790cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
3791cc1bf307SJeff Guo 		 * be deleted.
3792cc1bf307SJeff Guo 		 */
3793cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3794cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3795cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3796cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3797fb73e096SJeff Guo 		break;
3798fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3799fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3800fb73e096SJeff Guo 			device_name);
3801fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
3802fb73e096SJeff Guo 		 * begin to attach port.
3803fb73e096SJeff Guo 		 */
3804fb73e096SJeff Guo 		break;
3805fb73e096SJeff Guo 	default:
3806fb73e096SJeff Guo 		break;
3807fb73e096SJeff Guo 	}
3808fb73e096SJeff Guo }
3809fb73e096SJeff Guo 
3810f2c5125aSPablo de Lara static void
3811f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3812f2c5125aSPablo de Lara {
3813d44f8a48SQi Zhang 	uint16_t qid;
38145e91aeefSWei Zhao 	uint64_t offloads;
3815f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3816f2c5125aSPablo de Lara 
3817d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
38183c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
38193c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
3820f4d178c1SXueming Li 
3821f4d178c1SXueming Li 		if (rxq_share > 0 &&
3822f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3823f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
38243c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
38253c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
3826f4d178c1SXueming Li 		}
3827f4d178c1SXueming Li 
3828575e0fd1SWei Zhao 		if (offloads != 0)
38293c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.offloads = offloads;
3830d44f8a48SQi Zhang 
3831d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3832f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
38333c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3834f2c5125aSPablo de Lara 
3835f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
38363c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3837f2c5125aSPablo de Lara 
3838f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
38393c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3840f2c5125aSPablo de Lara 
3841f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
38423c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3843f2c5125aSPablo de Lara 
3844f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
38453c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3846f2c5125aSPablo de Lara 
3847d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3848d44f8a48SQi Zhang 	}
3849d44f8a48SQi Zhang 
3850d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
38513c4426dbSDmitry Kozlyuk 		offloads = port->txq[qid].conf.offloads;
38523c4426dbSDmitry Kozlyuk 		port->txq[qid].conf = port->dev_info.default_txconf;
3853575e0fd1SWei Zhao 		if (offloads != 0)
38543c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.offloads = offloads;
3855d44f8a48SQi Zhang 
3856d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3857f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
38583c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3859f2c5125aSPablo de Lara 
3860f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
38613c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3862f2c5125aSPablo de Lara 
3863f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
38643c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
3865f2c5125aSPablo de Lara 
3866f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
38673c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
3868f2c5125aSPablo de Lara 
3869f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
38703c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
3871d44f8a48SQi Zhang 
3872d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3873d44f8a48SQi Zhang 	}
3874f2c5125aSPablo de Lara }
3875f2c5125aSPablo de Lara 
38760c4abd36SSteve Yang /*
3877b563c142SFerruh Yigit  * Helper function to set MTU from frame size
38780c4abd36SSteve Yang  *
38790c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
38800c4abd36SSteve Yang  *
38810c4abd36SSteve Yang  * return 0 on success, negative on error
38820c4abd36SSteve Yang  */
38830c4abd36SSteve Yang int
3884b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
38850c4abd36SSteve Yang {
38860c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
38870c4abd36SSteve Yang 	uint32_t eth_overhead;
38881bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
38890c4abd36SSteve Yang 
38901bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
38911bb4a528SFerruh Yigit 
38921bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
38931bb4a528SFerruh Yigit 		printf("Failed to get MTU for port %u\n", portid);
38941bb4a528SFerruh Yigit 		return -1;
38951bb4a528SFerruh Yigit 	}
38961bb4a528SFerruh Yigit 
38971bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
38980c4abd36SSteve Yang 
38991bb4a528SFerruh Yigit 	if (mtu == new_mtu)
39001bb4a528SFerruh Yigit 		return 0;
39011bb4a528SFerruh Yigit 
39021bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
390361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
390461a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
39051bb4a528SFerruh Yigit 			new_mtu, portid);
39061bb4a528SFerruh Yigit 		return -1;
39070c4abd36SSteve Yang 	}
39080c4abd36SSteve Yang 
39091bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
39101bb4a528SFerruh Yigit 
39110c4abd36SSteve Yang 	return 0;
39120c4abd36SSteve Yang }
39130c4abd36SSteve Yang 
3914013af9b6SIntel void
3915013af9b6SIntel init_port_config(void)
3916013af9b6SIntel {
3917013af9b6SIntel 	portid_t pid;
3918013af9b6SIntel 	struct rte_port *port;
3919655eae01SJie Wang 	int ret, i;
3920013af9b6SIntel 
39217d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3922013af9b6SIntel 		port = &ports[pid];
3923013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
39246f51deb9SIvan Ilchenko 
39256f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
39266f51deb9SIvan Ilchenko 		if (ret != 0)
39276f51deb9SIvan Ilchenko 			return;
39286f51deb9SIvan Ilchenko 
39293ce690d3SBruce Richardson 		if (nb_rxq > 1) {
3930013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
393190892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3932422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3933af75078fSIntel 		} else {
3934013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3935013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3936af75078fSIntel 		}
39373ce690d3SBruce Richardson 
39385f592039SJingjing Wu 		if (port->dcb_flag == 0) {
3939655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3940f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
3941f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
3942295968d1SFerruh Yigit 						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3943655eae01SJie Wang 			} else {
3944295968d1SFerruh Yigit 				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3945655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
3946295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3947655eae01SJie Wang 
3948655eae01SJie Wang 				for (i = 0;
3949655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
3950655eae01SJie Wang 				     i++)
39513c4426dbSDmitry Kozlyuk 					port->rxq[i].conf.offloads &=
3952295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3953655eae01SJie Wang 			}
39543ce690d3SBruce Richardson 		}
39553ce690d3SBruce Richardson 
3956f4d178c1SXueming Li 		rxtx_port_config(pid);
3957013af9b6SIntel 
3958a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3959a5279d25SIgor Romanov 		if (ret != 0)
3960a5279d25SIgor Romanov 			return;
3961013af9b6SIntel 
3962a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3963e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
39647b7e5ba7SIntel #endif
39658ea656f8SGaetan Rivet 
39660a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
39678ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
39680a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3969284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3970013af9b6SIntel 	}
3971013af9b6SIntel }
3972013af9b6SIntel 
397341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
397441b05095SBernard Iremonger {
397541b05095SBernard Iremonger 	struct rte_port *port;
397641b05095SBernard Iremonger 
397741b05095SBernard Iremonger 	port = &ports[slave_pid];
397841b05095SBernard Iremonger 	port->slave_flag = 1;
397941b05095SBernard Iremonger }
398041b05095SBernard Iremonger 
398141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
398241b05095SBernard Iremonger {
398341b05095SBernard Iremonger 	struct rte_port *port;
398441b05095SBernard Iremonger 
398541b05095SBernard Iremonger 	port = &ports[slave_pid];
398641b05095SBernard Iremonger 	port->slave_flag = 0;
398741b05095SBernard Iremonger }
398841b05095SBernard Iremonger 
39890e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
39900e545d30SBernard Iremonger {
39910e545d30SBernard Iremonger 	struct rte_port *port;
39920a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
39930a0821bcSPaulis Gributs 	int ret;
39940e545d30SBernard Iremonger 
39950e545d30SBernard Iremonger 	port = &ports[slave_pid];
39960a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
39970a0821bcSPaulis Gributs 	if (ret != 0) {
39980a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
39990a0821bcSPaulis Gributs 			"Failed to get device info for port id %d,"
40000a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave",
40010a0821bcSPaulis Gributs 			slave_pid);
40020a0821bcSPaulis Gributs 		return 0;
40030a0821bcSPaulis Gributs 	}
40040a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4005b8b8b344SMatan Azrad 		return 1;
4006b8b8b344SMatan Azrad 	return 0;
40070e545d30SBernard Iremonger }
40080e545d30SBernard Iremonger 
4009013af9b6SIntel const uint16_t vlan_tags[] = {
4010013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
4011013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
4012013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
4013013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
4014013af9b6SIntel };
4015013af9b6SIntel 
4016013af9b6SIntel static  int
4017ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
40181a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
40191a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
40201a572499SJingjing Wu 		 uint8_t pfc_en)
4021013af9b6SIntel {
4022013af9b6SIntel 	uint8_t i;
4023ac7c491cSKonstantin Ananyev 	int32_t rc;
4024ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
4025af75078fSIntel 
4026af75078fSIntel 	/*
4027013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
4028013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
4029af75078fSIntel 	 */
40301a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
40311a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
40321a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
40331a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
40341a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4035013af9b6SIntel 
4036547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
40371a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
40381a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
40391a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
4040295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
40411a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
4042295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4043013af9b6SIntel 
40441a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
40451a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
40461a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
40471a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
40481a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
4049af75078fSIntel 		}
4050295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4051f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4052f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4053013af9b6SIntel 		}
4054013af9b6SIntel 
4055013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
4056f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4057f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4058295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4059295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
40601a572499SJingjing Wu 	} else {
40611a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
40621a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
40631a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
40641a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
4065013af9b6SIntel 
40665139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
40675139bc12STing Xu 
4068ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4069ac7c491cSKonstantin Ananyev 		if (rc != 0)
4070ac7c491cSKonstantin Ananyev 			return rc;
4071ac7c491cSKonstantin Ananyev 
40721a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
40731a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
40741a572499SJingjing Wu 
4075295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4076bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
4077bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
4078013af9b6SIntel 		}
4079ac7c491cSKonstantin Ananyev 
4080f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4081f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4082295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4083ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
4084295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
40851a572499SJingjing Wu 	}
40861a572499SJingjing Wu 
40871a572499SJingjing Wu 	if (pfc_en)
40881a572499SJingjing Wu 		eth_conf->dcb_capability_en =
4089295968d1SFerruh Yigit 				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4090013af9b6SIntel 	else
4091295968d1SFerruh Yigit 		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4092013af9b6SIntel 
4093013af9b6SIntel 	return 0;
4094013af9b6SIntel }
4095013af9b6SIntel 
4096013af9b6SIntel int
40971a572499SJingjing Wu init_port_dcb_config(portid_t pid,
40981a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
40991a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
41001a572499SJingjing Wu 		     uint8_t pfc_en)
4101013af9b6SIntel {
4102013af9b6SIntel 	struct rte_eth_conf port_conf;
4103013af9b6SIntel 	struct rte_port *rte_port;
4104013af9b6SIntel 	int retval;
4105013af9b6SIntel 	uint16_t i;
4106013af9b6SIntel 
4107a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4108a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support dcb.\n");
4109a550baf2SMin Hu (Connor) 		return -ENOTSUP;
4110a550baf2SMin Hu (Connor) 	}
41112a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4112013af9b6SIntel 
4113c1ba6c32SHuisong Li 	/* retain the original device configuration. */
4114c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4115d5354e89SYanglong Wu 
4116013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
4117ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4118013af9b6SIntel 	if (retval < 0)
4119013af9b6SIntel 		return retval;
4120295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4121cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in vt mode */
4122cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4123cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4124cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
41253c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4126cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4127cbe70fdeSJie Wang 	}
4128013af9b6SIntel 
41292f203d44SQi Zhang 	/* re-configure the device . */
41302b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
41312b0e0ebaSChenbo Xia 	if (retval < 0)
41322b0e0ebaSChenbo Xia 		return retval;
41336f51deb9SIvan Ilchenko 
41346f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
41356f51deb9SIvan Ilchenko 	if (retval != 0)
41366f51deb9SIvan Ilchenko 		return retval;
41372a977b89SWenzhuo Lu 
41382a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
41392a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
41402a977b89SWenzhuo Lu 	 */
41412a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
41422a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
414361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
414461a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
414561a3b0e5SAndrew Rybchenko 			pid);
41462a977b89SWenzhuo Lu 		return -1;
41472a977b89SWenzhuo Lu 	}
41482a977b89SWenzhuo Lu 
41492a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
41502a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
41512a977b89SWenzhuo Lu 	 */
41522a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
415386ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
415486ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
415586ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
415686ef65eeSBernard Iremonger 		} else {
41572a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
41582a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
415986ef65eeSBernard Iremonger 		}
41602a977b89SWenzhuo Lu 	} else {
41612a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
41622a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
41632a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
41642a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
41652a977b89SWenzhuo Lu 		} else {
41662a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
41672a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
41682a977b89SWenzhuo Lu 
41692a977b89SWenzhuo Lu 		}
41702a977b89SWenzhuo Lu 	}
41712a977b89SWenzhuo Lu 	rx_free_thresh = 64;
41722a977b89SWenzhuo Lu 
4173013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4174013af9b6SIntel 
4175f4d178c1SXueming Li 	rxtx_port_config(pid);
4176013af9b6SIntel 	/* VLAN filter */
4177295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
41781a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4179013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4180013af9b6SIntel 
4181a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4182a5279d25SIgor Romanov 	if (retval != 0)
4183a5279d25SIgor Romanov 		return retval;
4184a5279d25SIgor Romanov 
41857741e4cfSIntel 	rte_port->dcb_flag = 1;
41867741e4cfSIntel 
4187a690a070SHuisong Li 	/* Enter DCB configuration status */
4188a690a070SHuisong Li 	dcb_config = 1;
4189a690a070SHuisong Li 
4190013af9b6SIntel 	return 0;
4191af75078fSIntel }
4192af75078fSIntel 
4193ffc468ffSTetsuya Mukawa static void
4194ffc468ffSTetsuya Mukawa init_port(void)
4195ffc468ffSTetsuya Mukawa {
41961b9f2746SGregory Etelson 	int i;
41971b9f2746SGregory Etelson 
4198ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4199ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4200ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4201ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4202ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4203ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4204ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4205ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4206ffc468ffSTetsuya Mukawa 	}
42071b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
420863b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
420963b72657SIvan Ilchenko 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
42101b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
421129841336SPhil Yang 	/* Initialize ports NUMA structures */
421229841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
421329841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
421429841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4215ffc468ffSTetsuya Mukawa }
4216ffc468ffSTetsuya Mukawa 
4217d3a274ceSZhihong Wang static void
4218d3a274ceSZhihong Wang force_quit(void)
4219d3a274ceSZhihong Wang {
4220d3a274ceSZhihong Wang 	pmd_test_exit();
4221d3a274ceSZhihong Wang 	prompt_exit();
4222d3a274ceSZhihong Wang }
4223d3a274ceSZhihong Wang 
4224d3a274ceSZhihong Wang static void
4225cfea1f30SPablo de Lara print_stats(void)
4226cfea1f30SPablo de Lara {
4227cfea1f30SPablo de Lara 	uint8_t i;
4228cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4229cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4230cfea1f30SPablo de Lara 
4231cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4232cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4233cfea1f30SPablo de Lara 
4234cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4235cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4236cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4237683d1e82SIgor Romanov 
4238683d1e82SIgor Romanov 	fflush(stdout);
4239cfea1f30SPablo de Lara }
4240cfea1f30SPablo de Lara 
4241cfea1f30SPablo de Lara static void
4242d3a274ceSZhihong Wang signal_handler(int signum)
4243d3a274ceSZhihong Wang {
4244d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
424561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4246d3a274ceSZhihong Wang 			signum);
4247a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
4248102b7329SReshma Pattan 		/* uninitialize packet capture framework */
4249102b7329SReshma Pattan 		rte_pdump_uninit();
4250102b7329SReshma Pattan #endif
4251a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
42528b36297dSAmit Gupta 		if (latencystats_enabled != 0)
425362d3216dSReshma Pattan 			rte_latencystats_uninit();
425462d3216dSReshma Pattan #endif
4255d3a274ceSZhihong Wang 		force_quit();
4256d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
4257d9a191a0SPhil Yang 		f_quit = 1;
4258d3a274ceSZhihong Wang 		/* exit with the expected status */
4259761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4260d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
4261d3a274ceSZhihong Wang 		kill(getpid(), signum);
4262761f7ae1SJie Zhou #endif
4263d3a274ceSZhihong Wang 	}
4264d3a274ceSZhihong Wang }
4265d3a274ceSZhihong Wang 
4266af75078fSIntel int
4267af75078fSIntel main(int argc, char** argv)
4268af75078fSIntel {
4269af75078fSIntel 	int diag;
4270f8244c63SZhiyong Yang 	portid_t port_id;
42714918a357SXiaoyun Li 	uint16_t count;
4272fb73e096SJeff Guo 	int ret;
4273af75078fSIntel 
4274d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4275d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4276d3a274ceSZhihong Wang 
4277285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4278285fd101SOlivier Matz 	if (testpmd_logtype < 0)
427916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
4280285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4281285fd101SOlivier Matz 
42829201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
42839201806eSStephen Hemminger 	if (diag < 0)
428416267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
428516267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
42869201806eSStephen Hemminger 
428797b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
428897b5d8b5SThomas Monjalon 	if (ret != 0)
428916267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
429097b5d8b5SThomas Monjalon 
4291a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
42924aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4293e9436f54STiwei Bie 	rte_pdump_init();
42944aa0d012SAnatoly Burakov #endif
42954aa0d012SAnatoly Burakov 
42964918a357SXiaoyun Li 	count = 0;
42974918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
42984918a357SXiaoyun Li 		ports_ids[count] = port_id;
42994918a357SXiaoyun Li 		count++;
43004918a357SXiaoyun Li 	}
43014918a357SXiaoyun Li 	nb_ports = (portid_t) count;
43024aa0d012SAnatoly Burakov 	if (nb_ports == 0)
43034aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
43044aa0d012SAnatoly Burakov 
43054aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
43064aa0d012SAnatoly Burakov 	init_port();
43074aa0d012SAnatoly Burakov 
43084aa0d012SAnatoly Burakov 	set_def_fwd_config();
43094aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
431016267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
431116267ceeSStephen Hemminger 			 "Check the core mask argument\n");
43124aa0d012SAnatoly Burakov 
4313e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4314a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4315e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4316e505d84cSAnatoly Burakov #endif
4317a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4318e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4319e505d84cSAnatoly Burakov #endif
4320e505d84cSAnatoly Burakov 
4321fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
43225fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4323fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4324fb7b8b32SAnatoly Burakov #else
4325fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4326fb7b8b32SAnatoly Burakov #endif
4327fb7b8b32SAnatoly Burakov 
4328e505d84cSAnatoly Burakov 	argc -= diag;
4329e505d84cSAnatoly Burakov 	argv += diag;
4330e505d84cSAnatoly Burakov 	if (argc > 1)
4331e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4332e505d84cSAnatoly Burakov 
4333761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4334e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4335285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
43361c036b16SEelco Chaudron 			strerror(errno));
43371c036b16SEelco Chaudron 	}
4338761f7ae1SJie Zhou #endif
43391c036b16SEelco Chaudron 
434099cabef0SPablo de Lara 	if (tx_first && interactive)
434199cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
434299cabef0SPablo de Lara 				"interactive mode.\n");
43438820cba4SDavid Hunt 
43448820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
434561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
434661a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
43478820cba4SDavid Hunt 		lsc_interrupt = 0;
43488820cba4SDavid Hunt 	}
43498820cba4SDavid Hunt 
43505a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
435161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
435261a3b0e5SAndrew Rybchenko 			"Warning: Either rx or tx queues should be non-zero\n");
43535a8fb55cSReshma Pattan 
43545a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
435561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
435661a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4357af75078fSIntel 			nb_rxq, nb_txq);
4358af75078fSIntel 
4359af75078fSIntel 	init_config();
4360fb73e096SJeff Guo 
4361fb73e096SJeff Guo 	if (hot_plug) {
43622049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4363fb73e096SJeff Guo 		if (ret) {
43642049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43652049c511SJeff Guo 				"fail to enable hotplug handling.");
4366fb73e096SJeff Guo 			return -1;
4367fb73e096SJeff Guo 		}
4368fb73e096SJeff Guo 
43692049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
43702049c511SJeff Guo 		if (ret) {
43712049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43722049c511SJeff Guo 				"fail to start device event monitoring.");
43732049c511SJeff Guo 			return -1;
43742049c511SJeff Guo 		}
43752049c511SJeff Guo 
43762049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4377cc1bf307SJeff Guo 			dev_event_callback, NULL);
43782049c511SJeff Guo 		if (ret) {
43792049c511SJeff Guo 			RTE_LOG(ERR, EAL,
43802049c511SJeff Guo 				"fail  to register device event callback\n");
43812049c511SJeff Guo 			return -1;
43822049c511SJeff Guo 		}
4383fb73e096SJeff Guo 	}
4384fb73e096SJeff Guo 
43856937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4386148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
4387af75078fSIntel 
4388ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
438934fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
439034fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
439134fc1051SIvan Ilchenko 		if (ret != 0)
439261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
439361a3b0e5SAndrew Rybchenko 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
439434fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
439534fc1051SIvan Ilchenko 	}
4396af75078fSIntel 
4397bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
43987e4441c8SRemy Horton 	/* Init metrics library */
43997e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4400bb9be9a4SDavid Marchand #endif
44017e4441c8SRemy Horton 
4402a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
440362d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
440462d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
440562d3216dSReshma Pattan 		if (ret)
440661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
440761a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
440861a3b0e5SAndrew Rybchenko 				ret);
440961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Latencystats running on lcore %d\n",
441062d3216dSReshma Pattan 			latencystats_lcore_id);
441162d3216dSReshma Pattan 	}
441262d3216dSReshma Pattan #endif
441362d3216dSReshma Pattan 
44147e4441c8SRemy Horton 	/* Setup bitrate stats */
4415a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4416e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
44177e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
44187e4441c8SRemy Horton 		if (bitrate_data == NULL)
4419e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4420e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
44217e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4422e25e6c70SRemy Horton 	}
44237e4441c8SRemy Horton #endif
4424a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
4425592ab76fSDavid Marchand 	if (init_cmdline() != 0)
4426592ab76fSDavid Marchand 		rte_exit(EXIT_FAILURE,
4427592ab76fSDavid Marchand 			"Could not initialise cmdline context.\n");
4428592ab76fSDavid Marchand 
442981ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
443081ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
443181ef862bSAllain Legacy 
4432ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4433ca7feb22SCyril Chemparathy 		if (auto_start) {
4434ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4435ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4436ca7feb22SCyril Chemparathy 		}
4437af75078fSIntel 		prompt();
44380de738cfSJiayu Hu 		pmd_test_exit();
4439ca7feb22SCyril Chemparathy 	} else
44400d56cb81SThomas Monjalon #endif
44410d56cb81SThomas Monjalon 	{
4442af75078fSIntel 		char c;
4443af75078fSIntel 		int rc;
4444af75078fSIntel 
4445d9a191a0SPhil Yang 		f_quit = 0;
4446d9a191a0SPhil Yang 
4447af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
444899cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4449cfea1f30SPablo de Lara 		if (stats_period != 0) {
4450cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4451cfea1f30SPablo de Lara 			uint64_t timer_period;
4452cfea1f30SPablo de Lara 
4453cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4454cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
4455cfea1f30SPablo de Lara 
4456d9a191a0SPhil Yang 			while (f_quit == 0) {
4457cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4458cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4459cfea1f30SPablo de Lara 
4460cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4461cfea1f30SPablo de Lara 					print_stats();
4462cfea1f30SPablo de Lara 					/* Reset the timer */
4463cfea1f30SPablo de Lara 					diff_time = 0;
4464cfea1f30SPablo de Lara 				}
4465cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4466cfea1f30SPablo de Lara 				prev_time = cur_time;
4467761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4468cfea1f30SPablo de Lara 			}
4469cfea1f30SPablo de Lara 		}
4470cfea1f30SPablo de Lara 
4471af75078fSIntel 		printf("Press enter to exit\n");
4472af75078fSIntel 		rc = read(0, &c, 1);
4473d3a274ceSZhihong Wang 		pmd_test_exit();
4474af75078fSIntel 		if (rc < 0)
4475af75078fSIntel 			return 1;
4476af75078fSIntel 	}
4477af75078fSIntel 
44785e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
44795e516c89SStephen Hemminger 	if (ret != 0)
44805e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
44815e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
44825e516c89SStephen Hemminger 
44835e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4484af75078fSIntel }
4485