/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
#ifdef RTE_NET_BOND
#include <rte_eth_bond.h>
#endif
#ifdef RTE_NET_MLX5
#include "mlx5_testpmd.h"
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
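
/*
 * For reference: with RTE_PGSIZE_2M = 2 MiB and the usual 64-byte cache
 * line, EXTBUF_ZONE_SIZE evaluates to 2097152 - 4 * 64 = 2096896 bytes,
 * i.e. four cache lines are reserved for allocator overhead.
 */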

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When enabled, the allocation of the RX and TX memory rings and of the
 * DMA memory buffers (mbufs) for probed ports is dispatched among CPU
 * sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified socket on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified socket on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified socket on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};
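
/*
 * The active engine is selected at runtime; in interactive mode this is
 * typically done with the "set fwd <mode>" command (e.g. "set fwd mac"),
 * which points cur_fwd_eng below at the matching entry of this table.
 */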

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container it may be impossible to terminate a process that is
 * running with the 'stats-period' option. Set a flag to exit the stats
 * period loop after SIGINT/SIGTERM is received.
 */
static volatile uint8_t f_quit;
uint8_t cl_quit; /* Quit testpmd from cmdline. */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT];

uint8_t multi_rx_mempool; /**< Enables multi-rx-mempool feature */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RX_DESC_DEFAULT 0
#define TX_DESC_DEFAULT 0
uint16_t nb_rxd = RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = TX_DESC_DEFAULT; /**< Number of TX descriptors. */
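
/*
 * A descriptor count of 0 lets the driver choose: when
 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() is called with
 * nb_desc == 0, ethdev substitutes the default ring size the driver
 * advertises in its device info.
 */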

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint32_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
	[RTE_ETH_EVENT_ERR_RECOVERING] = "error recovering",
	[RTE_ETH_EVENT_RECOVERY_SUCCESS] = "error recovery successful",
	[RTE_ETH_EVENT_RECOVERY_FAILED] = "error recovery failed",
	[RTE_ETH_EVENT_MAX] = NULL,
};
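
/*
 * The table above is indexed directly by enum rte_eth_event_type, so an
 * event handler can print a readable name as eth_event_desc[type];
 * RTE_ETH_EVENT_MAX deliberately maps to NULL as a terminator.
 */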

/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED) |
			    (UINT32_C(1) << RTE_ETH_EVENT_ERR_RECOVERING) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_SUCCESS) |
			    (UINT32_C(1) << RTE_ETH_EVENT_RECOVERY_FAILED);
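
/*
 * One bit per event type: event E is printed when
 * (event_print_mask & (UINT32_C(1) << E)) is non-zero, which is how the
 * event callback later in this file consults the mask.
 */
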
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 to disable.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

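/*
 * Note on the *_mp() helpers below: in multi-process operation only the
 * primary process may configure, start, stop or free shared resources, so
 * each wrapper silently succeeds (or skips the work) when called from a
 * secondary process.
 */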
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}

static int
change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
{
#ifdef RTE_NET_BOND

	portid_t slave_pids[RTE_MAX_ETHPORTS];
	struct rte_port *port;
	int num_slaves;
	portid_t slave_pid;
	int i;

	num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
						RTE_MAX_ETHPORTS);
	if (num_slaves < 0) {
		fprintf(stderr, "Failed to get slave list for port = %u\n",
			bond_pid);
		return num_slaves;
	}

	for (i = 0; i < num_slaves; i++) {
		slave_pid = slave_pids[i];
		port = &ports[slave_pid];
		port->port_status =
			is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
	}
#else
	RTE_SET_USED(bond_pid);
	RTE_SET_USED(is_stop);
#endif
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_start(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Starting a bonded port also starts all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, false);
	}

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	int ret;

	if (is_proc_primary()) {
		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		struct rte_port *port = &ports[port_id];

		/*
		 * Stopping a bonded port also stops all slaves under the
		 * bonded device. So if this port is a bonded device, we need
		 * to modify the port status of these slaves.
		 */
		if (port->bond_flag == 1)
			return change_bonding_slave_port_status(port_id, true);
	}

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if a socket has not been discovered yet.
 * Returns a positive value if the socket is new, zero if it is already
 * known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
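
/*
 * Typical use, as in the default config setup below:
 *     if (new_socket_id(sock_num))
 *             socket_ids[num_sockets++] = sock_num;
 */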

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
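
/*
 * In short, the estimate is
 *     RTE_ALIGN(128MB + ceil(nb_mbufs / floor(pgsz / obj_sz)) * pgsz, pgsz)
 * i.e. whole pages for the mbufs themselves plus a flat 128MB allowance
 * for mempool header chunks.
 */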

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per the mmap() manpage, a huge page size is requested by
	 * passing log2(page size) shifted left by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
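
/*
 * Example: for 2M huge pages, rte_log2_u64(2M) is 21, so this returns
 * 21 << 26, which matches the kernel's MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT).
 */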

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, rte_dev_name(dev_info.device));
		}
	}
}
#endif

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_1GB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
114172512e18SViacheslav Ovsiienko 
1142af75078fSIntel /*
1143af75078fSIntel  * Create an mbuf pool for the given socket, or look up the pool
1143af75078fSIntel  * created by the primary process when running as a secondary one.
1144af75078fSIntel  */
1145401b744dSShahaf Shuler static struct rte_mempool *
1146af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
114726cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1148af75078fSIntel {
1149af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1150bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1151761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1152af75078fSIntel 	uint32_t mb_size;
1153af75078fSIntel 
1154dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1155761f7ae1SJie Zhou #endif
115626cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1157a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1158a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1159a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1160a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1161a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1162a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1163a550baf2SMin Hu (Connor) 		return rte_mp;
1164a550baf2SMin Hu (Connor) 	}
1165148f963fSBruce Richardson 
1166285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1167d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1168d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1169d1eb542eSOlivier Matz 
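	/*
	 * Dispatch on the mempool allocation mode, selected at startup
	 * with the --mp-alloc command-line option.
	 */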
1170c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1171c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1172c7f5dba7SAnatoly Burakov 		{
1173c7f5dba7SAnatoly Burakov 			/* use the default rte_pktmbuf_pool_create() helper */
1174c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1175c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1176c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1177c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1178c7f5dba7SAnatoly Burakov 			break;
1179c7f5dba7SAnatoly Burakov 		}
1180761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1181c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1182c7f5dba7SAnatoly Burakov 		{
1183b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1184c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1185148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
118659fcf854SShahaf Shuler 				socket_id, mempool_flags);
118724427bb9SOlivier Matz 			if (rte_mp == NULL)
118824427bb9SOlivier Matz 				goto err;
1189b19a0c75SOlivier Matz 
1190b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1191b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1192b19a0c75SOlivier Matz 				rte_mp = NULL;
119324427bb9SOlivier Matz 				goto err;
1194b19a0c75SOlivier Matz 			}
1195b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1196b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
11973a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1198c7f5dba7SAnatoly Burakov 			break;
1199c7f5dba7SAnatoly Burakov 		}
1200c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1201c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1202c7f5dba7SAnatoly Burakov 		{
1203c7f5dba7SAnatoly Burakov 			int heap_socket;
1204c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1205c7f5dba7SAnatoly Burakov 
1206c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1207c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1208c7f5dba7SAnatoly Burakov 
1209c7f5dba7SAnatoly Burakov 			heap_socket =
1210c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1211c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1212c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1213c7f5dba7SAnatoly Burakov 
12140e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
12150e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1216ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1217c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1218c7f5dba7SAnatoly Burakov 					heap_socket);
1219c7f5dba7SAnatoly Burakov 			break;
1220c7f5dba7SAnatoly Burakov 		}
1221761f7ae1SJie Zhou #endif
122272512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
122372512e18SViacheslav Ovsiienko 		{
122472512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
122572512e18SViacheslav Ovsiienko 			unsigned int ext_num;
122672512e18SViacheslav Ovsiienko 
122772512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
122872512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
122972512e18SViacheslav Ovsiienko 			if (ext_num == 0)
123072512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
123172512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
123272512e18SViacheslav Ovsiienko 
123372512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
123472512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
123572512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
123672512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
123772512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
123872512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
123972512e18SViacheslav Ovsiienko 			free(ext_mem);
124072512e18SViacheslav Ovsiienko 			break;
124172512e18SViacheslav Ovsiienko 		}
1242c7f5dba7SAnatoly Burakov 	default:
1243c7f5dba7SAnatoly Burakov 		{
1244c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1245c7f5dba7SAnatoly Burakov 		}
1246bece7b6cSChristian Ehrhardt 	}
1247148f963fSBruce Richardson 
1248761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
124924427bb9SOlivier Matz err:
1250761f7ae1SJie Zhou #endif
1251af75078fSIntel 	if (rte_mp == NULL) {
1252d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1253d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1254d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1255148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1256591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1257af75078fSIntel 	}
1258401b744dSShahaf Shuler 	return rte_mp;
1259af75078fSIntel }
1260af75078fSIntel 
126120a0286fSLiu Xiaofeng /*
126220a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode.
126320a0286fSLiu Xiaofeng  * Return 0 if valid, -1 otherwise.
126420a0286fSLiu Xiaofeng  */
126520a0286fSLiu Xiaofeng static int
126620a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
126720a0286fSLiu Xiaofeng {
126820a0286fSLiu Xiaofeng 	static int warning_once = 0;
126920a0286fSLiu Xiaofeng 
1270c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
127120a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
127261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
127361a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
127420a0286fSLiu Xiaofeng 		warning_once = 1;
127520a0286fSLiu Xiaofeng 		return -1;
127620a0286fSLiu Xiaofeng 	}
127720a0286fSLiu Xiaofeng 	return 0;
127820a0286fSLiu Xiaofeng }
127920a0286fSLiu Xiaofeng 
12803f7311baSWei Dai /*
12813f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12823f7311baSWei Dai  * *pid returns the id of the port having the smallest
12833f7311baSWei Dai  * max_rx_queues value among all ports.
12843f7311baSWei Dai  */
12853f7311baSWei Dai queueid_t
12863f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
12873f7311baSWei Dai {
12889e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
12896f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
12903f7311baSWei Dai 	portid_t pi;
12913f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
12923f7311baSWei Dai 
12933f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
12946f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12956f51deb9SIvan Ilchenko 			continue;
12966f51deb9SIvan Ilchenko 
12976f51deb9SIvan Ilchenko 		max_rxq_valid = true;
12983f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
12993f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
13003f7311baSWei Dai 			*pid = pi;
13013f7311baSWei Dai 		}
13023f7311baSWei Dai 	}
13036f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
13043f7311baSWei Dai }
13053f7311baSWei Dai 
13063f7311baSWei Dai /*
13073f7311baSWei Dai  * Check whether the requested number of Rx queues is valid.
13083f7311baSWei Dai  * It is valid if it does not exceed the maximum number of
13093f7311baSWei Dai  * Rx queues supported by every port.
13103f7311baSWei Dai  * Return 0 if valid, -1 otherwise.
13113f7311baSWei Dai  */
13123f7311baSWei Dai int
13133f7311baSWei Dai check_nb_rxq(queueid_t rxq)
13143f7311baSWei Dai {
13153f7311baSWei Dai 	queueid_t allowed_max_rxq;
13163f7311baSWei Dai 	portid_t pid = 0;
13173f7311baSWei Dai 
13183f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
13193f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
132061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
132161a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
132261a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
13233f7311baSWei Dai 		return -1;
13243f7311baSWei Dai 	}
13253f7311baSWei Dai 	return 0;
13263f7311baSWei Dai }
13273f7311baSWei Dai 
132836db4f6cSWei Dai /*
132936db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
133036db4f6cSWei Dai  * *pid returns the id of the port having the smallest
133136db4f6cSWei Dai  * max_tx_queues value among all ports.
133236db4f6cSWei Dai  */
133336db4f6cSWei Dai queueid_t
133436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
133536db4f6cSWei Dai {
13369e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13376f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
133836db4f6cSWei Dai 	portid_t pi;
133936db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
134036db4f6cSWei Dai 
134136db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13426f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13436f51deb9SIvan Ilchenko 			continue;
13446f51deb9SIvan Ilchenko 
13456f51deb9SIvan Ilchenko 		max_txq_valid = true;
134636db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
134736db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
134836db4f6cSWei Dai 			*pid = pi;
134936db4f6cSWei Dai 		}
135036db4f6cSWei Dai 	}
13516f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
135236db4f6cSWei Dai }
135336db4f6cSWei Dai 
135436db4f6cSWei Dai /*
135536db4f6cSWei Dai  * Check whether the requested number of Tx queues is valid.
135636db4f6cSWei Dai  * It is valid if it does not exceed the maximum number of
135736db4f6cSWei Dai  * Tx queues supported by every port.
135836db4f6cSWei Dai  * Return 0 if valid, -1 otherwise.
135936db4f6cSWei Dai  */
136036db4f6cSWei Dai int
136136db4f6cSWei Dai check_nb_txq(queueid_t txq)
136236db4f6cSWei Dai {
136336db4f6cSWei Dai 	queueid_t allowed_max_txq;
136436db4f6cSWei Dai 	portid_t pid = 0;
136536db4f6cSWei Dai 
136636db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
136736db4f6cSWei Dai 	if (txq > allowed_max_txq) {
136861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
136961a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
137061a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
137136db4f6cSWei Dai 		return -1;
137236db4f6cSWei Dai 	}
137336db4f6cSWei Dai 	return 0;
137436db4f6cSWei Dai }
137536db4f6cSWei Dai 
13761c69df45SOri Kam /*
137799e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every Rx queue.
137899e040d3SLijun Ou  * *pid returns the id of the port having the smallest
137999e040d3SLijun Ou  * rx_desc_lim.nb_max value among all ports.
138099e040d3SLijun Ou  */
138199e040d3SLijun Ou static uint16_t
138299e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
138399e040d3SLijun Ou {
138499e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
138599e040d3SLijun Ou 	portid_t pi;
138699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
138799e040d3SLijun Ou 
138899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
138999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
139099e040d3SLijun Ou 			continue;
139199e040d3SLijun Ou 
139299e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
139399e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
139499e040d3SLijun Ou 			*pid = pi;
139599e040d3SLijun Ou 		}
139699e040d3SLijun Ou 	}
139799e040d3SLijun Ou 	return allowed_max_rxd;
139899e040d3SLijun Ou }
139999e040d3SLijun Ou 
140099e040d3SLijun Ou /*
140199e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every Rx queue.
140299e040d3SLijun Ou  * *pid returns the id of the port having the largest
140399e040d3SLijun Ou  * rx_desc_lim.nb_min value among all ports.
140499e040d3SLijun Ou  */
140599e040d3SLijun Ou static uint16_t
140699e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
140799e040d3SLijun Ou {
140899e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
140999e040d3SLijun Ou 	portid_t pi;
141099e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
141199e040d3SLijun Ou 
141299e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
141399e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
141499e040d3SLijun Ou 			continue;
141599e040d3SLijun Ou 
141699e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
141799e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
141899e040d3SLijun Ou 			*pid = pi;
141999e040d3SLijun Ou 		}
142099e040d3SLijun Ou 	}
142199e040d3SLijun Ou 
142299e040d3SLijun Ou 	return allowed_min_rxd;
142399e040d3SLijun Ou }
142499e040d3SLijun Ou 
142599e040d3SLijun Ou /*
142699e040d3SLijun Ou  * Check whether the requested number of Rx descriptors is valid.
142799e040d3SLijun Ou  * It is valid if it does not exceed the maximum number of RXDs
142899e040d3SLijun Ou  * supported by any Rx queue and is not below the minimal number
142999e040d3SLijun Ou  * of RXDs required by any Rx queue, across all ports.
143099e040d3SLijun Ou  * Return 0 if valid, -1 otherwise.
143199e040d3SLijun Ou  */
143299e040d3SLijun Ou int
143399e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
143499e040d3SLijun Ou {
143599e040d3SLijun Ou 	uint16_t allowed_max_rxd;
143699e040d3SLijun Ou 	uint16_t allowed_min_rxd;
143799e040d3SLijun Ou 	portid_t pid = 0;
143899e040d3SLijun Ou 
143999e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
144099e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
144161a3b0e5SAndrew Rybchenko 		fprintf(stderr,
144261a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
144361a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
144499e040d3SLijun Ou 		return -1;
144599e040d3SLijun Ou 	}
144699e040d3SLijun Ou 
144799e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
144899e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
144961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
145061a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
145161a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
145299e040d3SLijun Ou 		return -1;
145399e040d3SLijun Ou 	}
145499e040d3SLijun Ou 
145599e040d3SLijun Ou 	return 0;
145699e040d3SLijun Ou }
145799e040d3SLijun Ou 
145899e040d3SLijun Ou /*
145999e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every Tx queue.
146099e040d3SLijun Ou  * *pid returns the id of the port having the smallest
146199e040d3SLijun Ou  * tx_desc_lim.nb_max value among all ports.
146299e040d3SLijun Ou  */
146399e040d3SLijun Ou static uint16_t
146499e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
146599e040d3SLijun Ou {
146699e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
146799e040d3SLijun Ou 	portid_t pi;
146899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
146999e040d3SLijun Ou 
147099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
147199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
147299e040d3SLijun Ou 			continue;
147399e040d3SLijun Ou 
147499e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
147599e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
147699e040d3SLijun Ou 			*pid = pi;
147799e040d3SLijun Ou 		}
147899e040d3SLijun Ou 	}
147999e040d3SLijun Ou 	return allowed_max_txd;
148099e040d3SLijun Ou }
148199e040d3SLijun Ou 
148299e040d3SLijun Ou /*
148399e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every Tx queue.
148499e040d3SLijun Ou  * *pid returns the id of the port having the largest
148599e040d3SLijun Ou  * tx_desc_lim.nb_min value among all ports.
148699e040d3SLijun Ou  */
148799e040d3SLijun Ou static uint16_t
148899e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
148999e040d3SLijun Ou {
149099e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
149199e040d3SLijun Ou 	portid_t pi;
149299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
149399e040d3SLijun Ou 
149499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
149599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
149699e040d3SLijun Ou 			continue;
149799e040d3SLijun Ou 
149899e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
149999e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
150099e040d3SLijun Ou 			*pid = pi;
150199e040d3SLijun Ou 		}
150299e040d3SLijun Ou 	}
150399e040d3SLijun Ou 
150499e040d3SLijun Ou 	return allowed_min_txd;
150599e040d3SLijun Ou }
150699e040d3SLijun Ou 
150799e040d3SLijun Ou /*
150899e040d3SLijun Ou  * Check whether the requested number of Tx descriptors is valid.
150999e040d3SLijun Ou  * It is valid if it does not exceed the maximum and is not below
151099e040d3SLijun Ou  * the minimal number of TXDs supported by every Tx queue.
151199e040d3SLijun Ou  * Return 0 if valid, -1 otherwise.
151299e040d3SLijun Ou  */
151399e040d3SLijun Ou int
151499e040d3SLijun Ou check_nb_txd(queueid_t txd)
151599e040d3SLijun Ou {
151699e040d3SLijun Ou 	uint16_t allowed_max_txd;
151799e040d3SLijun Ou 	uint16_t allowed_min_txd;
151899e040d3SLijun Ou 	portid_t pid = 0;
151999e040d3SLijun Ou 
152099e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
152199e040d3SLijun Ou 	if (txd > allowed_max_txd) {
152261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
152361a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
152461a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
152599e040d3SLijun Ou 		return -1;
152699e040d3SLijun Ou 	}
152799e040d3SLijun Ou 
152899e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
152999e040d3SLijun Ou 	if (txd < allowed_min_txd) {
153061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
153161a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
153261a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
153399e040d3SLijun Ou 		return -1;
153499e040d3SLijun Ou 	}
153599e040d3SLijun Ou 	return 0;
153699e040d3SLijun Ou }
153799e040d3SLijun Ou 
153899e040d3SLijun Ou 
153999e040d3SLijun Ou /*
15401c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15411c69df45SOri Kam  * *pid returns the id of the port having the smallest
15421c69df45SOri Kam  * maximum hairpin queue count among all ports.
15431c69df45SOri Kam  */
15441c69df45SOri Kam queueid_t
15451c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15461c69df45SOri Kam {
15479e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15481c69df45SOri Kam 	portid_t pi;
15491c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15501c69df45SOri Kam 
15511c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15521c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15531c69df45SOri Kam 			*pid = pi;
15541c69df45SOri Kam 			return 0;
15551c69df45SOri Kam 		}
15561c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15571c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15581c69df45SOri Kam 			*pid = pi;
15591c69df45SOri Kam 		}
15601c69df45SOri Kam 	}
15611c69df45SOri Kam 	return allowed_max_hairpinq;
15621c69df45SOri Kam }
15631c69df45SOri Kam 
15641c69df45SOri Kam /*
15651c69df45SOri Kam  * Check whether the requested number of hairpin queues is valid.
15661c69df45SOri Kam  * It is valid if it does not exceed the maximum number of
15671c69df45SOri Kam  * hairpin queues supported by every port.
15681c69df45SOri Kam  * Return 0 if valid, -1 otherwise.
15691c69df45SOri Kam  */
15701c69df45SOri Kam int
15711c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15721c69df45SOri Kam {
15731c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15741c69df45SOri Kam 	portid_t pid = 0;
15751c69df45SOri Kam 
15761c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15771c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
157861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
157961a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
15801c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
15811c69df45SOri Kam 		return -1;
15821c69df45SOri Kam 	}
15831c69df45SOri Kam 	return 0;
15841c69df45SOri Kam }
15851c69df45SOri Kam 
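/*
 * Return the per-frame Ethernet overhead in bytes. When the driver
 * reports both max_rx_pktlen and max_mtu, the overhead is their
 * difference; otherwise fall back to a plain Ethernet header plus
 * CRC (14 + 4 bytes).
 */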
15861bb4a528SFerruh Yigit static int
15871bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
15881bb4a528SFerruh Yigit {
15891bb4a528SFerruh Yigit 	uint32_t eth_overhead;
15901bb4a528SFerruh Yigit 
15911bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
15921bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
15931bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
15941bb4a528SFerruh Yigit 	else
15951bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
15961bb4a528SFerruh Yigit 
15971bb4a528SFerruh Yigit 	return eth_overhead;
15981bb4a528SFerruh Yigit }
15991bb4a528SFerruh Yigit 
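/*
 * Apply the default configuration to one port: negotiate Rx metadata
 * delivery, copy the global Tx/Rx modes, drop offloads the device
 * does not support (e.g. mbuf fast free), propagate the offload
 * flags to every queue, derive the MTU from --max-pkt-len when it is
 * given, and flag the port and its queues for (re)configuration.
 */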
1600af75078fSIntel static void
1601b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1602b6b8a1ebSViacheslav Ovsiienko {
1603b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1604b6b8a1ebSViacheslav Ovsiienko 	int ret;
1605b6b8a1ebSViacheslav Ovsiienko 	int i;
1606b6b8a1ebSViacheslav Ovsiienko 
1607f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
1608f6d8a6d3SIvan Malov 
1609b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1610b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1611b6b8a1ebSViacheslav Ovsiienko 
1612b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1613b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1614b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1615b6b8a1ebSViacheslav Ovsiienko 
1616295968d1SFerruh Yigit 	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1617b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1618295968d1SFerruh Yigit 			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1619b6b8a1ebSViacheslav Ovsiienko 
1620b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1621b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
16223c4426dbSDmitry Kozlyuk 		port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1623b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1624b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
16253c4426dbSDmitry Kozlyuk 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1626b6b8a1ebSViacheslav Ovsiienko 
1627b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1628b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1629b6b8a1ebSViacheslav Ovsiienko 
16301bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
16311bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
16321bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
16331bb4a528SFerruh Yigit 
1634b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1635b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1636b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1637b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1638b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1639b6b8a1ebSViacheslav Ovsiienko 
1640b6b8a1ebSViacheslav Ovsiienko 	/*
1641b6b8a1ebSViacheslav Ovsiienko 	 * Check for maximum number of segments per MTU.
1642b6b8a1ebSViacheslav Ovsiienko 	 * Accordingly update the mbuf data size.
1643b6b8a1ebSViacheslav Ovsiienko 	 */
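	/*
	 * Worked example with illustrative numbers: an MTU of 1500 with
	 * 18 bytes of overhead and nb_mtu_seg_max = 5 gives data_size =
	 * 1518 / 5 = 303 bytes (integer division), so each segment
	 * buffer must provide at least 303 + RTE_PKTMBUF_HEADROOM bytes.
	 */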
1644b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1645b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16461bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16471bb4a528SFerruh Yigit 		uint16_t mtu;
1648b6b8a1ebSViacheslav Ovsiienko 
16491bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16501bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16511bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16521bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16531bb4a528SFerruh Yigit 
16541bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16551bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1656b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1657b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1658b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1659b6b8a1ebSViacheslav Ovsiienko 			}
1660b6b8a1ebSViacheslav Ovsiienko 		}
1661b6b8a1ebSViacheslav Ovsiienko 	}
16621bb4a528SFerruh Yigit }
1663b6b8a1ebSViacheslav Ovsiienko 
1664b6b8a1ebSViacheslav Ovsiienko static void
1665af75078fSIntel init_config(void)
1666af75078fSIntel {
1667ce8d5614SIntel 	portid_t pid;
1668af75078fSIntel 	struct rte_mempool *mbp;
1669af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1670af75078fSIntel 	lcoreid_t  lc_id;
16716970401eSDavid Marchand #ifdef RTE_LIB_GRO
1672b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
16736970401eSDavid Marchand #endif
16746970401eSDavid Marchand #ifdef RTE_LIB_GSO
167552f38a20SJiayu Hu 	uint32_t gso_types;
16766970401eSDavid Marchand #endif
1677487f9a59SYulong Pei 
1678af75078fSIntel 	/* Configuration of logical cores. */
1679af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1680af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1681fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1682af75078fSIntel 	if (fwd_lcores == NULL) {
1683ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1684ce8d5614SIntel 							"failed\n", nb_lcores);
1685af75078fSIntel 	}
1686af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1687af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1688af75078fSIntel 					       sizeof(struct fwd_lcore),
1689fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1690af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1691ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1692ce8d5614SIntel 								"failed\n");
1693af75078fSIntel 		}
1694af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1695af75078fSIntel 	}
1696af75078fSIntel 
16977d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1698b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
16996f51deb9SIvan Ilchenko 
1700b6ea6408SIntel 		if (numa_support) {
1701b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1702b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1703b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
170420a0286fSLiu Xiaofeng 
170529841336SPhil Yang 				/*
170629841336SPhil Yang 				 * if socket_id is invalid,
170729841336SPhil Yang 				 * set to the first available socket.
170829841336SPhil Yang 				 */
170920a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
171029841336SPhil Yang 					socket_id = socket_ids[0];
1711b6ea6408SIntel 			}
1712b6b8a1ebSViacheslav Ovsiienko 		} else {
1713b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1714b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1715b6ea6408SIntel 		}
1716b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1717b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1718ce8d5614SIntel 	}
17193ab64341SOlivier Matz 	/*
17203ab64341SOlivier Matz 	 * Create the mbuf pools.
17213ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbufs in
17223ab64341SOlivier Matz 	 * socket 0 memory by default.
17233ab64341SOlivier Matz 	 * Otherwise, create one pool per socket in use and per mbuf size.
17243ab64341SOlivier Matz 	 *
17253ab64341SOlivier Matz 	 * Size the pools for the maximum values of nb_rxd and nb_txd here,
17263ab64341SOlivier Matz 	 * so that nb_rxd and nb_txd can be reconfigured at run time.
17273ab64341SOlivier Matz 	 */
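	/*
	 * Illustrative default sizing, assuming RX_DESC_MAX and
	 * TX_DESC_MAX of 2048 each and MAX_PKT_BURST of 512 (the actual
	 * values come from testpmd.h), with 4 lcores and a 250-mbuf
	 * cache:
	 *   2048 + 4 * 250 + 2048 + 512 = 5608 mbufs,
	 * multiplied by RTE_MAX_ETHPORTS.
	 */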
17283ab64341SOlivier Matz 	if (param_total_num_mbufs)
17293ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
17303ab64341SOlivier Matz 	else {
17314ed89049SDavid Marchand 		nb_mbuf_per_pool = RX_DESC_MAX +
17323ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
17334ed89049SDavid Marchand 			TX_DESC_MAX + MAX_PKT_BURST;
17343ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
17353ab64341SOlivier Matz 	}
17363ab64341SOlivier Matz 
1737b6ea6408SIntel 	if (numa_support) {
173826cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1739ce8d5614SIntel 
1740c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
174126cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
174226cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
174326cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1744401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
174526cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17463ab64341SOlivier Matz 	} else {
174726cbb419SViacheslav Ovsiienko 		uint8_t i;
174826cbb419SViacheslav Ovsiienko 
174926cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
175026cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
175126cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1752401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
175326cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
175426cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17553ab64341SOlivier Matz 	}
1756b6ea6408SIntel 
1757b6ea6408SIntel 	init_port_config();
17585886ae07SAdrien Mazarguil 
17596970401eSDavid Marchand #ifdef RTE_LIB_GSO
1760295968d1SFerruh Yigit 	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1761295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
17626970401eSDavid Marchand #endif
17635886ae07SAdrien Mazarguil 	/*
17645886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
17655886ae07SAdrien Mazarguil 	 */
17665886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17678fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
176826cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17698fd8bebcSAdrien Mazarguil 
17705886ae07SAdrien Mazarguil 		if (mbp == NULL)
177126cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17725886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
17736970401eSDavid Marchand #ifdef RTE_LIB_GSO
177452f38a20SJiayu Hu 		/* initialize GSO context */
177552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
177652f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
177752f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
177835b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
177935b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
178052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
17816970401eSDavid Marchand #endif
17825886ae07SAdrien Mazarguil 	}
17835886ae07SAdrien Mazarguil 
17840c0db76fSBernard Iremonger 	fwd_config_setup();
1785b7091f1dSJiayu Hu 
17866970401eSDavid Marchand #ifdef RTE_LIB_GRO
1787b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1788b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1789b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1790b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1791b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1792b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1793b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1794b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1795b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1796b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1797b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1798b7091f1dSJiayu Hu 		}
1799b7091f1dSJiayu Hu 	}
18006970401eSDavid Marchand #endif
1801ce8d5614SIntel }
1802ce8d5614SIntel 
18032950a769SDeclan Doherty 
18042950a769SDeclan Doherty void
1805a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
18062950a769SDeclan Doherty {
18072950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1808b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
18092950a769SDeclan Doherty 	init_port_config();
18102950a769SDeclan Doherty }
18112950a769SDeclan Doherty 
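/*
 * (Re)allocate the array of forwarding streams: one stream per port
 * and per queue, using the larger of nb_rxq and nb_txq. The queue
 * counts are validated against each port's capabilities and the
 * per-port socket ids are fixed up first.
 * Return 0 on success, -1 on error.
 */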
1812ce8d5614SIntel int
1813ce8d5614SIntel init_fwd_streams(void)
1814ce8d5614SIntel {
1815ce8d5614SIntel 	portid_t pid;
1816ce8d5614SIntel 	struct rte_port *port;
1817ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
18185a8fb55cSReshma Pattan 	queueid_t q;
1819ce8d5614SIntel 
1820ce8d5614SIntel 	/* set socket id according to numa or not */
18217d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1822ce8d5614SIntel 		port = &ports[pid];
1823ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
182461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
182561a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
182661a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1827ce8d5614SIntel 			return -1;
1828ce8d5614SIntel 		}
1829ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
183061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
183161a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
183261a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1833ce8d5614SIntel 			return -1;
1834ce8d5614SIntel 		}
183520a0286fSLiu Xiaofeng 		if (numa_support) {
183620a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
183720a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
183820a0286fSLiu Xiaofeng 			else {
1839b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
184020a0286fSLiu Xiaofeng 
184129841336SPhil Yang 				/*
184229841336SPhil Yang 				 * if socket_id is invalid,
184329841336SPhil Yang 				 * set to the first available socket.
184429841336SPhil Yang 				 */
184520a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
184629841336SPhil Yang 					port->socket_id = socket_ids[0];
184720a0286fSLiu Xiaofeng 			}
184820a0286fSLiu Xiaofeng 		}
1849b6ea6408SIntel 		else {
1850b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1851af75078fSIntel 				port->socket_id = 0;
1852b6ea6408SIntel 			else
1853b6ea6408SIntel 				port->socket_id = socket_num;
1854b6ea6408SIntel 		}
1855af75078fSIntel 	}
1856af75078fSIntel 
18575a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18585a8fb55cSReshma Pattan 	if (q == 0) {
185961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
186061a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18615a8fb55cSReshma Pattan 		return -1;
18625a8fb55cSReshma Pattan 	}
18635a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1864ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1865ce8d5614SIntel 		return 0;
1866ce8d5614SIntel 	/* clear the old */
1867ce8d5614SIntel 	if (fwd_streams != NULL) {
1868ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1869ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1870ce8d5614SIntel 				continue;
1871ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1872ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1873af75078fSIntel 		}
1874ce8d5614SIntel 		rte_free(fwd_streams);
1875ce8d5614SIntel 		fwd_streams = NULL;
1876ce8d5614SIntel 	}
1877ce8d5614SIntel 
1878ce8d5614SIntel 	/* init new */
1879ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
18801f84c469SMatan Azrad 	if (nb_fwd_streams) {
1881ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
18821f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
18831f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1884ce8d5614SIntel 		if (fwd_streams == NULL)
18851f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
18861f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
18871f84c469SMatan Azrad 				 nb_fwd_streams);
1888ce8d5614SIntel 
1889af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
18901f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
18911f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
18921f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1893ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
18941f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
18951f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
18961f84c469SMatan Azrad 		}
1897af75078fSIntel 	}
1898ce8d5614SIntel 
1899ce8d5614SIntel 	return 0;
1900af75078fSIntel }
1901af75078fSIntel 
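/*
 * Display the spread of burst sizes seen in one direction (Rx or Tx):
 * the share of empty bursts, the two most frequent non-empty burst
 * sizes, and a remainder bucket. Example output (with illustrative
 * numbers):
 *   RX-bursts : 1000 [70% of 0 pkts + 25% of 32 pkts + 5% of other]
 */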
1902af75078fSIntel static void
1903af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1904af75078fSIntel {
19057569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
190685de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
19077569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
19087569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1909af75078fSIntel 	uint16_t nb_pkt;
19107569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
19117569b8c1SHonnappa Nagarahalli 	int i;
1912af75078fSIntel 
1913af75078fSIntel 	/*
1914af75078fSIntel 	 * First compute the total number of packet bursts; always record
1915af75078fSIntel 	 * the zero-sized bucket and find the two most frequent burst sizes.
1916af75078fSIntel 	 */
19177569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
19187569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
19197569b8c1SHonnappa Nagarahalli 
19207569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
19217569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
19227569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
19237569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
19247569b8c1SHonnappa Nagarahalli 
19257569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
19266a8b64fdSEli Britstein 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1927af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
19287569b8c1SHonnappa Nagarahalli 
1929af75078fSIntel 		if (nb_burst == 0)
1930af75078fSIntel 			continue;
19317569b8c1SHonnappa Nagarahalli 
1932af75078fSIntel 		total_burst += nb_burst;
19337569b8c1SHonnappa Nagarahalli 
19347569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
19357569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
19367569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1937fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1938fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
19397569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
19407569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
19417569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1942af75078fSIntel 		}
1943af75078fSIntel 	}
1944af75078fSIntel 	if (total_burst == 0)
1945af75078fSIntel 		return;
19467569b8c1SHonnappa Nagarahalli 
19477569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
19487569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
19497569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19507569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1951af75078fSIntel 			return;
1952af75078fSIntel 		}
19537569b8c1SHonnappa Nagarahalli 
19547569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19557569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19567569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19577569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1958af75078fSIntel 			return;
1959af75078fSIntel 		}
19607569b8c1SHonnappa Nagarahalli 
19617569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19627569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19637569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19647569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19657569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1966af75078fSIntel 	}
1967af75078fSIntel }
1968af75078fSIntel 
1969af75078fSIntel static void
1970af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1971af75078fSIntel {
1972af75078fSIntel 	struct fwd_stream *fs;
1973af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1974af75078fSIntel 
1975af75078fSIntel 	fs = fwd_streams[stream_id];
1976af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1977af75078fSIntel 	    (fs->fwd_dropped == 0))
1978af75078fSIntel 		return;
1979af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1980af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1981af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1982af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1983c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1984c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1985af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1986af75078fSIntel 
1987af75078fSIntel 	/* if checksum mode */
1988af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1989c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1990c185d42cSDavid Marchand 		       "  RX- bad L4 checksum: %-14"PRIu64
1991c185d42cSDavid Marchand 		       " RX- bad outer L4 checksum: %-14"PRIu64"\n",
199258d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
199358d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1994d139cf23SLance Richardson 		printf("  RX- bad outer IP checksum: %-14"PRIu64"\n",
1995d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
199694d65546SDavid Marchand 	} else {
199794d65546SDavid Marchand 		printf("\n");
1998af75078fSIntel 	}
1999af75078fSIntel 
20000e4b1963SDharmik Thakkar 	if (record_burst_stats) {
2001af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2002af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
20030e4b1963SDharmik Thakkar 	}
2004af75078fSIntel }
2005af75078fSIntel 
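/*
 * Display the forwarding statistics accumulated since the last reset:
 * per-stream statistics when there are more streams than ports,
 * per-port deltas relative to the snapshot taken when forwarding
 * started, and the accumulated totals, including CPU cycles per
 * packet when --record-core-cycles is enabled.
 */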
200653324971SDavid Marchand void
200753324971SDavid Marchand fwd_stats_display(void)
200853324971SDavid Marchand {
200953324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
201053324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
201153324971SDavid Marchand 	struct {
201253324971SDavid Marchand 		struct fwd_stream *rx_stream;
201353324971SDavid Marchand 		struct fwd_stream *tx_stream;
201453324971SDavid Marchand 		uint64_t tx_dropped;
201553324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
201653324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
201753324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
2018d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
201953324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
202053324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
202153324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
202253324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
202353324971SDavid Marchand 	struct rte_eth_stats stats;
202453324971SDavid Marchand 	uint64_t fwd_cycles = 0;
202553324971SDavid Marchand 	uint64_t total_recv = 0;
202653324971SDavid Marchand 	uint64_t total_xmit = 0;
202753324971SDavid Marchand 	struct rte_port *port;
202853324971SDavid Marchand 	streamid_t sm_id;
202953324971SDavid Marchand 	portid_t pt_id;
2030baef6bbfSMin Hu (Connor) 	int ret;
203153324971SDavid Marchand 	int i;
203253324971SDavid Marchand 
203353324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
203453324971SDavid Marchand 
203553324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
203653324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
203753324971SDavid Marchand 
203853324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
203953324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
204053324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
204153324971SDavid Marchand 		} else {
204253324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
204353324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
204453324971SDavid Marchand 		}
204553324971SDavid Marchand 
204653324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
204753324971SDavid Marchand 
204853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
204953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
205053324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
205153324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2052d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2053d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
205453324971SDavid Marchand 
2055bc700b67SDharmik Thakkar 		if (record_core_cycles)
2056*99a4974aSRobin Jarry 			fwd_cycles += fs->busy_cycles;
205753324971SDavid Marchand 	}
205853324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
205953324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
206053324971SDavid Marchand 		port = &ports[pt_id];
206153324971SDavid Marchand 
2062baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &stats);
2063baef6bbfSMin Hu (Connor) 		if (ret != 0) {
2064baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2065baef6bbfSMin Hu (Connor) 				"%s: Error: failed to get stats (port %u): %d",
2066baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
2067baef6bbfSMin Hu (Connor) 			continue;
2068baef6bbfSMin Hu (Connor) 		}
206953324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
207053324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
207153324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
207253324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
207353324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
207453324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
207553324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
207653324971SDavid Marchand 
207753324971SDavid Marchand 		total_recv += stats.ipackets;
207853324971SDavid Marchand 		total_xmit += stats.opackets;
207953324971SDavid Marchand 		total_rx_dropped += stats.imissed;
208053324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
208153324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
208253324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
208353324971SDavid Marchand 
208453324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
208553324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
208653324971SDavid Marchand 
208708dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
208808dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
208953324971SDavid Marchand 		       stats.ipackets + stats.imissed);
209053324971SDavid Marchand 
2091d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
209253324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
209353324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
209453324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
209553324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
209653324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
209753324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2098d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2099d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2100d139cf23SLance Richardson 		}
210153324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
210208dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
210308dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
210453324971SDavid Marchand 		}
210553324971SDavid Marchand 
210608dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
210753324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
210853324971SDavid Marchand 		       stats.opackets, ports_stats[pt_id].tx_dropped,
210953324971SDavid Marchand 		       stats.opackets + ports_stats[pt_id].tx_dropped);
211053324971SDavid Marchand 
21110e4b1963SDharmik Thakkar 		if (record_burst_stats) {
211253324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
211353324971SDavid Marchand 				pkt_burst_stats_display("RX",
211453324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
211553324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
211653324971SDavid Marchand 				pkt_burst_stats_display("TX",
211753324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
21180e4b1963SDharmik Thakkar 		}
211953324971SDavid Marchand 
212053324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
212153324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
212253324971SDavid Marchand 	}
212353324971SDavid Marchand 
212453324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
212553324971SDavid Marchand 	       "%s\n",
212653324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
212753324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
212853324971SDavid Marchand 	       "%-"PRIu64"\n"
212953324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
213053324971SDavid Marchand 	       "%-"PRIu64"\n",
213153324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
213253324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
213353324971SDavid Marchand 	if (total_rx_nombuf > 0)
213453324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
213553324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
213653324971SDavid Marchand 	       "%s\n",
213753324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2138bc700b67SDharmik Thakkar 	if (record_core_cycles) {
21394c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
21403a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
21413a164e00SPhil Yang 			uint64_t total_pkts = 0;
21423a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
21433a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
21443a164e00SPhil Yang 				total_pkts = total_xmit;
21453a164e00SPhil Yang 			else
21463a164e00SPhil Yang 				total_pkts = total_recv;
21473a164e00SPhil Yang 
2148*99a4974aSRobin Jarry 			printf("\n  CPU cycles/packet=%.2F (busy cycles="
21493a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
21504c0497b1SDharmik Thakkar 			       " MHz Clock\n",
21513a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
21523a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
21534c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
21543a164e00SPhil Yang 		}
2155bc700b67SDharmik Thakkar 	}
215653324971SDavid Marchand }
215753324971SDavid Marchand 
215853324971SDavid Marchand void
215953324971SDavid Marchand fwd_stats_reset(void)
216053324971SDavid Marchand {
216153324971SDavid Marchand 	streamid_t sm_id;
216253324971SDavid Marchand 	portid_t pt_id;
2163baef6bbfSMin Hu (Connor) 	int ret;
216453324971SDavid Marchand 	int i;
216553324971SDavid Marchand 
216653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
216753324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
2168baef6bbfSMin Hu (Connor) 		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2169baef6bbfSMin Hu (Connor) 		if (ret != 0)
2170baef6bbfSMin Hu (Connor) 			fprintf(stderr,
2171baef6bbfSMin Hu (Connor) 				"%s: Error: failed to clear stats (port %u):%d",
2172baef6bbfSMin Hu (Connor) 				__func__, pt_id, ret);
217353324971SDavid Marchand 	}
217453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
217553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
217653324971SDavid Marchand 
217753324971SDavid Marchand 		fs->rx_packets = 0;
217853324971SDavid Marchand 		fs->tx_packets = 0;
217953324971SDavid Marchand 		fs->fwd_dropped = 0;
218053324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
218153324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
218253324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2183d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
218453324971SDavid Marchand 
218553324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
218653324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2187*99a4974aSRobin Jarry 		fs->busy_cycles = 0;
218853324971SDavid Marchand 	}
218953324971SDavid Marchand }
219053324971SDavid Marchand 
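/*
 * Drain packets still sitting in the forwarding Rx queues so that a
 * new forwarding run starts with clean statistics. Two passes are
 * made, 10 ms apart, and a 1-second timer bounds each per-queue
 * drain loop. Skipped in multi-process mode.
 */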
2191af75078fSIntel static void
21927741e4cfSIntel flush_fwd_rx_queues(void)
2193af75078fSIntel {
2194af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2195af75078fSIntel 	portid_t  rxp;
21967741e4cfSIntel 	portid_t port_id;
2197af75078fSIntel 	queueid_t rxq;
2198af75078fSIntel 	uint16_t  nb_rx;
2199af75078fSIntel 	uint16_t  i;
2200af75078fSIntel 	uint8_t   j;
2201f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2202594302c7SJames Poole 	uint64_t timer_period;
2203f487715fSReshma Pattan 
2204a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2205a550baf2SMin Hu (Connor) 		printf("multi-process does not support flushing fwd Rx queues, skipping\n");
2206a550baf2SMin Hu (Connor) 		return;
2207a550baf2SMin Hu (Connor) 	}
2208a550baf2SMin Hu (Connor) 
2209f487715fSReshma Pattan 	/* convert to number of cycles */
2210594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2211af75078fSIntel 
2212af75078fSIntel 	for (j = 0; j < 2; j++) {
22137741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2214af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
22157741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
22163c4426dbSDmitry Kozlyuk 
22173c4426dbSDmitry Kozlyuk 				/* Polling stopped queues is prohibited. */
22183c4426dbSDmitry Kozlyuk 				if (ports[port_id].rxq[rxq].state ==
22193c4426dbSDmitry Kozlyuk 				    RTE_ETH_QUEUE_STATE_STOPPED)
22203c4426dbSDmitry Kozlyuk 					continue;
22213c4426dbSDmitry Kozlyuk 
2222f487715fSReshma Pattan 				/*
2223f487715fSReshma Pattan 				 * testpmd can get stuck in the do-while loop
2224f487715fSReshma Pattan 				 * below if rte_eth_rx_burst() keeps returning
2225f487715fSReshma Pattan 				 * packets, so a timer is used to exit the loop
2226f487715fSReshma Pattan 				 * once the 1-second period expires.
2227f487715fSReshma Pattan 				 */
2228f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2229af75078fSIntel 				do {
22307741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2231013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2232af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2233af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2234f487715fSReshma Pattan 
2235f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2236f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2237f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2238f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2239f487715fSReshma Pattan 					(timer_tsc < timer_period));
2240f487715fSReshma Pattan 				timer_tsc = 0;
2241af75078fSIntel 			}
2242af75078fSIntel 		}
2243af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2244af75078fSIntel 	}
2245af75078fSIntel }
2246af75078fSIntel 
2247af75078fSIntel static void
2248af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2249af75078fSIntel {
2250af75078fSIntel 	struct fwd_stream **fsm;
2251*99a4974aSRobin Jarry 	uint64_t prev_tsc;
2252af75078fSIntel 	streamid_t nb_fs;
2253af75078fSIntel 	streamid_t sm_id;
2254a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
22557e4441c8SRemy Horton 	uint64_t tics_per_1sec;
22567e4441c8SRemy Horton 	uint64_t tics_datum;
22577e4441c8SRemy Horton 	uint64_t tics_current;
22584918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2259af75078fSIntel 
22604918a357SXiaoyun Li 	cnt_ports = nb_ports;
22617e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
22627e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22637e4441c8SRemy Horton #endif
2264af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2265af75078fSIntel 	nb_fs = fc->stream_nb;
2266*99a4974aSRobin Jarry 	prev_tsc = rte_rdtsc();
2267af75078fSIntel 	do {
2268af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
22693c4426dbSDmitry Kozlyuk 			if (!fsm[sm_id]->disabled)
2270af75078fSIntel 				(*pkt_fwd)(fsm[sm_id]);
2271a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2272e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2273e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22747e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22757e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22767e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22774918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2278e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22794918a357SXiaoyun Li 						ports_ids[i]);
22807e4441c8SRemy Horton 				tics_datum = tics_current;
22817e4441c8SRemy Horton 			}
2282e25e6c70SRemy Horton 		}
22837e4441c8SRemy Horton #endif
2284a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
228565eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
228665eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
228762d3216dSReshma Pattan 			rte_latencystats_update();
228862d3216dSReshma Pattan #endif
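		/*
		 * Account the wall-clock cycles spent in this loop iteration
		 * to the lcore total; per-stream busy cycles are accumulated
		 * separately (fs->busy_cycles) and both are reported by the
		 * lcore usage callback below.
		 */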
2289*99a4974aSRobin Jarry 		if (record_core_cycles) {
2290*99a4974aSRobin Jarry 			uint64_t tsc = rte_rdtsc();
229162d3216dSReshma Pattan 
2292*99a4974aSRobin Jarry 			fc->total_cycles += tsc - prev_tsc;
2293*99a4974aSRobin Jarry 			prev_tsc = tsc;
2294*99a4974aSRobin Jarry 		}
2295af75078fSIntel 	} while (! fc->stopped);
2296af75078fSIntel }
2297af75078fSIntel 
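/*
 * rte_lcore_usage callback: report the total and busy cycle counters of a
 * forwarding lcore, aggregating the busy cycles of all its enabled streams.
 * It is assumed to be registered with rte_lcore_register_usage_cb() during
 * startup so that EAL lcore telemetry can expose the usage numbers.
 * Returns -1 if the lcore is not a forwarding lcore.
 */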
2298af75078fSIntel static int
2299*99a4974aSRobin Jarry lcore_usage_callback(unsigned int lcore_id, struct rte_lcore_usage *usage)
2300*99a4974aSRobin Jarry {
2301*99a4974aSRobin Jarry 	struct fwd_stream **fsm;
2302*99a4974aSRobin Jarry 	struct fwd_lcore *fc;
2303*99a4974aSRobin Jarry 	streamid_t nb_fs;
2304*99a4974aSRobin Jarry 	streamid_t sm_id;
2305*99a4974aSRobin Jarry 
2306*99a4974aSRobin Jarry 	fc = lcore_to_fwd_lcore(lcore_id);
2307*99a4974aSRobin Jarry 	if (fc == NULL)
2308*99a4974aSRobin Jarry 		return -1;
2309*99a4974aSRobin Jarry 
2310*99a4974aSRobin Jarry 	fsm = &fwd_streams[fc->stream_idx];
2311*99a4974aSRobin Jarry 	nb_fs = fc->stream_nb;
2312*99a4974aSRobin Jarry 	usage->busy_cycles = 0;
2313*99a4974aSRobin Jarry 	usage->total_cycles = fc->total_cycles;
2314*99a4974aSRobin Jarry 
2315*99a4974aSRobin Jarry 	for (sm_id = 0; sm_id < nb_fs; sm_id++) {
2316*99a4974aSRobin Jarry 		if (!fsm[sm_id]->disabled)
2317*99a4974aSRobin Jarry 			usage->busy_cycles += fsm[sm_id]->busy_cycles;
2318*99a4974aSRobin Jarry 	}
2319*99a4974aSRobin Jarry 
2320*99a4974aSRobin Jarry 	return 0;
2321*99a4974aSRobin Jarry }
2322*99a4974aSRobin Jarry 
2323*99a4974aSRobin Jarry static int
2324af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2325af75078fSIntel {
2326af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2327af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2328af75078fSIntel 	return 0;
2329af75078fSIntel }
2330af75078fSIntel 
2331af75078fSIntel /*
2332af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2333af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2334af75078fSIntel  */
2335af75078fSIntel static int
2336af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2337af75078fSIntel {
2338af75078fSIntel 	struct fwd_lcore *fwd_lc;
2339af75078fSIntel 	struct fwd_lcore tmp_lcore;
2340af75078fSIntel 
2341af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2342af75078fSIntel 	tmp_lcore = *fwd_lc;
2343af75078fSIntel 	tmp_lcore.stopped = 1;
2344af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2345af75078fSIntel 	return 0;
2346af75078fSIntel }
2347af75078fSIntel 
2348af75078fSIntel /*
2349af75078fSIntel  * Launch packet forwarding:
2350af75078fSIntel  *     - Setup per-port forwarding context.
2351af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2352af75078fSIntel  */
2353af75078fSIntel static void
2354af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2355af75078fSIntel {
2356af75078fSIntel 	unsigned int i;
2357af75078fSIntel 	unsigned int lc_id;
2358af75078fSIntel 	int diag;
2359af75078fSIntel 
2360af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2361af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2362af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2363af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2364af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2365af75078fSIntel 						     fwd_lcores[i], lc_id);
2366af75078fSIntel 			if (diag != 0)
236761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
236861a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2369af75078fSIntel 					lc_id, diag);
2370af75078fSIntel 		}
2371af75078fSIntel 	}
2372af75078fSIntel }
2373af75078fSIntel 
2374af75078fSIntel /*
2375af75078fSIntel  * Launch packet forwarding configuration.
2376af75078fSIntel  */
2377af75078fSIntel void
2378af75078fSIntel start_packet_forwarding(int with_tx_first)
2379af75078fSIntel {
2380af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2381af75078fSIntel 	port_fwd_end_t  port_fwd_end;
23823c4426dbSDmitry Kozlyuk 	stream_init_t stream_init = cur_fwd_eng->stream_init;
2383af75078fSIntel 	unsigned int i;
2384af75078fSIntel 
23855a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
23865a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
23875a8fb55cSReshma Pattan 
23885a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
23895a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
23905a8fb55cSReshma Pattan 
23915a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
23925a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
23935a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
23945a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
23955a8fb55cSReshma Pattan 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
23965a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
23975a8fb55cSReshma Pattan 
2398ce8d5614SIntel 	if (all_ports_started() == 0) {
239961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2400ce8d5614SIntel 		return;
2401ce8d5614SIntel 	}
2402af75078fSIntel 	if (test_done == 0) {
240361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2404af75078fSIntel 		return;
2405af75078fSIntel 	}
24067741e4cfSIntel 
240747a767b2SMatan Azrad 	fwd_config_setup();
240847a767b2SMatan Azrad 
240965744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
241065744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
241165744833SXueming Li 		return;
241265744833SXueming Li 
24133c4426dbSDmitry Kozlyuk 	if (stream_init != NULL)
24143c4426dbSDmitry Kozlyuk 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
24153c4426dbSDmitry Kozlyuk 			stream_init(fwd_streams[i]);
24163c4426dbSDmitry Kozlyuk 
2417a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2418a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2419a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2420a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2421a78040c9SAlvin Zhang 				fprintf(stderr,
2422a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2423a78040c9SAlvin Zhang 				return;
2424a78040c9SAlvin Zhang 			}
2425a78040c9SAlvin Zhang 		}
2426a78040c9SAlvin Zhang 	}
2427a78040c9SAlvin Zhang 
2428a78040c9SAlvin Zhang 	if (with_tx_first) {
2429a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2430a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2431a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2432a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2433a78040c9SAlvin Zhang 					fprintf(stderr,
2434a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2435a78040c9SAlvin Zhang 					return;
2436a78040c9SAlvin Zhang 				}
2437a78040c9SAlvin Zhang 			}
2438a78040c9SAlvin Zhang 		}
2439a78040c9SAlvin Zhang 	}
2440a78040c9SAlvin Zhang 
2441a78040c9SAlvin Zhang 	test_done = 0;
2442a78040c9SAlvin Zhang 
24437741e4cfSIntel 	if(!no_flush_rx)
24447741e4cfSIntel 		flush_fwd_rx_queues();
24457741e4cfSIntel 
2446af75078fSIntel 	rxtx_config_display();
2447af75078fSIntel 
244853324971SDavid Marchand 	fwd_stats_reset();
2449af75078fSIntel 	if (with_tx_first) {
2450acbf77a6SZhihong Wang 		while (with_tx_first--) {
2451acbf77a6SZhihong Wang 			launch_packet_forwarding(
2452acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2453af75078fSIntel 			rte_eal_mp_wait_lcore();
2454acbf77a6SZhihong Wang 		}
2455af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2456af75078fSIntel 		if (port_fwd_end != NULL) {
2457af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2458af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2459af75078fSIntel 		}
2460af75078fSIntel 	}
2461af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2462af75078fSIntel }
2463af75078fSIntel 
2464af75078fSIntel void
2465af75078fSIntel stop_packet_forwarding(void)
2466af75078fSIntel {
2467af75078fSIntel 	port_fwd_end_t port_fwd_end;
2468af75078fSIntel 	lcoreid_t lc_id;
246953324971SDavid Marchand 	portid_t pt_id;
247053324971SDavid Marchand 	int i;
2471af75078fSIntel 
2472af75078fSIntel 	if (test_done) {
247361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2474af75078fSIntel 		return;
2475af75078fSIntel 	}
2476af75078fSIntel 	printf("Telling cores to stop...");
2477af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2478af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2479af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2480af75078fSIntel 	rte_eal_mp_wait_lcore();
2481af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2482af75078fSIntel 	if (port_fwd_end != NULL) {
2483af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2484af75078fSIntel 			pt_id = fwd_ports_ids[i];
2485af75078fSIntel 			(*port_fwd_end)(pt_id);
2486af75078fSIntel 		}
2487af75078fSIntel 	}
2488c185d42cSDavid Marchand 
248953324971SDavid Marchand 	fwd_stats_display();
249058d475b7SJerin Jacob 
2491af75078fSIntel 	printf("\nDone.\n");
2492af75078fSIntel 	test_done = 1;
2493af75078fSIntel }
2494af75078fSIntel 
2495cfae07fdSOuyang Changchun void
2496cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2497cfae07fdSOuyang Changchun {
2498492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
249961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up failed.\n");
2500cfae07fdSOuyang Changchun }
2501cfae07fdSOuyang Changchun 
2502cfae07fdSOuyang Changchun void
2503cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2504cfae07fdSOuyang Changchun {
2505492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
250661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down failed.\n");
2507cfae07fdSOuyang Changchun }
2508cfae07fdSOuyang Changchun 
2509ce8d5614SIntel static int
2510ce8d5614SIntel all_ports_started(void)
2511ce8d5614SIntel {
2512ce8d5614SIntel 	portid_t pi;
2513ce8d5614SIntel 	struct rte_port *port;
2514ce8d5614SIntel 
25157d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2516ce8d5614SIntel 		port = &ports[pi];
2517ce8d5614SIntel 		/* Check if there is a port which is not started */
251841b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
251941b05095SBernard Iremonger 			(port->slave_flag == 0))
2520ce8d5614SIntel 			return 0;
2521ce8d5614SIntel 	}
2522ce8d5614SIntel 
2523ce8d5614SIntel 	/* All ports are started */
2524ce8d5614SIntel 	return 1;
2525ce8d5614SIntel }
2526ce8d5614SIntel 
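/*
 * Return 1 when the port is stopped or is a bonding slave (slaves are
 * controlled by the bonding device, not directly by testpmd), 0 otherwise.
 */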
2527148f963fSBruce Richardson int
25286018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
25296018eb8cSShahaf Shuler {
25306018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
25316018eb8cSShahaf Shuler 
25326018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
25336018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
25346018eb8cSShahaf Shuler 		return 0;
25356018eb8cSShahaf Shuler 	return 1;
25366018eb8cSShahaf Shuler }
25376018eb8cSShahaf Shuler 
25386018eb8cSShahaf Shuler int
2539edab33b1STetsuya Mukawa all_ports_stopped(void)
2540edab33b1STetsuya Mukawa {
2541edab33b1STetsuya Mukawa 	portid_t pi;
2542edab33b1STetsuya Mukawa 
25437d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
25446018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2545edab33b1STetsuya Mukawa 			return 0;
2546edab33b1STetsuya Mukawa 	}
2547edab33b1STetsuya Mukawa 
2548edab33b1STetsuya Mukawa 	return 1;
2549edab33b1STetsuya Mukawa }
2550edab33b1STetsuya Mukawa 
2551edab33b1STetsuya Mukawa int
2552edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2553edab33b1STetsuya Mukawa {
2554edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2555edab33b1STetsuya Mukawa 		return 0;
2556edab33b1STetsuya Mukawa 
2557edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2558edab33b1STetsuya Mukawa 		return 0;
2559edab33b1STetsuya Mukawa 
2560edab33b1STetsuya Mukawa 	return 1;
2561edab33b1STetsuya Mukawa }
2562edab33b1STetsuya Mukawa 
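
/*
 * hairpin_mode bit layout, as consumed by setup_hairpin_queues() below:
 *   bit 0:      pair each port's Tx with the next port's Rx (chained loop)
 *   bit 1:      pair consecutive ports two by two
 *   bit 4:      use explicit Tx flow rules
 *   bits 8/9:   force memory settings of hairpin Rx/Tx queues
 *   bits 12/13: hairpin Rx queues use locked device memory / RTE memory
 *   bits 16/17: hairpin Tx queues use locked device memory / RTE memory
 * When the low nibble is zero, each port is hairpinned to itself and the
 * queues are bound automatically by the driver.
 */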
256323095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8)
256423095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9)
256523095155SDariusz Sosnowski 
256623095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12)
256723095155SDariusz Sosnowski #define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13)
256823095155SDariusz Sosnowski 
256923095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16)
257023095155SDariusz Sosnowski #define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17)
257123095155SDariusz Sosnowski 
257223095155SDariusz Sosnowski 
25731c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
25741c69df45SOri Kam static int
257501817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
25761c69df45SOri Kam {
25771c69df45SOri Kam 	queueid_t qi;
25781c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
25791c69df45SOri Kam 		.peer_count = 1,
25801c69df45SOri Kam 	};
25811c69df45SOri Kam 	int i;
25821c69df45SOri Kam 	int diag;
25831c69df45SOri Kam 	struct rte_port *port = &ports[pi];
258401817b10SBing Zhao 	uint16_t peer_rx_port = pi;
258501817b10SBing Zhao 	uint16_t peer_tx_port = pi;
258601817b10SBing Zhao 	uint32_t manual = 1;
258701817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
258823095155SDariusz Sosnowski 	uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY;
258923095155SDariusz Sosnowski 	uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY;
259023095155SDariusz Sosnowski 	uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY;
259123095155SDariusz Sosnowski 	uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY;
259223095155SDariusz Sosnowski 	uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY;
259323095155SDariusz Sosnowski 	uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY;
259401817b10SBing Zhao 
259501817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
259601817b10SBing Zhao 		peer_rx_port = pi;
259701817b10SBing Zhao 		peer_tx_port = pi;
259801817b10SBing Zhao 		manual = 0;
259901817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
260001817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
260101817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
260201817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
260301817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
260401817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
260501817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
260601817b10SBing Zhao 			peer_rx_port = p_pi;
260701817b10SBing Zhao 		} else {
260801817b10SBing Zhao 			uint16_t next_pi;
260901817b10SBing Zhao 
261001817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
261101817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
261201817b10SBing Zhao 				peer_rx_port = next_pi;
261301817b10SBing Zhao 		}
261401817b10SBing Zhao 		manual = 1;
261501817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
261601817b10SBing Zhao 		if (cnt_pi & 0x1) {
261701817b10SBing Zhao 			peer_rx_port = p_pi;
261801817b10SBing Zhao 		} else {
261901817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
262001817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
262101817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
262201817b10SBing Zhao 				peer_rx_port = pi;
262301817b10SBing Zhao 		}
262401817b10SBing Zhao 		peer_tx_port = peer_rx_port;
262501817b10SBing Zhao 		manual = 1;
262601817b10SBing Zhao 	}
26271c69df45SOri Kam 
26281c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
262901817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
26301c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
263101817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
263201817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
263323095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!tx_force_memory;
263423095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!tx_locked_memory;
263523095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!tx_rte_memory;
26361c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
26371c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
26381c69df45SOri Kam 		i++;
26391c69df45SOri Kam 		if (diag == 0)
26401c69df45SOri Kam 			continue;
26411c69df45SOri Kam 
26421c69df45SOri Kam 		/* Failed to set up Tx hairpin queue, return */
2643eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2644eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2645eac341d3SJoyce Kong 		else
264661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
264761a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
264861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
264961a3b0e5SAndrew Rybchenko 			pi);
26501c69df45SOri Kam 		/* try to reconfigure queues next time */
26511c69df45SOri Kam 		port->need_reconfig_queues = 1;
26521c69df45SOri Kam 		return -1;
26531c69df45SOri Kam 	}
26541c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
265501817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
26561c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
265701817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
265801817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
265923095155SDariusz Sosnowski 		hairpin_conf.force_memory = !!rx_force_memory;
266023095155SDariusz Sosnowski 		hairpin_conf.use_locked_device_memory = !!rx_locked_memory;
266123095155SDariusz Sosnowski 		hairpin_conf.use_rte_memory = !!rx_rte_memory;
26621c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
26631c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
26641c69df45SOri Kam 		i++;
26651c69df45SOri Kam 		if (diag == 0)
26661c69df45SOri Kam 			continue;
26671c69df45SOri Kam 
26681c69df45SOri Kam 		/* Failed to set up Rx hairpin queue, return */
2669eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
2670eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
2671eac341d3SJoyce Kong 		else
267261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
267361a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
267461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
267561a3b0e5SAndrew Rybchenko 			pi);
26761c69df45SOri Kam 		/* try to reconfigure queues next time */
26771c69df45SOri Kam 		port->need_reconfig_queues = 1;
26781c69df45SOri Kam 		return -1;
26791c69df45SOri Kam 	}
26801c69df45SOri Kam 	return 0;
26811c69df45SOri Kam }
26821c69df45SOri Kam 
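/*
 * Three mutually exclusive Rx queue configurations are handled below:
 *  1. Buffer split: rx_pkt_nb_segs > 1 and the BUFFER_SPLIT offload is
 *     enabled - each segment gets its own length, offset and mempool.
 *  2. Multi-mempool: multi_rx_mempool is set - all configured mempools are
 *     passed to the PMD, which picks a suitable one per received packet.
 *  3. Single pool/segment: the default rte_eth_rx_queue_setup() path.
 */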
26832befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
26842befc67fSViacheslav Ovsiienko int
26852befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
26862befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
26872befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
26882befc67fSViacheslav Ovsiienko {
26892befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
26904f04edcdSHanumanth Pothula 	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
26914f04edcdSHanumanth Pothula 	struct rte_mempool *mpx;
26922befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
269354a0f4d7SYuan Wang 	uint32_t prev_hdrs = 0;
26942befc67fSViacheslav Ovsiienko 	int ret;
26952befc67fSViacheslav Ovsiienko 
26964f04edcdSHanumanth Pothula 
2697a4bf5421SHanumanth Pothula 	if ((rx_pkt_nb_segs > 1) &&
2698a4bf5421SHanumanth Pothula 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
26994f04edcdSHanumanth Pothula 		/* multi-segment configuration */
27002befc67fSViacheslav Ovsiienko 		for (i = 0; i < rx_pkt_nb_segs; i++) {
27012befc67fSViacheslav Ovsiienko 			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
27022befc67fSViacheslav Ovsiienko 			/*
27032befc67fSViacheslav Ovsiienko 			 * Use the last valid pool for segments whose index
27042befc67fSViacheslav Ovsiienko 			 * exceeds the number of configured pools.
27052befc67fSViacheslav Ovsiienko 			 */
27061108c33eSRaja Zidane 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
27072befc67fSViacheslav Ovsiienko 			mpx = mbuf_pool_find(socket_id, mp_n);
27082befc67fSViacheslav Ovsiienko 			/* Handle zero as mbuf data buffer size. */
27092befc67fSViacheslav Ovsiienko 			rx_seg->offset = i < rx_pkt_nb_offs ?
27102befc67fSViacheslav Ovsiienko 					   rx_pkt_seg_offsets[i] : 0;
27112befc67fSViacheslav Ovsiienko 			rx_seg->mp = mpx ? mpx : mp;
271252e2e7edSYuan Wang 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
271354a0f4d7SYuan Wang 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
271454a0f4d7SYuan Wang 				prev_hdrs |= rx_seg->proto_hdr;
271552e2e7edSYuan Wang 			} else {
271652e2e7edSYuan Wang 				rx_seg->length = rx_pkt_seg_lengths[i] ?
271752e2e7edSYuan Wang 						rx_pkt_seg_lengths[i] :
271852e2e7edSYuan Wang 						mbuf_data_size[mp_n];
271952e2e7edSYuan Wang 			}
27202befc67fSViacheslav Ovsiienko 		}
27212befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = rx_pkt_nb_segs;
27222befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = rx_useg;
2723a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2724a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2725a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2726a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, NULL);
2727a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2728a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
2729a4bf5421SHanumanth Pothula 	} else if (multi_rx_mempool == 1) {
27304f04edcdSHanumanth Pothula 		/* multi-pool configuration */
2731a4bf5421SHanumanth Pothula 		struct rte_eth_dev_info dev_info;
2732a4bf5421SHanumanth Pothula 
2733a4bf5421SHanumanth Pothula 		if (mbuf_data_size_n <= 1) {
2734a4bf5421SHanumanth Pothula 			fprintf(stderr, "Invalid number of mempools %u\n",
2735a4bf5421SHanumanth Pothula 				mbuf_data_size_n);
2736a4bf5421SHanumanth Pothula 			return -EINVAL;
2737a4bf5421SHanumanth Pothula 		}
2738a4bf5421SHanumanth Pothula 		ret = rte_eth_dev_info_get(port_id, &dev_info);
2739a4bf5421SHanumanth Pothula 		if (ret != 0)
2740a4bf5421SHanumanth Pothula 			return ret;
2741a4bf5421SHanumanth Pothula 		if (dev_info.max_rx_mempools == 0) {
2742a4bf5421SHanumanth Pothula 			fprintf(stderr,
2743a4bf5421SHanumanth Pothula 				"Port %u doesn't support requested multi-rx-mempool configuration.\n",
2744a4bf5421SHanumanth Pothula 				port_id);
2745a4bf5421SHanumanth Pothula 			return -ENOTSUP;
2746a4bf5421SHanumanth Pothula 		}
27474f04edcdSHanumanth Pothula 		for (i = 0; i < mbuf_data_size_n; i++) {
27484f04edcdSHanumanth Pothula 			mpx = mbuf_pool_find(socket_id, i);
27494f04edcdSHanumanth Pothula 			rx_mempool[i] = mpx ? mpx : mp;
27504f04edcdSHanumanth Pothula 		}
27514f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = rx_mempool;
27524f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = mbuf_data_size_n;
2753a4bf5421SHanumanth Pothula 		rx_conf->rx_seg = NULL;
2754a4bf5421SHanumanth Pothula 		rx_conf->rx_nseg = 0;
27552befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
27562befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
2757a4bf5421SHanumanth Pothula 		rx_conf->rx_mempools = NULL;
2758a4bf5421SHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2759a4bf5421SHanumanth Pothula 	} else {
2760a4bf5421SHanumanth Pothula 		/* Single pool/segment configuration */
27612befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
27622befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
27634f04edcdSHanumanth Pothula 		rx_conf->rx_mempools = NULL;
27644f04edcdSHanumanth Pothula 		rx_conf->rx_nmempool = 0;
2765a4bf5421SHanumanth Pothula 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2766a4bf5421SHanumanth Pothula 				    socket_id, rx_conf, mp);
2767a4bf5421SHanumanth Pothula 	}
2768a4bf5421SHanumanth Pothula 
27693c4426dbSDmitry Kozlyuk 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
27703c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
27713c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
27722befc67fSViacheslav Ovsiienko 	return ret;
27732befc67fSViacheslav Ovsiienko }
27742befc67fSViacheslav Ovsiienko 
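/*
 * Allocate the per-port helper arrays (supported xstat ids, previous and
 * current values) used to display the extended statistics selected on the
 * command line. Returns 0 on success or when the feature is unused,
 * -ENOMEM on allocation failure.
 */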
277563b72657SIvan Ilchenko static int
277663b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
277763b72657SIvan Ilchenko {
277863b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
277963b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
278063b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
278163b72657SIvan Ilchenko 
278263b72657SIvan Ilchenko 	if (xstats_display_num == 0)
278363b72657SIvan Ilchenko 		return 0;
278463b72657SIvan Ilchenko 
278563b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
278663b72657SIvan Ilchenko 	if (*ids_supp == NULL)
278763b72657SIvan Ilchenko 		goto fail_ids_supp;
278863b72657SIvan Ilchenko 
278963b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
279063b72657SIvan Ilchenko 			      sizeof(**prev_values));
279163b72657SIvan Ilchenko 	if (*prev_values == NULL)
279263b72657SIvan Ilchenko 		goto fail_prev_values;
279363b72657SIvan Ilchenko 
279463b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
279563b72657SIvan Ilchenko 			      sizeof(**curr_values));
279663b72657SIvan Ilchenko 	if (*curr_values == NULL)
279763b72657SIvan Ilchenko 		goto fail_curr_values;
279863b72657SIvan Ilchenko 
279963b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
280063b72657SIvan Ilchenko 
280163b72657SIvan Ilchenko 	return 0;
280263b72657SIvan Ilchenko 
280363b72657SIvan Ilchenko fail_curr_values:
280463b72657SIvan Ilchenko 	free(*prev_values);
280563b72657SIvan Ilchenko fail_prev_values:
280663b72657SIvan Ilchenko 	free(*ids_supp);
280763b72657SIvan Ilchenko fail_ids_supp:
280863b72657SIvan Ilchenko 	return -ENOMEM;
280963b72657SIvan Ilchenko }
281063b72657SIvan Ilchenko 
281163b72657SIvan Ilchenko static void
281263b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
281363b72657SIvan Ilchenko {
281463b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
281563b72657SIvan Ilchenko 		return;
281663b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
281763b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
281863b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
281963b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
282063b72657SIvan Ilchenko }
282163b72657SIvan Ilchenko 
282263b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
282363b72657SIvan Ilchenko static void
282463b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
282563b72657SIvan Ilchenko {
282663b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
282763b72657SIvan Ilchenko 	const char *xstat_name;
282863b72657SIvan Ilchenko 	struct rte_port *port;
282963b72657SIvan Ilchenko 	uint64_t *ids_supp;
283063b72657SIvan Ilchenko 	int rc;
283163b72657SIvan Ilchenko 
283263b72657SIvan Ilchenko 	if (xstats_display_num == 0)
283363b72657SIvan Ilchenko 		return;
283463b72657SIvan Ilchenko 
283563b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
283663b72657SIvan Ilchenko 		fill_xstats_display_info();
283763b72657SIvan Ilchenko 		return;
283863b72657SIvan Ilchenko 	}
283963b72657SIvan Ilchenko 
284063b72657SIvan Ilchenko 	port = &ports[pi];
284163b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
284263b72657SIvan Ilchenko 		return;
284363b72657SIvan Ilchenko 
284463b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
284563b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
284663b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
284763b72657SIvan Ilchenko 
284863b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
284963b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
285063b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
285163b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
285263b72657SIvan Ilchenko 						   ids_supp + stat_supp);
285363b72657SIvan Ilchenko 		if (rc != 0) {
285463b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skipping (index %u)\n",
285563b72657SIvan Ilchenko 				xstat_name, pi, stat);
285663b72657SIvan Ilchenko 			continue;
285763b72657SIvan Ilchenko 		}
285863b72657SIvan Ilchenko 		stat_supp++;
285963b72657SIvan Ilchenko 	}
286063b72657SIvan Ilchenko 
286163b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
286263b72657SIvan Ilchenko }
286363b72657SIvan Ilchenko 
286463b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
286563b72657SIvan Ilchenko static void
286663b72657SIvan Ilchenko fill_xstats_display_info(void)
286763b72657SIvan Ilchenko {
286863b72657SIvan Ilchenko 	portid_t pi;
286963b72657SIvan Ilchenko 
287063b72657SIvan Ilchenko 	if (xstats_display_num == 0)
287163b72657SIvan Ilchenko 		return;
287263b72657SIvan Ilchenko 
287363b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
287463b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
287563b72657SIvan Ilchenko }
287663b72657SIvan Ilchenko 
28777c06f1abSHuisong Li /*
28787c06f1abSHuisong Li  * Some capabilities (like rx_offload_capa and tx_offload_capa) of a bonding
28797c06f1abSHuisong Li  * device in dev_info are zero when no slave is added, and they are updated
28807c06f1abSHuisong Li  * when a new slave device is added. So adding a slave device requires
28817c06f1abSHuisong Li  * updating the port configuration of the bonding device.
28827c06f1abSHuisong Li  */
28837c06f1abSHuisong Li static void
28847c06f1abSHuisong Li update_bonding_port_dev_conf(portid_t bond_pid)
28857c06f1abSHuisong Li {
28867c06f1abSHuisong Li #ifdef RTE_NET_BOND
28877c06f1abSHuisong Li 	struct rte_port *port = &ports[bond_pid];
28887c06f1abSHuisong Li 	uint16_t i;
28897c06f1abSHuisong Li 	int ret;
28907c06f1abSHuisong Li 
28917c06f1abSHuisong Li 	ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
28927c06f1abSHuisong Li 	if (ret != 0) {
28937c06f1abSHuisong Li 		fprintf(stderr, "Failed to get dev info for port = %u\n",
28947c06f1abSHuisong Li 			bond_pid);
28957c06f1abSHuisong Li 		return;
28967c06f1abSHuisong Li 	}
28977c06f1abSHuisong Li 
28987c06f1abSHuisong Li 	if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
28997c06f1abSHuisong Li 		port->dev_conf.txmode.offloads |=
29007c06f1abSHuisong Li 				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
29017c06f1abSHuisong Li 	/* Apply Tx offloads configuration */
29027c06f1abSHuisong Li 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
29037c06f1abSHuisong Li 		port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
29047c06f1abSHuisong Li 
29057c06f1abSHuisong Li 	port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
29067c06f1abSHuisong Li 				port->dev_info.flow_type_rss_offloads;
29077c06f1abSHuisong Li #else
29087c06f1abSHuisong Li 	RTE_SET_USED(bond_pid);
29097c06f1abSHuisong Li #endif
29107c06f1abSHuisong Li }
29117c06f1abSHuisong Li 
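/*
 * Configure (when needed) and start the given port, or all ports when
 * pid == RTE_PORT_ALL, then optionally bind hairpin peers.
 * Returns 0 on success, -1 on failure.
 */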
2912edab33b1STetsuya Mukawa int
2913ce8d5614SIntel start_port(portid_t pid)
2914ce8d5614SIntel {
291592d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2916ce8d5614SIntel 	portid_t pi;
291701817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
291801817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
291901817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
292001817b10SBing Zhao 	uint16_t cnt_pi = 0;
292101817b10SBing Zhao 	uint16_t cfg_pi = 0;
292201817b10SBing Zhao 	int peer_pi;
2923ce8d5614SIntel 	queueid_t qi;
2924ce8d5614SIntel 	struct rte_port *port;
29251c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2926ce8d5614SIntel 
29274468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
29284468635fSMichael Qiu 		return 0;
29294468635fSMichael Qiu 
29307d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2931edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2932ce8d5614SIntel 			continue;
2933ce8d5614SIntel 
2934d8c079a5SMin Hu (Connor) 		if (port_is_bonding_slave(pi)) {
2935d8c079a5SMin Hu (Connor) 			fprintf(stderr,
2936d8c079a5SMin Hu (Connor) 				"Please remove port %d from bonded device.\n",
2937d8c079a5SMin Hu (Connor) 				pi);
2938d8c079a5SMin Hu (Connor) 			continue;
2939d8c079a5SMin Hu (Connor) 		}
2940d8c079a5SMin Hu (Connor) 
294192d2703eSMichael Qiu 		need_check_link_status = 0;
2942ce8d5614SIntel 		port = &ports[pi];
2943eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STOPPED)
2944eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
2945eac341d3SJoyce Kong 		else {
294661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is not stopped\n", pi);
2947ce8d5614SIntel 			continue;
2948ce8d5614SIntel 		}
2949ce8d5614SIntel 
2950ce8d5614SIntel 		if (port->need_reconfig > 0) {
2951655eae01SJie Wang 			struct rte_eth_conf dev_conf;
2952655eae01SJie Wang 			int k;
2953655eae01SJie Wang 
2954ce8d5614SIntel 			port->need_reconfig = 0;
2955ce8d5614SIntel 
29567ee3e944SVasily Philipov 			if (flow_isolate_all) {
29577ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
29587ee3e944SVasily Philipov 				if (ret) {
295961a3b0e5SAndrew Rybchenko 					fprintf(stderr,
296061a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
296161a3b0e5SAndrew Rybchenko 						pi);
29627ee3e944SVasily Philipov 					return -1;
29637ee3e944SVasily Philipov 				}
29647ee3e944SVasily Philipov 			}
2965b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
29665706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
296720a0286fSLiu Xiaofeng 					port->socket_id);
29681c69df45SOri Kam 			if (nb_hairpinq > 0 &&
29691c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
297061a3b0e5SAndrew Rybchenko 				fprintf(stderr,
297161a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
297261a3b0e5SAndrew Rybchenko 					pi);
29731c69df45SOri Kam 				return -1;
29741c69df45SOri Kam 			}
29751bb4a528SFerruh Yigit 
29767c06f1abSHuisong Li 			if (port->bond_flag == 1 && port->update_conf == 1) {
29777c06f1abSHuisong Li 				update_bonding_port_dev_conf(pi);
29787c06f1abSHuisong Li 				port->update_conf = 0;
29797c06f1abSHuisong Li 			}
29807c06f1abSHuisong Li 
2981ce8d5614SIntel 			/* configure port */
2982a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
29831c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2984ce8d5614SIntel 						     &(port->dev_conf));
2985ce8d5614SIntel 			if (diag != 0) {
2986eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
2987eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
2988eac341d3SJoyce Kong 				else
298961a3b0e5SAndrew Rybchenko 					fprintf(stderr,
299061a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
299161a3b0e5SAndrew Rybchenko 						pi);
299261a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to configure port %d\n",
299361a3b0e5SAndrew Rybchenko 					pi);
2994ce8d5614SIntel 				/* try to reconfigure port next time */
2995ce8d5614SIntel 				port->need_reconfig = 1;
2996148f963fSBruce Richardson 				return -1;
2997ce8d5614SIntel 			}
2998655eae01SJie Wang 			/* get device configuration */
2999655eae01SJie Wang 			if (0 !=
3000655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
3001655eae01SJie Wang 				fprintf(stderr,
3002655eae01SJie Wang 					"Failed to get device configuration of port %d\n",
3003655eae01SJie Wang 					pi);
3004655eae01SJie Wang 				return -1;
3005655eae01SJie Wang 			}
3006655eae01SJie Wang 			/* Apply Rx offloads configuration */
3007655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
3008655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
3009655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
3010655eae01SJie Wang 					dev_conf.rxmode.offloads;
3011655eae01SJie Wang 				for (k = 0;
3012655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
3013655eae01SJie Wang 				     k++)
30143c4426dbSDmitry Kozlyuk 					port->rxq[k].conf.offloads |=
3015655eae01SJie Wang 						dev_conf.rxmode.offloads;
3016655eae01SJie Wang 			}
3017655eae01SJie Wang 			/* Apply Tx offloads configuration */
3018655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
3019655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
3020655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
3021655eae01SJie Wang 					dev_conf.txmode.offloads;
3022655eae01SJie Wang 				for (k = 0;
3023655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
3024655eae01SJie Wang 				     k++)
30253c4426dbSDmitry Kozlyuk 					port->txq[k].conf.offloads |=
3026655eae01SJie Wang 						dev_conf.txmode.offloads;
3027655eae01SJie Wang 			}
3028ce8d5614SIntel 		}
3029a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
3030ce8d5614SIntel 			port->need_reconfig_queues = 0;
3031ce8d5614SIntel 			/* setup tx queues */
3032ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
30333c4426dbSDmitry Kozlyuk 				struct rte_eth_txconf *conf =
30343c4426dbSDmitry Kozlyuk 							&port->txq[qi].conf;
30353c4426dbSDmitry Kozlyuk 
3036b6ea6408SIntel 				if ((numa_support) &&
3037b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
3038b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3039d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3040d44f8a48SQi Zhang 						txring_numa[pi],
30413c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3042b6ea6408SIntel 				else
3043b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
3044d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
3045d44f8a48SQi Zhang 						port->socket_id,
30463c4426dbSDmitry Kozlyuk 						&(port->txq[qi].conf));
3047b6ea6408SIntel 
30483c4426dbSDmitry Kozlyuk 				if (diag == 0) {
30493c4426dbSDmitry Kozlyuk 					port->txq[qi].state =
30503c4426dbSDmitry Kozlyuk 						conf->tx_deferred_start ?
30513c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STOPPED :
30523c4426dbSDmitry Kozlyuk 						RTE_ETH_QUEUE_STATE_STARTED;
3053ce8d5614SIntel 					continue;
30543c4426dbSDmitry Kozlyuk 				}
3055ce8d5614SIntel 
3056ce8d5614SIntel 				/* Failed to set up Tx queue, return */
3057eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3058eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3059eac341d3SJoyce Kong 				else
306061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
306161a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
306261a3b0e5SAndrew Rybchenko 						pi);
306361a3b0e5SAndrew Rybchenko 				fprintf(stderr,
306461a3b0e5SAndrew Rybchenko 					"Failed to configure port %d Tx queues\n",
3065d44f8a48SQi Zhang 					pi);
3066ce8d5614SIntel 				/* try to reconfigure queues next time */
3067ce8d5614SIntel 				port->need_reconfig_queues = 1;
3068148f963fSBruce Richardson 				return -1;
3069ce8d5614SIntel 			}
3070ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
3071d44f8a48SQi Zhang 				/* setup rx queues */
3072b6ea6408SIntel 				if ((numa_support) &&
3073b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
3074b6ea6408SIntel 					struct rte_mempool * mp =
307526cbb419SViacheslav Ovsiienko 						mbuf_pool_find
307626cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
3077b6ea6408SIntel 					if (mp == NULL) {
307861a3b0e5SAndrew Rybchenko 						fprintf(stderr,
307961a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
3080b6ea6408SIntel 							rxring_numa[pi]);
3081148f963fSBruce Richardson 						return -1;
3082b6ea6408SIntel 					}
3083b6ea6408SIntel 
30842befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3085d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3086d44f8a48SQi Zhang 					     rxring_numa[pi],
30873c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3088d44f8a48SQi Zhang 					     mp);
30891e1d6bddSBernard Iremonger 				} else {
30901e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
309126cbb419SViacheslav Ovsiienko 						mbuf_pool_find
309226cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
30931e1d6bddSBernard Iremonger 					if (mp == NULL) {
309461a3b0e5SAndrew Rybchenko 						fprintf(stderr,
309561a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
30961e1d6bddSBernard Iremonger 							port->socket_id);
30971e1d6bddSBernard Iremonger 						return -1;
3098b6ea6408SIntel 					}
30992befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
3100d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
3101d44f8a48SQi Zhang 					     port->socket_id,
31023c4426dbSDmitry Kozlyuk 					     &(port->rxq[qi].conf),
3103d44f8a48SQi Zhang 					     mp);
31041e1d6bddSBernard Iremonger 				}
3105ce8d5614SIntel 				if (diag == 0)
3106ce8d5614SIntel 					continue;
3107ce8d5614SIntel 
3108ce8d5614SIntel 				/* Failed to set up Rx queue, return */
3109eac341d3SJoyce Kong 				if (port->port_status == RTE_PORT_HANDLING)
3110eac341d3SJoyce Kong 					port->port_status = RTE_PORT_STOPPED;
3111eac341d3SJoyce Kong 				else
311261a3b0e5SAndrew Rybchenko 					fprintf(stderr,
311361a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
311461a3b0e5SAndrew Rybchenko 						pi);
311561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
311661a3b0e5SAndrew Rybchenko 					"Failed to configure port %d Rx queues\n",
3117d44f8a48SQi Zhang 					pi);
3118ce8d5614SIntel 				/* try to reconfigure queues next time */
3119ce8d5614SIntel 				port->need_reconfig_queues = 1;
3120148f963fSBruce Richardson 				return -1;
3121ce8d5614SIntel 			}
31221c69df45SOri Kam 			/* setup hairpin queues */
312301817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
31241c69df45SOri Kam 				return -1;
3125ce8d5614SIntel 		}
3126b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
3127b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
3128b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3129b0a9354aSPavan Nikhilesh 					NULL, 0);
3130b0a9354aSPavan Nikhilesh 			if (diag < 0)
313161a3b0e5SAndrew Rybchenko 				fprintf(stderr,
3132b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
3133b0a9354aSPavan Nikhilesh 					pi);
3134b0a9354aSPavan Nikhilesh 		}
3135b0a9354aSPavan Nikhilesh 
313601817b10SBing Zhao 		p_pi = pi;
313701817b10SBing Zhao 		cnt_pi++;
313801817b10SBing Zhao 
3139ce8d5614SIntel 		/* start port */
3140a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
314152f2c6f2SAndrew Rybchenko 		if (diag < 0) {
314261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Failed to start port %d: %s\n",
314361a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
3144ce8d5614SIntel 
3145ce8d5614SIntel 			/* Failed to start port, set status back to stopped */
3146eac341d3SJoyce Kong 			if (port->port_status == RTE_PORT_HANDLING)
3147eac341d3SJoyce Kong 				port->port_status = RTE_PORT_STOPPED;
3148eac341d3SJoyce Kong 			else
314961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
315061a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
315161a3b0e5SAndrew Rybchenko 					pi);
3152ce8d5614SIntel 			continue;
3153ce8d5614SIntel 		}
3154ce8d5614SIntel 
3155eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3156eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STARTED;
3157eac341d3SJoyce Kong 		else
315861a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
315961a3b0e5SAndrew Rybchenko 				pi);
3160ce8d5614SIntel 
31615ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3162c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3163a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3164d8c89163SZijie Pan 
3165ce8d5614SIntel 		/* at least one port started, need to check link status */
3166ce8d5614SIntel 		need_check_link_status = 1;
316701817b10SBing Zhao 
316801817b10SBing Zhao 		pl[cfg_pi++] = pi;
3169ce8d5614SIntel 	}
3170ce8d5614SIntel 
317192d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
3172edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
317392d2703eSMichael Qiu 	else if (need_check_link_status == 0)
317461a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
3175ce8d5614SIntel 
317601817b10SBing Zhao 	if (hairpin_mode & 0xf) {
317701817b10SBing Zhao 		uint16_t i;
317801817b10SBing Zhao 		int j;
317901817b10SBing Zhao 
318001817b10SBing Zhao 		/* bind all started hairpin ports */
318101817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
318201817b10SBing Zhao 			pi = pl[i];
318301817b10SBing Zhao 			/* bind current Tx to all peer Rx */
318401817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
318501817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
318601817b10SBing Zhao 			if (peer_pi < 0)
318701817b10SBing Zhao 				return peer_pi;
318801817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
318901817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
319001817b10SBing Zhao 					continue;
319101817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
319201817b10SBing Zhao 				if (diag < 0) {
319361a3b0e5SAndrew Rybchenko 					fprintf(stderr,
319461a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
319501817b10SBing Zhao 						pi, peer_pl[j],
319601817b10SBing Zhao 						rte_strerror(-diag));
319701817b10SBing Zhao 					return -1;
319801817b10SBing Zhao 				}
319901817b10SBing Zhao 			}
320001817b10SBing Zhao 			/* bind all peer Tx to current Rx */
320101817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
320201817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
320301817b10SBing Zhao 			if (peer_pi < 0)
320401817b10SBing Zhao 				return peer_pi;
320501817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
320601817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
320701817b10SBing Zhao 					continue;
320801817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
320901817b10SBing Zhao 				if (diag < 0) {
321061a3b0e5SAndrew Rybchenko 					fprintf(stderr,
321161a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
321201817b10SBing Zhao 						peer_pl[j], pi,
321301817b10SBing Zhao 						rte_strerror(-diag));
321401817b10SBing Zhao 					return -1;
321501817b10SBing Zhao 				}
321601817b10SBing Zhao 			}
321701817b10SBing Zhao 		}
321801817b10SBing Zhao 	}
321901817b10SBing Zhao 
322063b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
322163b72657SIvan Ilchenko 
3222ce8d5614SIntel 	printf("Done\n");
3223148f963fSBruce Richardson 	return 0;
3224ce8d5614SIntel }
3225ce8d5614SIntel 
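/*
 * Stop the given port, or all ports when pid == RTE_PORT_ALL. Ports that
 * are still forwarding or that are bonding slaves are skipped; hairpin
 * peers are unbound and flows flushed before the port is stopped.
 */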
3226ce8d5614SIntel void
3227ce8d5614SIntel stop_port(portid_t pid)
3228ce8d5614SIntel {
3229ce8d5614SIntel 	portid_t pi;
3230ce8d5614SIntel 	struct rte_port *port;
3231ce8d5614SIntel 	int need_check_link_status = 0;
323201817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
323301817b10SBing Zhao 	int peer_pi;
323447a4e1fbSDariusz Sosnowski 	int ret;
3235ce8d5614SIntel 
32364468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
32374468635fSMichael Qiu 		return;
32384468635fSMichael Qiu 
3239ce8d5614SIntel 	printf("Stopping ports...\n");
3240ce8d5614SIntel 
32417d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
32424468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3243ce8d5614SIntel 			continue;
3244ce8d5614SIntel 
3245a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
324661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
324761a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
324861a3b0e5SAndrew Rybchenko 				pi);
3249a8ef3e3aSBernard Iremonger 			continue;
3250a8ef3e3aSBernard Iremonger 		}
3251a8ef3e3aSBernard Iremonger 
32520e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
325361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
325461a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
325561a3b0e5SAndrew Rybchenko 				pi);
32560e545d30SBernard Iremonger 			continue;
32570e545d30SBernard Iremonger 		}
32580e545d30SBernard Iremonger 
3259ce8d5614SIntel 		port = &ports[pi];
3260eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_STARTED)
3261eac341d3SJoyce Kong 			port->port_status = RTE_PORT_HANDLING;
3262eac341d3SJoyce Kong 		else
3263ce8d5614SIntel 			continue;
3264ce8d5614SIntel 
326501817b10SBing Zhao 		if (hairpin_mode & 0xf) {
326601817b10SBing Zhao 			int j;
326701817b10SBing Zhao 
326801817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
326901817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
327001817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
327101817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
327201817b10SBing Zhao 			if (peer_pi < 0)
327301817b10SBing Zhao 				continue;
327401817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
327501817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
327601817b10SBing Zhao 					continue;
327701817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
327801817b10SBing Zhao 			}
327901817b10SBing Zhao 		}
328001817b10SBing Zhao 
32810f93edbfSGregory Etelson 		if (port->flow_list)
32820f93edbfSGregory Etelson 			port_flow_flush(pi);
32830f93edbfSGregory Etelson 
328447a4e1fbSDariusz Sosnowski 		ret = eth_dev_stop_mp(pi);
328547a4e1fbSDariusz Sosnowski 		if (ret != 0) {
3286e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3287e62c5a12SIvan Ilchenko 				pi);
328847a4e1fbSDariusz Sosnowski 			/* Allow to retry stopping the port. */
328947a4e1fbSDariusz Sosnowski 			port->port_status = RTE_PORT_STARTED;
329047a4e1fbSDariusz Sosnowski 			continue;
329147a4e1fbSDariusz Sosnowski 		}
3292ce8d5614SIntel 
3293eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_HANDLING)
3294eac341d3SJoyce Kong 			port->port_status = RTE_PORT_STOPPED;
3295eac341d3SJoyce Kong 		else
329661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into stopped\n",
329761a3b0e5SAndrew Rybchenko 				pi);
3298ce8d5614SIntel 		need_check_link_status = 1;
3299ce8d5614SIntel 	}
3300bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3301edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3302ce8d5614SIntel 
3303ce8d5614SIntel 	printf("Done\n");
3304ce8d5614SIntel }
3305ce8d5614SIntel 
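/*
 * Compact the given port array in place, dropping entries that no longer
 * refer to a valid port, and update the element count accordingly.
 */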
3306ce6959bfSWisam Jaddo static void
33074f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3308ce6959bfSWisam Jaddo {
33094f1de450SThomas Monjalon 	portid_t i;
33104f1de450SThomas Monjalon 	portid_t new_total = 0;
3311ce6959bfSWisam Jaddo 
33124f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
33134f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
33144f1de450SThomas Monjalon 			array[new_total] = array[i];
33154f1de450SThomas Monjalon 			new_total++;
3316ce6959bfSWisam Jaddo 		}
33174f1de450SThomas Monjalon 	*total = new_total;
33184f1de450SThomas Monjalon }
33194f1de450SThomas Monjalon 
33204f1de450SThomas Monjalon static void
33214f1de450SThomas Monjalon remove_invalid_ports(void)
33224f1de450SThomas Monjalon {
33234f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
33244f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
33254f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3326ce6959bfSWisam Jaddo }
3327ce6959bfSWisam Jaddo 
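/*
 * Release the resources owned by the given port: multicast addresses,
 * flows, flex items, flow templates and indirect action handles.
 */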
33283889a322SHuisong Li static void
33294b27989dSDmitry Kozlyuk flush_port_owned_resources(portid_t pi)
33304b27989dSDmitry Kozlyuk {
33314b27989dSDmitry Kozlyuk 	mcast_addr_pool_destroy(pi);
33324b27989dSDmitry Kozlyuk 	port_flow_flush(pi);
33334b27989dSDmitry Kozlyuk 	port_flex_item_flush(pi);
33346d736e05SSuanming Mou 	port_flow_template_table_flush(pi);
33356d736e05SSuanming Mou 	port_flow_pattern_template_flush(pi);
33366d736e05SSuanming Mou 	port_flow_actions_template_flush(pi);
33374b27989dSDmitry Kozlyuk 	port_action_handle_flush(pi);
33384b27989dSDmitry Kozlyuk }
33394b27989dSDmitry Kozlyuk 
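/*
 * Stop all given bonding slave ports and clear their slave flag; the
 * slaves are also closed when testpmd is quitting or was killed.
 */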
33404b27989dSDmitry Kozlyuk static void
33413889a322SHuisong Li clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
33423889a322SHuisong Li {
33433889a322SHuisong Li 	struct rte_port *port;
33443889a322SHuisong Li 	portid_t slave_pid;
33453889a322SHuisong Li 	uint16_t i;
33463889a322SHuisong Li 
33473889a322SHuisong Li 	for (i = 0; i < num_slaves; i++) {
33483889a322SHuisong Li 		slave_pid = slave_pids[i];
33493889a322SHuisong Li 		if (port_is_started(slave_pid) == 1) {
33503889a322SHuisong Li 			if (rte_eth_dev_stop(slave_pid) != 0)
33513889a322SHuisong Li 				fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
33523889a322SHuisong Li 					slave_pid);
33533889a322SHuisong Li 
33543889a322SHuisong Li 			port = &ports[slave_pid];
33553889a322SHuisong Li 			port->port_status = RTE_PORT_STOPPED;
33563889a322SHuisong Li 		}
33573889a322SHuisong Li 
33583889a322SHuisong Li 		clear_port_slave_flag(slave_pid);
33593889a322SHuisong Li 
33603889a322SHuisong Li 		/* Close slave device when testpmd quit or is killed. */
33603889a322SHuisong Li 		/* Close slave device when testpmd quits or is killed. */
33623889a322SHuisong Li 			rte_eth_dev_close(slave_pid);
33633889a322SHuisong Li 	}
33643889a322SHuisong Li }
33653889a322SHuisong Li 
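/*
 * Close the given port, or all ports when pid == RTE_PORT_ALL. Port-owned
 * flow resources are flushed first; for a bonding device, its slave ports
 * are stopped and cleared as well.
 */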
3366ce8d5614SIntel void
3367ce8d5614SIntel close_port(portid_t pid)
3368ce8d5614SIntel {
3369ce8d5614SIntel 	portid_t pi;
3370ce8d5614SIntel 	struct rte_port *port;
33713889a322SHuisong Li 	portid_t slave_pids[RTE_MAX_ETHPORTS];
33723889a322SHuisong Li 	int num_slaves = 0;
3373ce8d5614SIntel 
33744468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
33754468635fSMichael Qiu 		return;
33764468635fSMichael Qiu 
3377ce8d5614SIntel 	printf("Closing ports...\n");
3378ce8d5614SIntel 
33797d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
33804468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3381ce8d5614SIntel 			continue;
3382ce8d5614SIntel 
3383a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
338461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
338561a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
338661a3b0e5SAndrew Rybchenko 				pi);
3387a8ef3e3aSBernard Iremonger 			continue;
3388a8ef3e3aSBernard Iremonger 		}
3389a8ef3e3aSBernard Iremonger 
33900e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
339161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
339261a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
339361a3b0e5SAndrew Rybchenko 				pi);
33940e545d30SBernard Iremonger 			continue;
33950e545d30SBernard Iremonger 		}
33960e545d30SBernard Iremonger 
3397ce8d5614SIntel 		port = &ports[pi];
3398eac341d3SJoyce Kong 		if (port->port_status == RTE_PORT_CLOSED) {
339961a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3400d4e8ad64SMichael Qiu 			continue;
3401d4e8ad64SMichael Qiu 		}
3402d4e8ad64SMichael Qiu 
3403a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
34044b27989dSDmitry Kozlyuk 			flush_port_owned_resources(pi);
34053889a322SHuisong Li #ifdef RTE_NET_BOND
34063889a322SHuisong Li 			if (port->bond_flag == 1)
34073889a322SHuisong Li 				num_slaves = rte_eth_bond_slaves_get(pi,
34083889a322SHuisong Li 						slave_pids, RTE_MAX_ETHPORTS);
34093889a322SHuisong Li #endif
3410ce8d5614SIntel 			rte_eth_dev_close(pi);
34113889a322SHuisong Li 			/*
34123889a322SHuisong Li 			 * If this port is a bonded device, all slaves under the
34133889a322SHuisong Li 			 * device need to be removed or closed.
34143889a322SHuisong Li 			 */
34153889a322SHuisong Li 			if (port->bond_flag == 1 && num_slaves > 0)
34163889a322SHuisong Li 				clear_bonding_slave_device(slave_pids,
34173889a322SHuisong Li 							num_slaves);
3418ce8d5614SIntel 		}
341963b72657SIvan Ilchenko 
342063b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3421a550baf2SMin Hu (Connor) 	}
3422ce8d5614SIntel 
342385c6571cSThomas Monjalon 	remove_invalid_ports();
3424ce8d5614SIntel 	printf("Done\n");
3425ce8d5614SIntel }
3426ce8d5614SIntel 
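/*
 * Reset the given port, or every port when pid is RTE_PORT_ALL.
 * All affected ports must be stopped first; a successful reset marks
 * the port for reconfiguration of both the device and its queues.
 */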
3427edab33b1STetsuya Mukawa void
342897f1e196SWei Dai reset_port(portid_t pid)
342997f1e196SWei Dai {
343097f1e196SWei Dai 	int diag;
343197f1e196SWei Dai 	portid_t pi;
343297f1e196SWei Dai 	struct rte_port *port;
343397f1e196SWei Dai 
343497f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
343597f1e196SWei Dai 		return;
343697f1e196SWei Dai 
34371cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
34381cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
343961a3b0e5SAndrew Rybchenko 		fprintf(stderr,
344061a3b0e5SAndrew Rybchenko 			"Can not reset port(s), please stop port(s) first.\n");
34411cde1b9aSShougang Wang 		return;
34421cde1b9aSShougang Wang 	}
34431cde1b9aSShougang Wang 
344497f1e196SWei Dai 	printf("Resetting ports...\n");
344597f1e196SWei Dai 
344697f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
344797f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
344897f1e196SWei Dai 			continue;
344997f1e196SWei Dai 
345097f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
345161a3b0e5SAndrew Rybchenko 			fprintf(stderr,
345261a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
345361a3b0e5SAndrew Rybchenko 				pi);
345497f1e196SWei Dai 			continue;
345597f1e196SWei Dai 		}
345697f1e196SWei Dai 
345797f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
345861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
345961a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
346097f1e196SWei Dai 				pi);
346197f1e196SWei Dai 			continue;
346297f1e196SWei Dai 		}
346397f1e196SWei Dai 
3464e9351eaaSQiming Yang 		if (is_proc_primary()) {
346597f1e196SWei Dai 			diag = rte_eth_dev_reset(pi);
346697f1e196SWei Dai 			if (diag == 0) {
346797f1e196SWei Dai 				port = &ports[pi];
346897f1e196SWei Dai 				port->need_reconfig = 1;
346997f1e196SWei Dai 				port->need_reconfig_queues = 1;
347097f1e196SWei Dai 			} else {
347161a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Failed to reset port %d. diag=%d\n",
347261a3b0e5SAndrew Rybchenko 					pi, diag);
347397f1e196SWei Dai 			}
347497f1e196SWei Dai 		}
3475e9351eaaSQiming Yang 	}
347697f1e196SWei Dai 
347797f1e196SWei Dai 	printf("Done\n");
347897f1e196SWei Dai }
347997f1e196SWei Dai 
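/*
 * Probe and attach a new port from a device identifier, e.g. a PCI
 * address such as "0000:02:00.0" or a vdev string such as "net_ring0"
 * (illustrative examples; any devargs accepted by rte_dev_probe() work).
 * Setup happens either through the RTE_ETH_EVENT_NEW event or by
 * iterating over the ports matching the devargs, as coded below.
 */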
348097f1e196SWei Dai void
3481edab33b1STetsuya Mukawa attach_port(char *identifier)
3482ce8d5614SIntel {
34834f1ed78eSThomas Monjalon 	portid_t pi;
3484c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3485ce8d5614SIntel 
3486edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3487edab33b1STetsuya Mukawa 
3488edab33b1STetsuya Mukawa 	if (identifier == NULL) {
348961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameters are specified\n");
3490edab33b1STetsuya Mukawa 		return;
3491ce8d5614SIntel 	}
3492ce8d5614SIntel 
349375b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3494c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3495edab33b1STetsuya Mukawa 		return;
3496c9cce428SThomas Monjalon 	}
3497c9cce428SThomas Monjalon 
34984f1ed78eSThomas Monjalon 	/* first attach mode: event */
34994f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
35004f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
35014f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
35024f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
35034f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
35044f1ed78eSThomas Monjalon 				setup_attached_port(pi);
35054f1ed78eSThomas Monjalon 		return;
35064f1ed78eSThomas Monjalon 	}
35074f1ed78eSThomas Monjalon 
35084f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
350986fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
35104f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
351186fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
351286fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3513c9cce428SThomas Monjalon 		setup_attached_port(pi);
3514c9cce428SThomas Monjalon 	}
351586fa5de1SThomas Monjalon }
3516c9cce428SThomas Monjalon 
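/*
 * Finish bringing up a newly attached port: pick a valid NUMA socket,
 * reconfigure the port, enable promiscuous mode and register the port
 * id in the global port lists.
 */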
3517c9cce428SThomas Monjalon static void
3518c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3519c9cce428SThomas Monjalon {
3520c9cce428SThomas Monjalon 	unsigned int socket_id;
352134fc1051SIvan Ilchenko 	int ret;
3522edab33b1STetsuya Mukawa 
3523931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
352429841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3525931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
352629841336SPhil Yang 		socket_id = socket_ids[0];
3527931126baSBernard Iremonger 	reconfig(pi, socket_id);
352834fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
352934fc1051SIvan Ilchenko 	if (ret != 0)
353061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
353161a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
353234fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3533edab33b1STetsuya Mukawa 
35344f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
35354f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
35364f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
35374f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3538edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3539edab33b1STetsuya Mukawa 
3540edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3541edab33b1STetsuya Mukawa 	printf("Done\n");
3542edab33b1STetsuya Mukawa }
3543edab33b1STetsuya Mukawa 
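/*
 * Detach a whole rte_device. Every sibling port must be stopped; their
 * flow resources are flushed before the device is removed.
 */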
35440654d4a8SThomas Monjalon static void
35450654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
35465f4ec54fSChen Jing D(Mark) {
3547f8e5baa2SThomas Monjalon 	portid_t sibling;
3548f8e5baa2SThomas Monjalon 
3549f8e5baa2SThomas Monjalon 	if (dev == NULL) {
355061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3551f8e5baa2SThomas Monjalon 		return;
3552f8e5baa2SThomas Monjalon 	}
3553f8e5baa2SThomas Monjalon 
35540654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3555938a184aSAdrien Mazarguil 
35562a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
35572a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
35582a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
355961a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
356061a3b0e5SAndrew Rybchenko 					sibling);
35612a449871SThomas Monjalon 				return;
35622a449871SThomas Monjalon 			}
35634b27989dSDmitry Kozlyuk 			flush_port_owned_resources(sibling);
35642a449871SThomas Monjalon 		}
35652a449871SThomas Monjalon 	}
35662a449871SThomas Monjalon 
356775b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3568ec5ecd7eSDavid Marchand 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", rte_dev_name(dev));
3569edab33b1STetsuya Mukawa 		return;
35703070419eSGaetan Rivet 	}
35714f1de450SThomas Monjalon 	remove_invalid_ports();
357203ce2c53SMatan Azrad 
35730654d4a8SThomas Monjalon 	printf("Device is detached\n");
3574f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3575edab33b1STetsuya Mukawa 	printf("Done\n");
3576edab33b1STetsuya Mukawa 	return;
35775f4ec54fSChen Jing D(Mark) }
35785f4ec54fSChen Jing D(Mark) 
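/* Resolve the rte_device backing a port id and detach it. */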
3579af75078fSIntel void
35800654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
35810654d4a8SThomas Monjalon {
35820a0821bcSPaulis Gributs 	int ret;
35830a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
35840a0821bcSPaulis Gributs 
35850654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
35860654d4a8SThomas Monjalon 		return;
35870654d4a8SThomas Monjalon 
35880654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
35890654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
359061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
35910654d4a8SThomas Monjalon 			return;
35920654d4a8SThomas Monjalon 		}
359361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
35940654d4a8SThomas Monjalon 	}
35950654d4a8SThomas Monjalon 
35960a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
35970a0821bcSPaulis Gributs 	if (ret != 0) {
35980a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
35990a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
36000a0821bcSPaulis Gributs 			port_id);
36010a0821bcSPaulis Gributs 		return;
36020a0821bcSPaulis Gributs 	}
36030a0821bcSPaulis Gributs 	detach_device(dev_info.device);
36040654d4a8SThomas Monjalon }
36050654d4a8SThomas Monjalon 
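/*
 * Detach all devices matching a devargs identifier via bus hotplug,
 * after checking that every matching port is stopped.
 */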
36060654d4a8SThomas Monjalon void
36075edee5f6SThomas Monjalon detach_devargs(char *identifier)
360855e51c96SNithin Dabilpuram {
360955e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
361055e51c96SNithin Dabilpuram 	struct rte_devargs da;
361155e51c96SNithin Dabilpuram 	portid_t port_id;
361255e51c96SNithin Dabilpuram 
361355e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
361455e51c96SNithin Dabilpuram 
361555e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
361655e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
361761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "cannot parse identifier\n");
361855e51c96SNithin Dabilpuram 		return;
361955e51c96SNithin Dabilpuram 	}
362055e51c96SNithin Dabilpuram 
362155e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
362255e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
362355e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
362461a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
362561a3b0e5SAndrew Rybchenko 					port_id);
3626149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
362764051bb1SXueming Li 				rte_devargs_reset(&da);
362855e51c96SNithin Dabilpuram 				return;
362955e51c96SNithin Dabilpuram 			}
36304b27989dSDmitry Kozlyuk 			flush_port_owned_resources(port_id);
363155e51c96SNithin Dabilpuram 		}
363255e51c96SNithin Dabilpuram 	}
363355e51c96SNithin Dabilpuram 
3634148c51a3SDavid Marchand 	if (rte_eal_hotplug_remove(rte_bus_name(da.bus), da.name) != 0) {
363555e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3636148c51a3SDavid Marchand 			    da.name, rte_bus_name(da.bus));
363764051bb1SXueming Li 		rte_devargs_reset(&da);
363855e51c96SNithin Dabilpuram 		return;
363955e51c96SNithin Dabilpuram 	}
364055e51c96SNithin Dabilpuram 
364155e51c96SNithin Dabilpuram 	remove_invalid_ports();
364255e51c96SNithin Dabilpuram 
364355e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
364455e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
364555e51c96SNithin Dabilpuram 	printf("Done\n");
364664051bb1SXueming Li 	rte_devargs_reset(&da);
364755e51c96SNithin Dabilpuram }
364855e51c96SNithin Dabilpuram 
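/*
 * Global teardown on exit: stop forwarding, stop and close all ports,
 * disable hotplug monitoring and free the mbuf mempools.
 */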
364955e51c96SNithin Dabilpuram void
3650af75078fSIntel pmd_test_exit(void)
3651af75078fSIntel {
3652af75078fSIntel 	portid_t pt_id;
365326cbb419SViacheslav Ovsiienko 	unsigned int i;
3654fb73e096SJeff Guo 	int ret;
3655af75078fSIntel 
36568210ec25SPablo de Lara 	if (test_done == 0)
36578210ec25SPablo de Lara 		stop_packet_forwarding();
36588210ec25SPablo de Lara 
3659761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
366026cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
36613a0968c8SShahaf Shuler 		if (mempools[i]) {
36623a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
36633a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
36643a0968c8SShahaf Shuler 						     NULL);
36653a0968c8SShahaf Shuler 		}
36663a0968c8SShahaf Shuler 	}
3667761f7ae1SJie Zhou #endif
3668d3a274ceSZhihong Wang 	if (ports != NULL) {
3669d3a274ceSZhihong Wang 		no_link_check = 1;
36707d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
367108fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3672af75078fSIntel 			fflush(stdout);
3673d3a274ceSZhihong Wang 			stop_port(pt_id);
367408fd782bSCristian Dumitrescu 		}
367508fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
367608fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
367708fd782bSCristian Dumitrescu 			fflush(stdout);
3678d3a274ceSZhihong Wang 			close_port(pt_id);
3679af75078fSIntel 		}
3680d3a274ceSZhihong Wang 	}
3681fb73e096SJeff Guo 
3682fb73e096SJeff Guo 	if (hot_plug) {
3683fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
36842049c511SJeff Guo 		if (ret) {
3685fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3686fb73e096SJeff Guo 				"failed to stop device event monitor.\n");
36872049c511SJeff Guo 			return;
36882049c511SJeff Guo 		}
3689fb73e096SJeff Guo 
36902049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3691cc1bf307SJeff Guo 			dev_event_callback, NULL);
36922049c511SJeff Guo 		if (ret < 0) {
3693fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
36942049c511SJeff Guo 				"failed to unregister device event callback.\n");
36952049c511SJeff Guo 			return;
36962049c511SJeff Guo 		}
36972049c511SJeff Guo 
36982049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
36992049c511SJeff Guo 		if (ret) {
37002049c511SJeff Guo 			RTE_LOG(ERR, EAL,
37012049c511SJeff Guo 				"failed to disable hotplug handling.\n");
37022049c511SJeff Guo 			return;
37032049c511SJeff Guo 		}
3704fb73e096SJeff Guo 	}
370526cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3706401b744dSShahaf Shuler 		if (mempools[i])
3707a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3708401b744dSShahaf Shuler 	}
370963b72657SIvan Ilchenko 	free(xstats_display);
3710fb73e096SJeff Guo 
3711d3a274ceSZhihong Wang 	printf("\nBye...\n");
3712af75078fSIntel }
3713af75078fSIntel 
3714af75078fSIntel typedef void (*cmd_func_t)(void);
3715af75078fSIntel struct pmd_test_command {
3716af75078fSIntel 	const char *cmd_name;
3717af75078fSIntel 	cmd_func_t cmd_func;
3718af75078fSIntel };
3719af75078fSIntel 
3720ce8d5614SIntel /* Check the link status of all ports for up to 9s, then print the final status */
3721af75078fSIntel static void
3722edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3723af75078fSIntel {
3724ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3725ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3726f8244c63SZhiyong Yang 	portid_t portid;
3727f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3728ce8d5614SIntel 	struct rte_eth_link link;
3729e661a08bSIgor Romanov 	int ret;
3730ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3731ce8d5614SIntel 
3732ce8d5614SIntel 	printf("Checking link statuses...\n");
3733ce8d5614SIntel 	fflush(stdout);
3734ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3735ce8d5614SIntel 		all_ports_up = 1;
37367d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3737ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3738ce8d5614SIntel 				continue;
3739ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3740e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3741e661a08bSIgor Romanov 			if (ret < 0) {
3742e661a08bSIgor Romanov 				all_ports_up = 0;
3743e661a08bSIgor Romanov 				if (print_flag == 1)
374461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
374561a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3746e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3747e661a08bSIgor Romanov 				continue;
3748e661a08bSIgor Romanov 			}
3749ce8d5614SIntel 			/* print link status if flag set */
3750ce8d5614SIntel 			if (print_flag == 1) {
3751ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3752ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3753ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3754ce8d5614SIntel 				continue;
3755ce8d5614SIntel 			}
3756ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
3757295968d1SFerruh Yigit 			if (link.link_status == RTE_ETH_LINK_DOWN) {
3758ce8d5614SIntel 				all_ports_up = 0;
3759ce8d5614SIntel 				break;
3760ce8d5614SIntel 			}
3761ce8d5614SIntel 		}
3762ce8d5614SIntel 		/* after finally printing all link status, get out */
3763ce8d5614SIntel 		if (print_flag == 1)
3764ce8d5614SIntel 			break;
3765ce8d5614SIntel 
3766ce8d5614SIntel 		if (all_ports_up == 0) {
3767ce8d5614SIntel 			fflush(stdout);
3768ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3769ce8d5614SIntel 		}
3770ce8d5614SIntel 
3771ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3772ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3773ce8d5614SIntel 			print_flag = 1;
3774ce8d5614SIntel 		}
37758ea656f8SGaetan Rivet 
37768ea656f8SGaetan Rivet 		if (lsc_interrupt)
37778ea656f8SGaetan Rivet 			break;
3778ce8d5614SIntel 	}
3779af75078fSIntel }
3780af75078fSIntel 
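/*
 * Deferred handler for device-removal events: stop packet forwarding
 * if it is running, stop and close the port, detach its device, then
 * resume forwarding when it had been interrupted.
 */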
3781284c908cSGaetan Rivet static void
3782cc1bf307SJeff Guo rmv_port_callback(void *arg)
3783284c908cSGaetan Rivet {
37843b97888aSMatan Azrad 	int need_to_start = 0;
37850da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
378628caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
37870a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
37880a0821bcSPaulis Gributs 	int ret;
3789284c908cSGaetan Rivet 
3790284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3791284c908cSGaetan Rivet 
37923b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
37933b97888aSMatan Azrad 		need_to_start = 1;
37943b97888aSMatan Azrad 		stop_packet_forwarding();
37953b97888aSMatan Azrad 	}
37960da2a62bSMatan Azrad 	no_link_check = 1;
3797284c908cSGaetan Rivet 	stop_port(port_id);
37980da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
37990654d4a8SThomas Monjalon 
38000a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
38010a0821bcSPaulis Gributs 	if (ret != 0)
38020a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
38030a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
38040a0821bcSPaulis Gributs 			port_id);
3805e1d38504SPaulis Gributs 	else {
3806e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3807e1d38504SPaulis Gributs 		close_port(port_id);
3808e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3809e1d38504SPaulis Gributs 	}
38103b97888aSMatan Azrad 	if (need_to_start)
38113b97888aSMatan Azrad 		start_packet_forwarding(0);
3812284c908cSGaetan Rivet }
3813284c908cSGaetan Rivet 
381476ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3815d6af1a13SBernard Iremonger static int
3816f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3817d6af1a13SBernard Iremonger 		  void *ret_param)
381876ad4a2dSGaetan Rivet {
381976ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3820d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
382176ad4a2dSGaetan Rivet 
382276ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
382361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
382461a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
382576ad4a2dSGaetan Rivet 			port_id, __func__, type);
382676ad4a2dSGaetan Rivet 		fflush(stderr);
38273af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3828f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
382997b5d8b5SThomas Monjalon 			eth_event_desc[type]);
383076ad4a2dSGaetan Rivet 		fflush(stdout);
383176ad4a2dSGaetan Rivet 	}
3832284c908cSGaetan Rivet 
3833284c908cSGaetan Rivet 	switch (type) {
38344f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
38354f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
38364f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
38374f1ed78eSThomas Monjalon 		break;
3838284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
38394f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
38404f1ed78eSThomas Monjalon 			break;
3841284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3842cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
384361a3b0e5SAndrew Rybchenko 			fprintf(stderr,
384461a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3845284c908cSGaetan Rivet 		break;
384685c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
384785c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
384885c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
384985c6571cSThomas Monjalon 		break;
3850bc70e559SSpike Du 	case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3851bc70e559SSpike Du 		uint16_t rxq_id;
3852bc70e559SSpike Du 		int ret;
3853bc70e559SSpike Du 
3854bc70e559SSpike Du 		/* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3855bc70e559SSpike Du 		for (rxq_id = 0; ; rxq_id++) {
3856bc70e559SSpike Du 			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3857bc70e559SSpike Du 							    NULL);
3858bc70e559SSpike Du 			if (ret <= 0)
3859bc70e559SSpike Du 				break;
3860bc70e559SSpike Du 			printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
3861bc70e559SSpike Du 			       port_id, rxq_id);
3862f41a5092SSpike Du 
3863f41a5092SSpike Du #ifdef RTE_NET_MLX5
3864f41a5092SSpike Du 			mlx5_test_avail_thresh_event_handler(port_id, rxq_id);
3865f41a5092SSpike Du #endif
3866bc70e559SSpike Du 		}
3867bc70e559SSpike Du 		break;
3868bc70e559SSpike Du 	}
3869284c908cSGaetan Rivet 	default:
3870284c908cSGaetan Rivet 		break;
3871284c908cSGaetan Rivet 	}
3872d6af1a13SBernard Iremonger 	return 0;
387376ad4a2dSGaetan Rivet }
387476ad4a2dSGaetan Rivet 
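/* Register eth_event_callback for every ethdev event type on all ports. */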
387597b5d8b5SThomas Monjalon static int
387697b5d8b5SThomas Monjalon register_eth_event_callback(void)
387797b5d8b5SThomas Monjalon {
387897b5d8b5SThomas Monjalon 	int ret;
387997b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
388097b5d8b5SThomas Monjalon 
388197b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
388297b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
388397b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
388497b5d8b5SThomas Monjalon 				event,
388597b5d8b5SThomas Monjalon 				eth_event_callback,
388697b5d8b5SThomas Monjalon 				NULL);
388797b5d8b5SThomas Monjalon 		if (ret != 0) {
388897b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
388997b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
389097b5d8b5SThomas Monjalon 			return -1;
389197b5d8b5SThomas Monjalon 		}
389297b5d8b5SThomas Monjalon 	}
389397b5d8b5SThomas Monjalon 
389497b5d8b5SThomas Monjalon 	return 0;
389597b5d8b5SThomas Monjalon }
389697b5d8b5SThomas Monjalon 
3897fb73e096SJeff Guo /* This function is used by the interrupt thread */
3898fb73e096SJeff Guo static void
3899cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3900fb73e096SJeff Guo 			     __rte_unused void *arg)
3901fb73e096SJeff Guo {
39022049c511SJeff Guo 	uint16_t port_id;
39032049c511SJeff Guo 	int ret;
39042049c511SJeff Guo 
3905fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3906fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3907fb73e096SJeff Guo 			__func__, type);
3908fb73e096SJeff Guo 		fflush(stderr);
3909fb73e096SJeff Guo 	}
3910fb73e096SJeff Guo 
3911fb73e096SJeff Guo 	switch (type) {
3912fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3913cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3914fb73e096SJeff Guo 			device_name);
39152049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
39162049c511SJeff Guo 		if (ret) {
39182049c511SJeff Guo 			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
39182049c511SJeff Guo 				device_name);
39192049c511SJeff Guo 			return;
39202049c511SJeff Guo 		}
3921cc1bf307SJeff Guo 		/*
3922cc1bf307SJeff Guo 		 * The user's callback is invoked from the EAL interrupt
3923cc1bf307SJeff Guo 		 * callback, and an interrupt callback cannot be unregistered
3924cc1bf307SJeff Guo 		 * while it is still running. So return from this callback
3925cc1bf307SJeff Guo 		 * quickly and detach the device via a deferred removal.
3926cc1bf307SJeff Guo 		 * This is a workaround: once device detaching moves into
3927cc1bf307SJeff Guo 		 * the EAL, the deferred removal can be deleted.
3929cc1bf307SJeff Guo 		 */
3930cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3931cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3932cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3933cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3934fb73e096SJeff Guo 		break;
3935fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3936fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3937fb73e096SJeff Guo 			device_name);
3938fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
3939fb73e096SJeff Guo 		/* TODO: after kernel driver binding finishes,
3940fb73e096SJeff Guo 		 * begin to attach the port.
3941fb73e096SJeff Guo 		break;
3942fb73e096SJeff Guo 	default:
3943fb73e096SJeff Guo 		break;
3944fb73e096SJeff Guo 	}
3945fb73e096SJeff Guo }
3946fb73e096SJeff Guo 
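/*
 * Seed each Rx/Tx queue configuration of a port from the PMD defaults,
 * preserving already requested offloads and applying any thresholds
 * given on the command line.
 */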
3947f2c5125aSPablo de Lara static void
3948f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3949f2c5125aSPablo de Lara {
3950d44f8a48SQi Zhang 	uint16_t qid;
39515e91aeefSWei Zhao 	uint64_t offloads;
3952f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3953f2c5125aSPablo de Lara 
3954d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
39553c4426dbSDmitry Kozlyuk 		offloads = port->rxq[qid].conf.offloads;
39563c4426dbSDmitry Kozlyuk 		port->rxq[qid].conf = port->dev_info.default_rxconf;
3957f4d178c1SXueming Li 
3958f4d178c1SXueming Li 		if (rxq_share > 0 &&
3959f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3960f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
39613c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
39623c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
3963f4d178c1SXueming Li 		}
3964f4d178c1SXueming Li 
3965575e0fd1SWei Zhao 		if (offloads != 0)
39663c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.offloads = offloads;
3967d44f8a48SQi Zhang 
3968d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3969f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
39703c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3971f2c5125aSPablo de Lara 
3972f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
39733c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3974f2c5125aSPablo de Lara 
3975f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
39763c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3977f2c5125aSPablo de Lara 
3978f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
39793c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3980f2c5125aSPablo de Lara 
3981f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
39823c4426dbSDmitry Kozlyuk 			port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3983f2c5125aSPablo de Lara 
3984d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3985d44f8a48SQi Zhang 	}
3986d44f8a48SQi Zhang 
3987d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
39883c4426dbSDmitry Kozlyuk 		offloads = port->txq[qid].conf.offloads;
39893c4426dbSDmitry Kozlyuk 		port->txq[qid].conf = port->dev_info.default_txconf;
3990575e0fd1SWei Zhao 		if (offloads != 0)
39913c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.offloads = offloads;
3992d44f8a48SQi Zhang 
3993d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3994f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
39953c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3996f2c5125aSPablo de Lara 
3997f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
39983c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3999f2c5125aSPablo de Lara 
4000f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
40013c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
4002f2c5125aSPablo de Lara 
4003f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
40043c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
4005f2c5125aSPablo de Lara 
4006f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
40073c4426dbSDmitry Kozlyuk 			port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
4008d44f8a48SQi Zhang 
4009d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
4010d44f8a48SQi Zhang 	}
4011f2c5125aSPablo de Lara }
4012f2c5125aSPablo de Lara 
40130c4abd36SSteve Yang /*
4014b563c142SFerruh Yigit  * Helper function to set MTU from frame size
40150c4abd36SSteve Yang  *
40160c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
40170c4abd36SSteve Yang  *
40180c4abd36SSteve Yang  * return 0 on success, negative on error
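 *
 * The new MTU is max_rx_pktlen minus the Ethernet overhead reported
 * for the port; for instance, with an 18-byte overhead (14-byte header
 * plus 4-byte CRC), a 1518-byte frame corresponds to a 1500-byte MTU.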
40190c4abd36SSteve Yang  */
40200c4abd36SSteve Yang int
4021b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
40220c4abd36SSteve Yang {
40230c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
40240c4abd36SSteve Yang 	uint32_t eth_overhead;
40251bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
40260c4abd36SSteve Yang 
40271bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
40281bb4a528SFerruh Yigit 
40291bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
40301bb4a528SFerruh Yigit 		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
40311bb4a528SFerruh Yigit 		return -1;
40321bb4a528SFerruh Yigit 	}
40331bb4a528SFerruh Yigit 
40341bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
40350c4abd36SSteve Yang 
40361bb4a528SFerruh Yigit 	if (mtu == new_mtu)
40371bb4a528SFerruh Yigit 		return 0;
40381bb4a528SFerruh Yigit 
40391bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
404061a3b0e5SAndrew Rybchenko 		fprintf(stderr,
404161a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
40421bb4a528SFerruh Yigit 			new_mtu, portid);
40431bb4a528SFerruh Yigit 		return -1;
40440c4abd36SSteve Yang 	}
40450c4abd36SSteve Yang 
40461bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
40471bb4a528SFerruh Yigit 
40480c4abd36SSteve Yang 	return 0;
40490c4abd36SSteve Yang }
40500c4abd36SSteve Yang 
4051013af9b6SIntel void
4052013af9b6SIntel init_port_config(void)
4053013af9b6SIntel {
4054013af9b6SIntel 	portid_t pid;
4055013af9b6SIntel 	struct rte_port *port;
4056655eae01SJie Wang 	int ret, i;
4057013af9b6SIntel 
40587d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
4059013af9b6SIntel 		port = &ports[pid];
40606f51deb9SIvan Ilchenko 
40616f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
40626f51deb9SIvan Ilchenko 		if (ret != 0)
40636f51deb9SIvan Ilchenko 			return;
40646f51deb9SIvan Ilchenko 
40653ce690d3SBruce Richardson 		if (nb_rxq > 1) {
4066013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
406790892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
4068422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
4069af75078fSIntel 		} else {
4070013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
4071013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
4072af75078fSIntel 		}
40733ce690d3SBruce Richardson 
40745f592039SJingjing Wu 		if (port->dcb_flag == 0) {
4075655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
4076f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
4077f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
4078295968d1SFerruh Yigit 						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
4079655eae01SJie Wang 			} else {
4080295968d1SFerruh Yigit 				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
4081655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
4082295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4083655eae01SJie Wang 
4084655eae01SJie Wang 				for (i = 0;
4085655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
4086655eae01SJie Wang 				     i++)
40873c4426dbSDmitry Kozlyuk 					port->rxq[i].conf.offloads &=
4088295968d1SFerruh Yigit 						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4089655eae01SJie Wang 			}
40903ce690d3SBruce Richardson 		}
40913ce690d3SBruce Richardson 
4092f4d178c1SXueming Li 		rxtx_port_config(pid);
4093013af9b6SIntel 
4094a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
4095a5279d25SIgor Romanov 		if (ret != 0)
4096a5279d25SIgor Romanov 			return;
4097013af9b6SIntel 
40980a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
40998ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
41000a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
4101284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
4102013af9b6SIntel 	}
4103013af9b6SIntel }
4104013af9b6SIntel 
410541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
410641b05095SBernard Iremonger {
410741b05095SBernard Iremonger 	struct rte_port *port;
410841b05095SBernard Iremonger 
410941b05095SBernard Iremonger 	port = &ports[slave_pid];
411041b05095SBernard Iremonger 	port->slave_flag = 1;
411141b05095SBernard Iremonger }
411241b05095SBernard Iremonger 
411341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
411441b05095SBernard Iremonger {
411541b05095SBernard Iremonger 	struct rte_port *port;
411641b05095SBernard Iremonger 
411741b05095SBernard Iremonger 	port = &ports[slave_pid];
411841b05095SBernard Iremonger 	port->slave_flag = 0;
411941b05095SBernard Iremonger }
412041b05095SBernard Iremonger 
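/*
 * A port is considered a bonding slave when either the PMD reports
 * RTE_ETH_DEV_BONDED_SLAVE or testpmd itself has marked it as one.
 */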
41210e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
41220e545d30SBernard Iremonger {
41230e545d30SBernard Iremonger 	struct rte_port *port;
41240a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
41250a0821bcSPaulis Gributs 	int ret;
41260e545d30SBernard Iremonger 
41270e545d30SBernard Iremonger 	port = &ports[slave_pid];
41280a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
41290a0821bcSPaulis Gributs 	if (ret != 0) {
41300a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
41310a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
41320a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave\n",
41330a0821bcSPaulis Gributs 			slave_pid);
41340a0821bcSPaulis Gributs 		return 0;
41350a0821bcSPaulis Gributs 	}
41360a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4137b8b8b344SMatan Azrad 		return 1;
4138b8b8b344SMatan Azrad 	return 0;
41390e545d30SBernard Iremonger }
41400e545d30SBernard Iremonger 
4141013af9b6SIntel const uint16_t vlan_tags[] = {
4142013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
4143013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
4144013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
4145013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
4146013af9b6SIntel };
4147013af9b6SIntel 
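/*
 * Fill an rte_eth_conf for DCB operation: VMDQ+DCB pools mapped from
 * the vlan_tags array when VT is enabled, otherwise plain DCB combined
 * with the current RSS configuration.
 */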
4148013af9b6SIntel static int
4149ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
41501a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
41511a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
41521a572499SJingjing Wu 		 uint8_t pfc_en)
4153013af9b6SIntel {
4154013af9b6SIntel 	uint8_t i;
4155ac7c491cSKonstantin Ananyev 	int32_t rc;
4156ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
4157af75078fSIntel 
4158af75078fSIntel 	/*
4159013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
4160013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
4161af75078fSIntel 	 */
41621a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
41631a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
41641a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
41651a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
41661a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4167013af9b6SIntel 
4168547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
41691a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
41701a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
41711a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
4172295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
41731a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
4174295968d1SFerruh Yigit 			(num_tcs ==  RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4175013af9b6SIntel 
41761a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
41771a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
41781a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
41791a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
41801a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
4181af75078fSIntel 		}
4182295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4183f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4184f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4185013af9b6SIntel 		}
4186013af9b6SIntel 
4187013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
4188f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4189f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4190295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4191295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
41921a572499SJingjing Wu 	} else {
41931a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
41941a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
41951a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
41961a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
4197013af9b6SIntel 
41985139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
41995139bc12STing Xu 
4200ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4201ac7c491cSKonstantin Ananyev 		if (rc != 0)
4202ac7c491cSKonstantin Ananyev 			return rc;
4203ac7c491cSKonstantin Ananyev 
42041a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
42051a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
42061a572499SJingjing Wu 
4207295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4208bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
4209bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
4210013af9b6SIntel 		}
4211ac7c491cSKonstantin Ananyev 
4212f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
4213f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
4214295968d1SFerruh Yigit 					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4215ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
4216295968d1SFerruh Yigit 		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
42171a572499SJingjing Wu 	}
42181a572499SJingjing Wu 
42191a572499SJingjing Wu 	if (pfc_en)
42201a572499SJingjing Wu 		eth_conf->dcb_capability_en =
4221295968d1SFerruh Yigit 				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4222013af9b6SIntel 	else
4223295968d1SFerruh Yigit 		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4224013af9b6SIntel 
4225013af9b6SIntel 	return 0;
4226013af9b6SIntel }
4227013af9b6SIntel 
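/*
 * Reconfigure a stopped port for DCB: build the DCB configuration,
 * reconfigure the device, derive the queue counts and enable VLAN
 * filtering for the test VLAN tags.
 */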
4228013af9b6SIntel int
42291a572499SJingjing Wu init_port_dcb_config(portid_t pid,
42301a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
42311a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
42321a572499SJingjing Wu 		     uint8_t pfc_en)
4233013af9b6SIntel {
4234013af9b6SIntel 	struct rte_eth_conf port_conf;
4235013af9b6SIntel 	struct rte_port *rte_port;
4236013af9b6SIntel 	int retval;
4237013af9b6SIntel 	uint16_t i;
4238013af9b6SIntel 
4239a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
4240a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support DCB.\n");
4241a550baf2SMin Hu (Connor) 		return -ENOTSUP;
4242a550baf2SMin Hu (Connor) 	}
42432a977b89SWenzhuo Lu 	rte_port = &ports[pid];
4244013af9b6SIntel 
4245c1ba6c32SHuisong Li 	/* retain the original device configuration. */
4246c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4247d5354e89SYanglong Wu 
4248013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
4249ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4250013af9b6SIntel 	if (retval < 0)
4251013af9b6SIntel 		return retval;
4252295968d1SFerruh Yigit 	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4253cbe70fdeSJie Wang 	/* remove RSS HASH offload for DCB in vt mode */
4254cbe70fdeSJie Wang 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4255cbe70fdeSJie Wang 		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4256cbe70fdeSJie Wang 		for (i = 0; i < nb_rxq; i++)
42573c4426dbSDmitry Kozlyuk 			rte_port->rxq[i].conf.offloads &=
4258cbe70fdeSJie Wang 				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4259cbe70fdeSJie Wang 	}
4260013af9b6SIntel 
42612f203d44SQi Zhang 	/* Re-configure the device; the same queue count is used for Rx and Tx. */
42622b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
42632b0e0ebaSChenbo Xia 	if (retval < 0)
42642b0e0ebaSChenbo Xia 		return retval;
42656f51deb9SIvan Ilchenko 
42666f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
42676f51deb9SIvan Ilchenko 	if (retval != 0)
42686f51deb9SIvan Ilchenko 		return retval;
42692a977b89SWenzhuo Lu 
42702a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
42712a977b89SWenzhuo Lu 	 * the queue ids of the vmdq pools start after the pf queues.
42722a977b89SWenzhuo Lu 	 */
42732a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
42742a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
427561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
427661a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
427761a3b0e5SAndrew Rybchenko 			pid);
42782a977b89SWenzhuo Lu 		return -1;
42792a977b89SWenzhuo Lu 	}
42802a977b89SWenzhuo Lu 
42812a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
42822a977b89SWenzhuo Lu 	 * and the same number of rxq and txq in dcb mode
42832a977b89SWenzhuo Lu 	 */
42842a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
428586ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
428686ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
428786ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
428886ef65eeSBernard Iremonger 		} else {
42892a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
42902a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
429186ef65eeSBernard Iremonger 		}
42922a977b89SWenzhuo Lu 	} else {
42932a977b89SWenzhuo Lu 		/* if VT is disabled, use all PF queues */
42942a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
42952a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
42962a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
42972a977b89SWenzhuo Lu 		} else {
42982a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
42992a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
43012a977b89SWenzhuo Lu 		}
43022a977b89SWenzhuo Lu 	}
43032a977b89SWenzhuo Lu 	rx_free_thresh = 64;
43042a977b89SWenzhuo Lu 
4305013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4306013af9b6SIntel 
4307f4d178c1SXueming Li 	rxtx_port_config(pid);
4308013af9b6SIntel 	/* VLAN filter */
4309295968d1SFerruh Yigit 	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
43101a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4311013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4312013af9b6SIntel 
4313a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4314a5279d25SIgor Romanov 	if (retval != 0)
4315a5279d25SIgor Romanov 		return retval;
4316a5279d25SIgor Romanov 
43177741e4cfSIntel 	rte_port->dcb_flag = 1;
43187741e4cfSIntel 
4319a690a070SHuisong Li 	/* Enter DCB configuration status */
4320a690a070SHuisong Li 	dcb_config = 1;
4321a690a070SHuisong Li 
4322013af9b6SIntel 	return 0;
4323af75078fSIntel }
4324af75078fSIntel 
4325ffc468ffSTetsuya Mukawa static void
4326ffc468ffSTetsuya Mukawa init_port(void)
4327ffc468ffSTetsuya Mukawa {
43281b9f2746SGregory Etelson 	int i;
43291b9f2746SGregory Etelson 
4330ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4331ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4332ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4333ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4334ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4335ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4336ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4337ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4338ffc468ffSTetsuya Mukawa 	}
4339236bc417SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
4340236bc417SGregory Etelson 		ports[i].fwd_mac_swap = 1;
434163b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
43421b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
4343236bc417SGregory Etelson 	}
434429841336SPhil Yang 	/* Initialize the ports' NUMA structures */
434529841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
434629841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
434729841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4348ffc468ffSTetsuya Mukawa }
4349ffc468ffSTetsuya Mukawa 
4350d3a274ceSZhihong Wang static void
4351d3a274ceSZhihong Wang force_quit(void)
4352d3a274ceSZhihong Wang {
4353d3a274ceSZhihong Wang 	pmd_test_exit();
4354d3a274ceSZhihong Wang 	prompt_exit();
4355d3a274ceSZhihong Wang }
4356d3a274ceSZhihong Wang 
4357d3a274ceSZhihong Wang static void
4358cfea1f30SPablo de Lara print_stats(void)
4359cfea1f30SPablo de Lara {
4360cfea1f30SPablo de Lara 	uint8_t i;
4361cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4362cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4363cfea1f30SPablo de Lara 
4364cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4365cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4366cfea1f30SPablo de Lara 
4367cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4368cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4369cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4370683d1e82SIgor Romanov 
4371683d1e82SIgor Romanov 	fflush(stdout);
4372cfea1f30SPablo de Lara }
4373cfea1f30SPablo de Lara 
4374cfea1f30SPablo de Lara static void
4375d3a274ceSZhihong Wang signal_handler(int signum)
4376d3a274ceSZhihong Wang {
4377d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
437861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4379d3a274ceSZhihong Wang 			signum);
4380a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
4381102b7329SReshma Pattan 		/* uninitialize packet capture framework */
4382102b7329SReshma Pattan 		rte_pdump_uninit();
4383102b7329SReshma Pattan #endif
4384a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
43858b36297dSAmit Gupta 		if (latencystats_enabled != 0)
438662d3216dSReshma Pattan 			rte_latencystats_uninit();
438762d3216dSReshma Pattan #endif
4388d3a274ceSZhihong Wang 		force_quit();
4389d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
4390d9a191a0SPhil Yang 		f_quit = 1;
4391d3a274ceSZhihong Wang 		/* exit with the expected status */
4392761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4393d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
4394d3a274ceSZhihong Wang 		kill(getpid(), signum);
4395761f7ae1SJie Zhou #endif
4396d3a274ceSZhihong Wang 	}
4397d3a274ceSZhihong Wang }
4398d3a274ceSZhihong Wang 
4399af75078fSIntel int
4400af75078fSIntel main(int argc, char** argv)
4401af75078fSIntel {
4402af75078fSIntel 	int diag;
4403f8244c63SZhiyong Yang 	portid_t port_id;
44044918a357SXiaoyun Li 	uint16_t count;
4405fb73e096SJeff Guo 	int ret;
4406af75078fSIntel 
4407d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4408d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4409d3a274ceSZhihong Wang 
4410285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4411285fd101SOlivier Matz 	if (testpmd_logtype < 0)
441216267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type");
4413285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4414285fd101SOlivier Matz 
44159201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
44169201806eSStephen Hemminger 	if (diag < 0)
441716267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
441816267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
44199201806eSStephen Hemminger 
442097b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
442197b5d8b5SThomas Monjalon 	if (ret != 0)
442216267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
442397b5d8b5SThomas Monjalon 
4424a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
44254aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4426e9436f54STiwei Bie 	rte_pdump_init();
44274aa0d012SAnatoly Burakov #endif
44284aa0d012SAnatoly Burakov 
44294918a357SXiaoyun Li 	count = 0;
44304918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
44314918a357SXiaoyun Li 		ports_ids[count] = port_id;
44324918a357SXiaoyun Li 		count++;
44334918a357SXiaoyun Li 	}
44344918a357SXiaoyun Li 	nb_ports = (portid_t) count;
44354aa0d012SAnatoly Burakov 	if (nb_ports == 0)
44364aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
44374aa0d012SAnatoly Burakov 
44384aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
44394aa0d012SAnatoly Burakov 	init_port();
44404aa0d012SAnatoly Burakov 
44414aa0d012SAnatoly Burakov 	set_def_fwd_config();
44424aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
444316267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
444416267ceeSStephen Hemminger 			 "Check the core mask argument\n");
44454aa0d012SAnatoly Burakov 
4446e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4447a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4448e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4449e505d84cSAnatoly Burakov #endif
4450a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4451e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4452e505d84cSAnatoly Burakov #endif
4453e505d84cSAnatoly Burakov 
4454fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
44555fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4456fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4457fb7b8b32SAnatoly Burakov #else
4458fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4459fb7b8b32SAnatoly Burakov #endif
4460fb7b8b32SAnatoly Burakov 
4461e505d84cSAnatoly Burakov 	argc -= diag;
4462e505d84cSAnatoly Burakov 	argv += diag;
4463e505d84cSAnatoly Burakov 	if (argc > 1)
4464e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4465e505d84cSAnatoly Burakov 
4466761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4467e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4468285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
44691c036b16SEelco Chaudron 			strerror(errno));
44701c036b16SEelco Chaudron 	}
4471761f7ae1SJie Zhou #endif
44721c036b16SEelco Chaudron 
447399cabef0SPablo de Lara 	if (tx_first && interactive)
447499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
447599cabef0SPablo de Lara 				"interactive mode.\n");
44768820cba4SDavid Hunt 
44778820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
447861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
447961a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
44808820cba4SDavid Hunt 		lsc_interrupt = 0;
44818820cba4SDavid Hunt 	}
44828820cba4SDavid Hunt 
44835a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
448461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
448561a3b0e5SAndrew Rybchenko 			"Warning: Either rx or tx queues should be non-zero\n");
44865a8fb55cSReshma Pattan 
44875a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
448861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
448961a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4490af75078fSIntel 			nb_rxq, nb_txq);
4491af75078fSIntel 
4492af75078fSIntel 	init_config();
4493fb73e096SJeff Guo 
4494fb73e096SJeff Guo 	if (hot_plug) {
44952049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4496fb73e096SJeff Guo 		if (ret) {
44972049c511SJeff Guo 			RTE_LOG(ERR, EAL,
44982049c511SJeff Guo 				"failed to enable hotplug handling.\n");
4499fb73e096SJeff Guo 			return -1;
4500fb73e096SJeff Guo 		}
4501fb73e096SJeff Guo 
45022049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
45032049c511SJeff Guo 		if (ret) {
45042049c511SJeff Guo 			RTE_LOG(ERR, EAL,
45052049c511SJeff Guo 				"failed to start device event monitoring.\n");
45062049c511SJeff Guo 			return -1;
45072049c511SJeff Guo 		}
45082049c511SJeff Guo 
45092049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4510cc1bf307SJeff Guo 			dev_event_callback, NULL);
45112049c511SJeff Guo 		if (ret) {
45122049c511SJeff Guo 			RTE_LOG(ERR, EAL,
45132049c511SJeff Guo 				"failed to register device event callback\n");
45142049c511SJeff Guo 			return -1;
45152049c511SJeff Guo 		}
4516fb73e096SJeff Guo 	}
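	/*
	 * For reference, the callback registered above must match
	 * rte_dev_event_cb_fn. A minimal sketch (the real dev_event_callback
	 * is defined earlier in this file):
	 *
	 *	static void
	 *	dev_event_callback(const char *device_name,
	 *			enum rte_dev_event_type type, void *arg)
	 *	{
	 *		if (type == RTE_DEV_EVENT_REMOVE)
	 *			... detach the port backed by device_name ...
	 *		else if (type == RTE_DEV_EVENT_ADD)
	 *			... attach and set up the new device ...
	 *	}
	 */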
4517fb73e096SJeff Guo 
45187e403725SGregory Etelson 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0) {
45197e403725SGregory Etelson 		if (!interactive) {
45207e403725SGregory Etelson 			rte_eal_cleanup();
4521148f963fSBruce Richardson 			rte_exit(EXIT_FAILURE, "Failed to start ports\n");
45227e403725SGregory Etelson 		}
45237e403725SGregory Etelson 		fprintf(stderr, "Failed to start ports\n");
45247e403725SGregory Etelson 	}
4525af75078fSIntel 
4526ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
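	/*
	 * rte_eth_promiscuous_enable() returns 0 on success or a negative
	 * errno value; failures are reported but deliberately non-fatal.
	 */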
452734fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
452834fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
452934fc1051SIvan Ilchenko 		if (ret != 0)
453061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
453161a3b0e5SAndrew Rybchenko 				"Error enabling promiscuous mode for port %u: %s - ignored\n",
453234fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
453334fc1051SIvan Ilchenko 	}
4534af75078fSIntel 
4535bb9be9a4SDavid Marchand #ifdef RTE_LIB_METRICS
45367e4441c8SRemy Horton 	/* Init metrics library */
45377e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
4538bb9be9a4SDavid Marchand #endif
45397e4441c8SRemy Horton 
4540a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
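	/*
	 * rte_latencystats_init() takes a sampling interval and an optional
	 * per-flow callback; passing NULL selects the default packet
	 * handling. The lcore doing the computation is chosen with the
	 * --latencystats=N command-line option.
	 */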
454162d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
454262d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
454362d3216dSReshma Pattan 		if (ret)
454461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
454561a3b0e5SAndrew Rybchenko 				"Warning: rte_latencystats_init() returned error %d\n",
454661a3b0e5SAndrew Rybchenko 				ret);
454761a3b0e5SAndrew Rybchenko 		else
454761a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Latencystats running on lcore %d\n",
454862d3216dSReshma Pattan 				latencystats_lcore_id);
454962d3216dSReshma Pattan 	}
455062d3216dSReshma Pattan #endif
455162d3216dSReshma Pattan 
45527e4441c8SRemy Horton 	/* Setup bitrate stats */
4553a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4554e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
45557e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
45567e4441c8SRemy Horton 		if (bitrate_data == NULL)
4557e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4558e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
45597e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4560e25e6c70SRemy Horton 	}
45617e4441c8SRemy Horton #endif
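	/*
	 * rte_stats_bitrate_reg() only registers the metrics; the rates
	 * themselves are computed later by periodic rte_stats_bitrate_calc()
	 * calls per port from the forwarding loop (on bitrate_lcore_id).
	 */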
4562*99a4974aSRobin Jarry 
4563*99a4974aSRobin Jarry 	if (record_core_cycles)
4564*99a4974aSRobin Jarry 		rte_lcore_register_usage_cb(lcore_usage_callback);
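	/*
	 * A minimal sketch of the usage callback registered above (the real
	 * lcore_usage_callback is defined earlier in this file):
	 *
	 *	static int
	 *	lcore_usage_callback(unsigned int lcore_id,
	 *			struct rte_lcore_usage *usage)
	 *	{
	 *		usage->total_cycles = ...;  total cycles since launch
	 *		usage->busy_cycles = ...;   cycles spent forwarding
	 *		return 0;  negative when no data for this lcore
	 *	}
	 */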
4565*99a4974aSRobin Jarry 
4566a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
4567592ab76fSDavid Marchand 	if (init_cmdline() != 0)
4568592ab76fSDavid Marchand 		rte_exit(EXIT_FAILURE,
4569592ab76fSDavid Marchand 			"Could not initialise cmdline context.\n");
4570592ab76fSDavid Marchand 
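	/*
	 * Commands supplied with --cmdline-file are replayed first, before
	 * the interactive prompt (if any) starts.
	 */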
457181ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
457281ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
457381ef862bSAllain Legacy 
4574ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4575ca7feb22SCyril Chemparathy 		if (auto_start) {
4576ca7feb22SCyril Chemparathy 			printf("Starting automatic packet forwarding\n");
4577ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4578ca7feb22SCyril Chemparathy 		}
4579af75078fSIntel 		prompt();
45800de738cfSJiayu Hu 		pmd_test_exit();
4581ca7feb22SCyril Chemparathy 	} else
45820d56cb81SThomas Monjalon #endif
45830d56cb81SThomas Monjalon 	{
4584af75078fSIntel 		char c;
4585af75078fSIntel 		int rc;
4586af75078fSIntel 
4587d9a191a0SPhil Yang 		f_quit = 0;
4588d9a191a0SPhil Yang 
4589af75078fSIntel 		printf("No commandline core given, starting packet forwarding\n");
459099cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4591cfea1f30SPablo de Lara 		if (stats_period != 0) {
4592cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4593cfea1f30SPablo de Lara 			uint64_t timer_period;
4594cfea1f30SPablo de Lara 
4595cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4596cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
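			/*
			 * Illustrative example: with --stats-period 5 and a
			 * 2 GHz timer, timer_period = 5 * 2e9 = 1e10 cycles,
			 * i.e. stats are printed every 5 seconds.
			 */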
4597cfea1f30SPablo de Lara 
4598d9a191a0SPhil Yang 			while (f_quit == 0) {
4599cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4600cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4601cfea1f30SPablo de Lara 
4602cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4603cfea1f30SPablo de Lara 					print_stats();
4604cfea1f30SPablo de Lara 					/* Reset the timer */
4605cfea1f30SPablo de Lara 					diff_time = 0;
4606cfea1f30SPablo de Lara 				}
4607cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4608cfea1f30SPablo de Lara 				prev_time = cur_time;
4609761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4610cfea1f30SPablo de Lara 			}
4611cfea1f30SPablo de Lara 		}
4612cfea1f30SPablo de Lara 
4613af75078fSIntel 		printf("Press enter to exit\n");
4614af75078fSIntel 		rc = read(0, &c, 1);
4615d3a274ceSZhihong Wang 		pmd_test_exit();
4616af75078fSIntel 		if (rc < 0)
4617af75078fSIntel 			return 1;
4618af75078fSIntel 	}
4619af75078fSIntel 
46205e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
46215e516c89SStephen Hemminger 	if (ret != 0)
46225e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
46235e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
46245e516c89SStephen Hemminger 
46255e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4626af75078fSIntel }
4627