/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports, among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated memory
 * - xmemhuge: use externally allocated hugepage memory
 * - xbuf: use externally attached buffers (pinned memzones)
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
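
/*
 * Assumed mapping (not shown in this file): mp_alloc_type is selected
 * with testpmd's --mp-alloc command-line option; each MP_ALLOC_* value
 * corresponds to one case in mbuf_pool_create() below.
 */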

/*
 * Store the specified sockets on which the memory pool used by the ports
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	&shared_rxq_engine,
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Number of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set this flag to exit the stats-period loop after
 * SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

2281bb4a528SFerruh Yigit /*
2290f2096d7SViacheslav Ovsiienko  * Configuration of packet segments used to scatter received packets
2300f2096d7SViacheslav Ovsiienko  * if some of split features is configured.
2310f2096d7SViacheslav Ovsiienko  */
2320f2096d7SViacheslav Ovsiienko uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
2330f2096d7SViacheslav Ovsiienko uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
23491c78e09SViacheslav Ovsiienko uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
23591c78e09SViacheslav Ovsiienko uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
2360f2096d7SViacheslav Ovsiienko 
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

315af75078fSIntel /*
3163c156061SJens Freimann  * Configurable value of buffered packets before sending.
3173c156061SJens Freimann  */
3183c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

3697ee3e944SVasily Philipov /*
370bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
371bc202406SDavid Marchand  */
372bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
373bc202406SDavid Marchand 
374bc202406SDavid Marchand /*
3756937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3766937d210SStephen Hemminger  */
3776937d210SStephen Hemminger uint8_t no_device_start = 0;
3786937d210SStephen Hemminger 
3796937d210SStephen Hemminger /*
3808ea656f8SGaetan Rivet  * Enable link status change notification
3818ea656f8SGaetan Rivet  */
3828ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3838ea656f8SGaetan Rivet 
3848ea656f8SGaetan Rivet /*
385284c908cSGaetan Rivet  * Enable device removal notification.
386284c908cSGaetan Rivet  */
387284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
388284c908cSGaetan Rivet 
389fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
390fb73e096SJeff Guo 
3914f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3924f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3934f1ed78eSThomas Monjalon 
394b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
395b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
396b0a9354aSPavan Nikhilesh 
39701817b10SBing Zhao /* Hairpin ports configuration mode. */
39801817b10SBing Zhao uint16_t hairpin_mode;
39901817b10SBing Zhao 
40097b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
40197b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
40297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
40397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
40497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
40597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
40697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
40797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
40897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
40997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
41097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
41197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
4120e459ffaSDong Zhou 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
41397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
41497b5d8b5SThomas Monjalon };
41597b5d8b5SThomas Monjalon 
/*
 * Display or mask Ethernet events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
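
/*
 * For illustration: each RTE_ETH_EVENT_* value indexes one bit of the
 * mask above, so whether an event type should be printed can be tested
 * with:
 *     if (event_print_mask & (UINT32_C(1) << event_type))
 */
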
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group, 0 = disabled.
 */
uint32_t rxq_share;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;

static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}
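
/*
 * Note on the negotiation above: rte_eth_rx_metadata_negotiate() lets
 * the PMD clear the RTE_ETH_RX_METADATA_* bits it cannot deliver, which
 * is why every surviving bit is re-checked afterwards; -ENOTSUP merely
 * means the driver does not implement negotiation and is tolerated.
 */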

static void
flow_pick_transfer_proxy_mp(uint16_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int ret;

	port->flow_transfer_proxy = port_id;

	if (!is_proc_primary())
		return;

	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
					   NULL);
	if (ret != 0) {
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
}
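
/*
 * The transfer proxy defaults to the port itself (assigned above before
 * the early return), so secondary processes and drivers without
 * rte_flow_pick_transfer_proxy() support still get a usable value;
 * failure is deliberately non-fatal.
 */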

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}
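
/*
 * The *_mp() wrappers above implement the multi-process split: only the
 * primary process (is_proc_primary()) configures, starts, stops or frees
 * shared resources, while secondary processes simply return success so
 * the common code paths need no special-casing.
 */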

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];


/*
 * Helper function to check if socket is already discovered.
 * If the socket is new (not yet discovered), return a positive value;
 * otherwise return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
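
/*
 * Typical usage, as in set_default_fwd_lcores_config() below:
 *     if (new_socket_id(sock_num))
 *             socket_ids[num_sockets++] = sock_num;
 */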

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
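
/*
 * Worked example with assumed numbers: for 2 MiB pages and an object
 * size of 2560 bytes reported by rte_mempool_calc_obj_size(),
 * mbuf_per_pg = 2097152 / 2560 = 819; nb_mbufs = 8192 then needs 10 full
 * pages plus a leftover page, so n_pages = 11, mbuf_mem = 22 MiB and
 * total_mem = RTE_ALIGN(128 MiB + 22 MiB, 2 MiB) = 150 MiB.
 */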

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
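
/*
 * For example, with a 2 MiB page: rte_log2_u64(2097152) = 21, so this
 * returns 21 << 26, which equals the Linux MAP_HUGE_2MB constant
 * (21U << MAP_HUGE_SHIFT).
 */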

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
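
/*
 * Note that MAP_ANONYMOUS memory is demand-paged: the mapping returned
 * here has no physical backing yet, which is why create_extmem() below
 * touches each page before calling rte_mem_virt2iova() on it.
 */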

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
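
/*
 * These callbacks run once per memory chunk via rte_mempool_mem_iter():
 * mbuf_pool_create() below applies dma_map_cb() to MP_ALLOC_ANON pools to
 * register the anonymous memory with EAL and DMA-map it for every port;
 * dma_unmap_cb() presumably undoes this when such a pool is destroyed.
 */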
#endif

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
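
/*
 * Sizing example with assumed numbers: for mbuf_sz = 2176 the element
 * size stays 2176 after cache-line alignment, so one 2 MiB zone holds
 * 2097152 / 2176 = 963 buffers, and nb_mbufs = 8192 requires
 * zone_num = (8192 + 962) / 963 = 9 memzones.
 */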
110572512e18SViacheslav Ovsiienko 
1106af75078fSIntel /*
1107af75078fSIntel  * Configuration initialisation done once at init time.
1108af75078fSIntel  */
1109401b744dSShahaf Shuler static struct rte_mempool *
1110af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
111126cbb419SViacheslav Ovsiienko 		 unsigned int socket_id, uint16_t size_idx)
1112af75078fSIntel {
1113af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1114bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
1115761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1116af75078fSIntel 	uint32_t mb_size;
1117af75078fSIntel 
1118dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1119761f7ae1SJie Zhou #endif
112026cbb419SViacheslav Ovsiienko 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1121a550baf2SMin Hu (Connor) 	if (!is_proc_primary()) {
1122a550baf2SMin Hu (Connor) 		rte_mp = rte_mempool_lookup(pool_name);
1123a550baf2SMin Hu (Connor) 		if (rte_mp == NULL)
1124a550baf2SMin Hu (Connor) 			rte_exit(EXIT_FAILURE,
1125a550baf2SMin Hu (Connor) 				"Get mbuf pool for socket %u failed: %s\n",
1126a550baf2SMin Hu (Connor) 				socket_id, rte_strerror(rte_errno));
1127a550baf2SMin Hu (Connor) 		return rte_mp;
1128a550baf2SMin Hu (Connor) 	}
1129148f963fSBruce Richardson 
1130285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
1131d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1132d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1133d1eb542eSOlivier Matz 
1134c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
1135c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
1136c7f5dba7SAnatoly Burakov 		{
1137c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
1138c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1139c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
1140c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1141c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1142c7f5dba7SAnatoly Burakov 			break;
1143c7f5dba7SAnatoly Burakov 		}
1144761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
1145c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
1146c7f5dba7SAnatoly Burakov 		{
1147b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1148c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
1149148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
115059fcf854SShahaf Shuler 				socket_id, mempool_flags);
115124427bb9SOlivier Matz 			if (rte_mp == NULL)
115224427bb9SOlivier Matz 				goto err;
1153b19a0c75SOlivier Matz 
1154b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1155b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
1156b19a0c75SOlivier Matz 				rte_mp = NULL;
115724427bb9SOlivier Matz 				goto err;
1158b19a0c75SOlivier Matz 			}
1159b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
1160b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
11613a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1162c7f5dba7SAnatoly Burakov 			break;
1163c7f5dba7SAnatoly Burakov 		}
1164c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
1165c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
1166c7f5dba7SAnatoly Burakov 		{
1167c7f5dba7SAnatoly Burakov 			int heap_socket;
1168c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1169c7f5dba7SAnatoly Burakov 
1170c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1171c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1172c7f5dba7SAnatoly Burakov 
1173c7f5dba7SAnatoly Burakov 			heap_socket =
1174c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1175c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1176c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1177c7f5dba7SAnatoly Burakov 
11780e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
11790e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1180ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1181c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1182c7f5dba7SAnatoly Burakov 					heap_socket);
1183c7f5dba7SAnatoly Burakov 			break;
1184c7f5dba7SAnatoly Burakov 		}
1185761f7ae1SJie Zhou #endif
118672512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
118772512e18SViacheslav Ovsiienko 		{
118872512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
118972512e18SViacheslav Ovsiienko 			unsigned int ext_num;
119072512e18SViacheslav Ovsiienko 
119172512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
119272512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
119372512e18SViacheslav Ovsiienko 			if (ext_num == 0)
119472512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
119572512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
119672512e18SViacheslav Ovsiienko 
119772512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
119872512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
119972512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
120072512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
120172512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
120272512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
120372512e18SViacheslav Ovsiienko 			free(ext_mem);
120472512e18SViacheslav Ovsiienko 			break;
120572512e18SViacheslav Ovsiienko 		}
1206c7f5dba7SAnatoly Burakov 	default:
1207c7f5dba7SAnatoly Burakov 		{
1208c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1209c7f5dba7SAnatoly Burakov 		}
1210bece7b6cSChristian Ehrhardt 	}
1211148f963fSBruce Richardson 
1212761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
121324427bb9SOlivier Matz err:
1214761f7ae1SJie Zhou #endif
1215af75078fSIntel 	if (rte_mp == NULL) {
1216d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1217d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1218d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1219148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1220591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1221af75078fSIntel 	}
1222401b744dSShahaf Shuler 	return rte_mp;
1223af75078fSIntel }
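
/*
 * Usage sketch: init_config() below calls this helper once per socket
 * and per configured mbuf segment size, e.g.
 *
 *	mempools[0] = mbuf_pool_create(mbuf_data_size[0],
 *				       nb_mbuf_per_pool, socket_ids[0], 0);
 *
 * Secondary processes only look the pool up by name, so the primary
 * process must have created it first.
 */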
1224af75078fSIntel 
122520a0286fSLiu Xiaofeng /*
122620a0286fSLiu Xiaofeng  * Check whether the given socket ID is valid in NUMA mode.
122720a0286fSLiu Xiaofeng  * Return 0 if valid, -1 otherwise.
122820a0286fSLiu Xiaofeng  */
122920a0286fSLiu Xiaofeng static int
123020a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
123120a0286fSLiu Xiaofeng {
123220a0286fSLiu Xiaofeng 	static int warning_once = 0;
123320a0286fSLiu Xiaofeng 
1234c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
123520a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
123661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
123761a3b0e5SAndrew Rybchenko 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
123820a0286fSLiu Xiaofeng 		warning_once = 1;
123920a0286fSLiu Xiaofeng 		return -1;
124020a0286fSLiu Xiaofeng 	}
124120a0286fSLiu Xiaofeng 	return 0;
124220a0286fSLiu Xiaofeng }
124320a0286fSLiu Xiaofeng 
12443f7311baSWei Dai /*
12453f7311baSWei Dai  * Get the allowed maximum number of RX queues.
12463f7311baSWei Dai  * *pid returns the port ID that has the minimal value of
12473f7311baSWei Dai  * max_rx_queues among all ports.
12483f7311baSWei Dai  */
12493f7311baSWei Dai queueid_t
12503f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
12513f7311baSWei Dai {
12529e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
12536f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
12543f7311baSWei Dai 	portid_t pi;
12553f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
12563f7311baSWei Dai 
12573f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
12586f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
12596f51deb9SIvan Ilchenko 			continue;
12606f51deb9SIvan Ilchenko 
12616f51deb9SIvan Ilchenko 		max_rxq_valid = true;
12623f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
12633f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
12643f7311baSWei Dai 			*pid = pi;
12653f7311baSWei Dai 		}
12663f7311baSWei Dai 	}
12676f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
12683f7311baSWei Dai }
12693f7311baSWei Dai 
12703f7311baSWei Dai /*
12713f7311baSWei Dai  * Check whether the given rxq is valid.
12723f7311baSWei Dai  * It is valid if it does not exceed the maximum number of
12733f7311baSWei Dai  * RX queues supported by every port.
12743f7311baSWei Dai  * Return 0 if valid, -1 otherwise.
12753f7311baSWei Dai  */
12763f7311baSWei Dai int
12773f7311baSWei Dai check_nb_rxq(queueid_t rxq)
12783f7311baSWei Dai {
12793f7311baSWei Dai 	queueid_t allowed_max_rxq;
12803f7311baSWei Dai 	portid_t pid = 0;
12813f7311baSWei Dai 
12823f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
12833f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
128461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
128561a3b0e5SAndrew Rybchenko 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
128661a3b0e5SAndrew Rybchenko 			rxq, allowed_max_rxq, pid);
12873f7311baSWei Dai 		return -1;
12883f7311baSWei Dai 	}
12893f7311baSWei Dai 	return 0;
12903f7311baSWei Dai }
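
/*
 * Example: a run with "--rxq=4" passes this check only if every port
 * reports max_rx_queues >= 4; otherwise check_nb_rxq(4) names the most
 * restrictive port and returns -1.
 */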
12913f7311baSWei Dai 
129236db4f6cSWei Dai /*
129336db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
129436db4f6cSWei Dai  * *pid returns the port ID that has the minimal value of
129536db4f6cSWei Dai  * max_tx_queues among all ports.
129636db4f6cSWei Dai  */
129736db4f6cSWei Dai queueid_t
129836db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
129936db4f6cSWei Dai {
13009e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
13016f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
130236db4f6cSWei Dai 	portid_t pi;
130336db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
130436db4f6cSWei Dai 
130536db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
13066f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
13076f51deb9SIvan Ilchenko 			continue;
13086f51deb9SIvan Ilchenko 
13096f51deb9SIvan Ilchenko 		max_txq_valid = true;
131036db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
131136db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
131236db4f6cSWei Dai 			*pid = pi;
131336db4f6cSWei Dai 		}
131436db4f6cSWei Dai 	}
13156f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
131636db4f6cSWei Dai }
131736db4f6cSWei Dai 
131836db4f6cSWei Dai /*
131936db4f6cSWei Dai  * Check whether the given txq is valid.
132036db4f6cSWei Dai  * It is valid if it does not exceed the maximum number of
132136db4f6cSWei Dai  * TX queues supported by every port.
132236db4f6cSWei Dai  * Return 0 if valid, -1 otherwise.
132336db4f6cSWei Dai  */
132436db4f6cSWei Dai int
132536db4f6cSWei Dai check_nb_txq(queueid_t txq)
132636db4f6cSWei Dai {
132736db4f6cSWei Dai 	queueid_t allowed_max_txq;
132836db4f6cSWei Dai 	portid_t pid = 0;
132936db4f6cSWei Dai 
133036db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
133136db4f6cSWei Dai 	if (txq > allowed_max_txq) {
133261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
133361a3b0e5SAndrew Rybchenko 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
133461a3b0e5SAndrew Rybchenko 			txq, allowed_max_txq, pid);
133536db4f6cSWei Dai 		return -1;
133636db4f6cSWei Dai 	}
133736db4f6cSWei Dai 	return 0;
133836db4f6cSWei Dai }
133936db4f6cSWei Dai 
13401c69df45SOri Kam /*
134199e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
134299e040d3SLijun Ou  * *pid returns the port ID that has the minimal value of
134399e040d3SLijun Ou  * max_rxd among all ports.
134499e040d3SLijun Ou  */
134599e040d3SLijun Ou static uint16_t
134699e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
134799e040d3SLijun Ou {
134899e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
134999e040d3SLijun Ou 	portid_t pi;
135099e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
135199e040d3SLijun Ou 
135299e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
135399e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
135499e040d3SLijun Ou 			continue;
135599e040d3SLijun Ou 
135699e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
135799e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
135899e040d3SLijun Ou 			*pid = pi;
135999e040d3SLijun Ou 		}
136099e040d3SLijun Ou 	}
136199e040d3SLijun Ou 	return allowed_max_rxd;
136299e040d3SLijun Ou }
136399e040d3SLijun Ou 
136499e040d3SLijun Ou /*
136599e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
136699e040d3SLijun Ou  * *pid returns the port ID that has the maximal value of
136799e040d3SLijun Ou  * min_rxd among all ports.
136899e040d3SLijun Ou  */
136999e040d3SLijun Ou static uint16_t
137099e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
137199e040d3SLijun Ou {
137299e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
137399e040d3SLijun Ou 	portid_t pi;
137499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
137599e040d3SLijun Ou 
137699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
137799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
137899e040d3SLijun Ou 			continue;
137999e040d3SLijun Ou 
138099e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
138199e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
138299e040d3SLijun Ou 			*pid = pi;
138399e040d3SLijun Ou 		}
138499e040d3SLijun Ou 	}
138599e040d3SLijun Ou 
138699e040d3SLijun Ou 	return allowed_min_rxd;
138799e040d3SLijun Ou }
138899e040d3SLijun Ou 
138999e040d3SLijun Ou /*
139099e040d3SLijun Ou  * Check whether the given rxd is valid.
139199e040d3SLijun Ou  * It is valid if it does not exceed the maximum number of RXDs
139299e040d3SLijun Ou  * supported by every Rx queue and is not below the minimal
139399e040d3SLijun Ou  * number of RXDs required by every Rx queue.
139499e040d3SLijun Ou  * Return 0 if valid, -1 otherwise.
139599e040d3SLijun Ou  */
139699e040d3SLijun Ou int
139799e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
139899e040d3SLijun Ou {
139999e040d3SLijun Ou 	uint16_t allowed_max_rxd;
140099e040d3SLijun Ou 	uint16_t allowed_min_rxd;
140199e040d3SLijun Ou 	portid_t pid = 0;
140299e040d3SLijun Ou 
140399e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
140499e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
140561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
140661a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
140761a3b0e5SAndrew Rybchenko 			rxd, allowed_max_rxd, pid);
140899e040d3SLijun Ou 		return -1;
140999e040d3SLijun Ou 	}
141099e040d3SLijun Ou 
141199e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
141299e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
141361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
141461a3b0e5SAndrew Rybchenko 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
141561a3b0e5SAndrew Rybchenko 			rxd, allowed_min_rxd, pid);
141699e040d3SLijun Ou 		return -1;
141799e040d3SLijun Ou 	}
141899e040d3SLijun Ou 
141999e040d3SLijun Ou 	return 0;
142099e040d3SLijun Ou }
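
/*
 * Example: with "--rxd=4096", check_nb_rxd(4096) fails if any port
 * reports rx_desc_lim.nb_max < 4096 or rx_desc_lim.nb_min > 4096;
 * both bounds come from the most restrictive port.
 */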
142199e040d3SLijun Ou 
142299e040d3SLijun Ou /*
142399e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queue.
142499e040d3SLijun Ou  * *pid returns the port ID that has the minimal value of
142599e040d3SLijun Ou  * max_txd among all ports.
142699e040d3SLijun Ou  */
142799e040d3SLijun Ou static uint16_t
142899e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
142999e040d3SLijun Ou {
143099e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
143199e040d3SLijun Ou 	portid_t pi;
143299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
143399e040d3SLijun Ou 
143499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
143599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
143699e040d3SLijun Ou 			continue;
143799e040d3SLijun Ou 
143899e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
143999e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
144099e040d3SLijun Ou 			*pid = pi;
144199e040d3SLijun Ou 		}
144299e040d3SLijun Ou 	}
144399e040d3SLijun Ou 	return allowed_max_txd;
144499e040d3SLijun Ou }
144599e040d3SLijun Ou 
144699e040d3SLijun Ou /*
144799e040d3SLijun Ou  * Get the allowed minimal number of TXDs of every tx queue.
144899e040d3SLijun Ou  * *pid returns the port ID that has the maximal value of
144999e040d3SLijun Ou  * min_txd among all ports.
145099e040d3SLijun Ou  */
145199e040d3SLijun Ou static uint16_t
145299e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
145399e040d3SLijun Ou {
145499e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
145599e040d3SLijun Ou 	portid_t pi;
145699e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
145799e040d3SLijun Ou 
145899e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
145999e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
146099e040d3SLijun Ou 			continue;
146199e040d3SLijun Ou 
146299e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
146399e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
146499e040d3SLijun Ou 			*pid = pi;
146599e040d3SLijun Ou 		}
146699e040d3SLijun Ou 	}
146799e040d3SLijun Ou 
146899e040d3SLijun Ou 	return allowed_min_txd;
146999e040d3SLijun Ou }
147099e040d3SLijun Ou 
147199e040d3SLijun Ou /*
147299e040d3SLijun Ou  * Check whether the given txd is valid.
147399e040d3SLijun Ou  * It is valid if it does not exceed the maximum number of TXDs
147499e040d3SLijun Ou  * supported by every Tx queue and is not below the minimal number
147599e040d3SLijun Ou  * required. Return 0 if valid, -1 otherwise.
147699e040d3SLijun Ou  */
147799e040d3SLijun Ou int
147899e040d3SLijun Ou check_nb_txd(queueid_t txd)
147999e040d3SLijun Ou {
148099e040d3SLijun Ou 	uint16_t allowed_max_txd;
148199e040d3SLijun Ou 	uint16_t allowed_min_txd;
148299e040d3SLijun Ou 	portid_t pid = 0;
148399e040d3SLijun Ou 
148499e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
148599e040d3SLijun Ou 	if (txd > allowed_max_txd) {
148661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
148761a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
148861a3b0e5SAndrew Rybchenko 			txd, allowed_max_txd, pid);
148999e040d3SLijun Ou 		return -1;
149099e040d3SLijun Ou 	}
149199e040d3SLijun Ou 
149299e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
149399e040d3SLijun Ou 	if (txd < allowed_min_txd) {
149461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
149561a3b0e5SAndrew Rybchenko 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
149661a3b0e5SAndrew Rybchenko 			txd, allowed_min_txd, pid);
149799e040d3SLijun Ou 		return -1;
149899e040d3SLijun Ou 	}
149999e040d3SLijun Ou 	return 0;
150099e040d3SLijun Ou }
150199e040d3SLijun Ou 
150299e040d3SLijun Ou 
150399e040d3SLijun Ou /*
15041c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
15051c69df45SOri Kam  * *pid returns the port ID that has the minimal value of
15061c69df45SOri Kam  * max_hairpin_queues among all ports.
15071c69df45SOri Kam  */
15081c69df45SOri Kam queueid_t
15091c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
15101c69df45SOri Kam {
15119e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
15121c69df45SOri Kam 	portid_t pi;
15131c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
15141c69df45SOri Kam 
15151c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
15161c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
15171c69df45SOri Kam 			*pid = pi;
15181c69df45SOri Kam 			return 0;
15191c69df45SOri Kam 		}
15201c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
15211c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
15221c69df45SOri Kam 			*pid = pi;
15231c69df45SOri Kam 		}
15241c69df45SOri Kam 	}
15251c69df45SOri Kam 	return allowed_max_hairpinq;
15261c69df45SOri Kam }
15271c69df45SOri Kam 
15281c69df45SOri Kam /*
15291c69df45SOri Kam  * Check whether the given hairpinq is valid.
15301c69df45SOri Kam  * It is valid if it does not exceed the maximum number of
15311c69df45SOri Kam  * hairpin queues supported by every port.
15321c69df45SOri Kam  * Return 0 if valid, -1 otherwise.
15331c69df45SOri Kam  */
15341c69df45SOri Kam int
15351c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
15361c69df45SOri Kam {
15371c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
15381c69df45SOri Kam 	portid_t pid = 0;
15391c69df45SOri Kam 
15401c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
15411c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
154261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
154361a3b0e5SAndrew Rybchenko 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
15441c69df45SOri Kam 			hairpinq, allowed_max_hairpinq, pid);
15451c69df45SOri Kam 		return -1;
15461c69df45SOri Kam 	}
15471c69df45SOri Kam 	return 0;
15481c69df45SOri Kam }
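
/*
 * Example: "--hairpinq=2" is accepted only if
 * rte_eth_dev_hairpin_capability_get() reports max_nb_queues >= 2 on
 * every port; any port without hairpin support drops the allowed
 * maximum to 0.
 */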
15491c69df45SOri Kam 
15501bb4a528SFerruh Yigit static int
15511bb4a528SFerruh Yigit get_eth_overhead(struct rte_eth_dev_info *dev_info)
15521bb4a528SFerruh Yigit {
15531bb4a528SFerruh Yigit 	uint32_t eth_overhead;
15541bb4a528SFerruh Yigit 
15551bb4a528SFerruh Yigit 	if (dev_info->max_mtu != UINT16_MAX &&
15561bb4a528SFerruh Yigit 	    dev_info->max_rx_pktlen > dev_info->max_mtu)
15571bb4a528SFerruh Yigit 		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
15581bb4a528SFerruh Yigit 	else
15591bb4a528SFerruh Yigit 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
15601bb4a528SFerruh Yigit 
15611bb4a528SFerruh Yigit 	return eth_overhead;
15621bb4a528SFerruh Yigit }
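
/*
 * Example: a port reporting max_rx_pktlen = 1518 and max_mtu = 1500
 * yields an 18 byte overhead, matching the fallback of
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4).
 */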
15631bb4a528SFerruh Yigit 
1564af75078fSIntel static void
1565b6b8a1ebSViacheslav Ovsiienko init_config_port_offloads(portid_t pid, uint32_t socket_id)
1566b6b8a1ebSViacheslav Ovsiienko {
1567b6b8a1ebSViacheslav Ovsiienko 	struct rte_port *port = &ports[pid];
1568b6b8a1ebSViacheslav Ovsiienko 	int ret;
1569b6b8a1ebSViacheslav Ovsiienko 	int i;
1570b6b8a1ebSViacheslav Ovsiienko 
1571f6d8a6d3SIvan Malov 	eth_rx_metadata_negotiate_mp(pid);
15721179f05cSIvan Malov 	flow_pick_transfer_proxy_mp(pid);
1573f6d8a6d3SIvan Malov 
1574b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.txmode = tx_mode;
1575b6b8a1ebSViacheslav Ovsiienko 	port->dev_conf.rxmode = rx_mode;
1576b6b8a1ebSViacheslav Ovsiienko 
1577b6b8a1ebSViacheslav Ovsiienko 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1578b6b8a1ebSViacheslav Ovsiienko 	if (ret != 0)
1579b6b8a1ebSViacheslav Ovsiienko 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1580b6b8a1ebSViacheslav Ovsiienko 
1581b6b8a1ebSViacheslav Ovsiienko 	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1582b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.txmode.offloads &=
1583b6b8a1ebSViacheslav Ovsiienko 			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1584b6b8a1ebSViacheslav Ovsiienko 
1585b6b8a1ebSViacheslav Ovsiienko 	/* Apply Rx offloads configuration */
1586b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
1587b6b8a1ebSViacheslav Ovsiienko 		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1588b6b8a1ebSViacheslav Ovsiienko 	/* Apply Tx offloads configuration */
1589b6b8a1ebSViacheslav Ovsiienko 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
1590b6b8a1ebSViacheslav Ovsiienko 		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1591b6b8a1ebSViacheslav Ovsiienko 
1592b6b8a1ebSViacheslav Ovsiienko 	if (eth_link_speed)
1593b6b8a1ebSViacheslav Ovsiienko 		port->dev_conf.link_speeds = eth_link_speed;
1594b6b8a1ebSViacheslav Ovsiienko 
15951bb4a528SFerruh Yigit 	if (max_rx_pkt_len)
15961bb4a528SFerruh Yigit 		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
15971bb4a528SFerruh Yigit 			get_eth_overhead(&port->dev_info);
15981bb4a528SFerruh Yigit 
1599b6b8a1ebSViacheslav Ovsiienko 	/* set flag to initialize port/queue */
1600b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig = 1;
1601b6b8a1ebSViacheslav Ovsiienko 	port->need_reconfig_queues = 1;
1602b6b8a1ebSViacheslav Ovsiienko 	port->socket_id = socket_id;
1603b6b8a1ebSViacheslav Ovsiienko 	port->tx_metadata = 0;
1604b6b8a1ebSViacheslav Ovsiienko 
1605b6b8a1ebSViacheslav Ovsiienko 	/*
1606b6b8a1ebSViacheslav Ovsiienko 	 * Check for maximum number of segments per MTU.
1607b6b8a1ebSViacheslav Ovsiienko 	 * Accordingly update the mbuf data size.
1608b6b8a1ebSViacheslav Ovsiienko 	 */
1609b6b8a1ebSViacheslav Ovsiienko 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1610b6b8a1ebSViacheslav Ovsiienko 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
16111bb4a528SFerruh Yigit 		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
16121bb4a528SFerruh Yigit 		uint16_t mtu;
1613b6b8a1ebSViacheslav Ovsiienko 
16141bb4a528SFerruh Yigit 		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
16151bb4a528SFerruh Yigit 			uint16_t data_size = (mtu + eth_overhead) /
16161bb4a528SFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
16171bb4a528SFerruh Yigit 			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
16181bb4a528SFerruh Yigit 
16191bb4a528SFerruh Yigit 			if (buffer_size > mbuf_data_size[0]) {
16201bb4a528SFerruh Yigit 				mbuf_data_size[0] = buffer_size;
1621b6b8a1ebSViacheslav Ovsiienko 				TESTPMD_LOG(WARNING,
1622b6b8a1ebSViacheslav Ovsiienko 					"Configured mbuf size of the first segment %hu\n",
1623b6b8a1ebSViacheslav Ovsiienko 					mbuf_data_size[0]);
1624b6b8a1ebSViacheslav Ovsiienko 			}
1625b6b8a1ebSViacheslav Ovsiienko 		}
1626b6b8a1ebSViacheslav Ovsiienko 	}
16271bb4a528SFerruh Yigit }
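
/*
 * Worked example for the segment sizing above (illustrative values):
 * with mtu = 1500, an 18 byte overhead and nb_mtu_seg_max = 5,
 * data_size = (1500 + 18) / 5 = 303, so the first-segment mbuf size
 * is raised to at least 303 + RTE_PKTMBUF_HEADROOM bytes.
 */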
1628b6b8a1ebSViacheslav Ovsiienko 
1629b6b8a1ebSViacheslav Ovsiienko static void
1630af75078fSIntel init_config(void)
1631af75078fSIntel {
1632ce8d5614SIntel 	portid_t pid;
1633af75078fSIntel 	struct rte_mempool *mbp;
1634af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1635af75078fSIntel 	lcoreid_t  lc_id;
1636b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
163752f38a20SJiayu Hu 	uint32_t gso_types;
1638487f9a59SYulong Pei 
1639af75078fSIntel 	/* Configuration of logical cores. */
1640af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1641af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1642fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1643af75078fSIntel 	if (fwd_lcores == NULL) {
1644ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1645ce8d5614SIntel 							"failed\n", nb_lcores);
1646af75078fSIntel 	}
1647af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1648af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1649af75078fSIntel 					       sizeof(struct fwd_lcore),
1650fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1651af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1652ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1653ce8d5614SIntel 								"failed\n");
1654af75078fSIntel 		}
1655af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1656af75078fSIntel 	}
1657af75078fSIntel 
16587d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1659b6b8a1ebSViacheslav Ovsiienko 		uint32_t socket_id;
16606f51deb9SIvan Ilchenko 
1661b6ea6408SIntel 		if (numa_support) {
1662b6b8a1ebSViacheslav Ovsiienko 			socket_id = port_numa[pid];
1663b6b8a1ebSViacheslav Ovsiienko 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1664b6b8a1ebSViacheslav Ovsiienko 				socket_id = rte_eth_dev_socket_id(pid);
166520a0286fSLiu Xiaofeng 
166629841336SPhil Yang 				/*
166729841336SPhil Yang 				 * if socket_id is invalid,
166829841336SPhil Yang 				 * set to the first available socket.
166929841336SPhil Yang 				 */
167020a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
167129841336SPhil Yang 					socket_id = socket_ids[0];
1672b6ea6408SIntel 			}
1673b6b8a1ebSViacheslav Ovsiienko 		} else {
1674b6b8a1ebSViacheslav Ovsiienko 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1675b6b8a1ebSViacheslav Ovsiienko 				    0 : socket_num;
1676b6ea6408SIntel 		}
1677b6b8a1ebSViacheslav Ovsiienko 		/* Apply default TxRx configuration for all ports */
1678b6b8a1ebSViacheslav Ovsiienko 		init_config_port_offloads(pid, socket_id);
1679ce8d5614SIntel 	}
16803ab64341SOlivier Matz 	/*
16813ab64341SOlivier Matz 	 * Create pools of mbuf.
16823ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
16833ab64341SOlivier Matz 	 * socket 0 memory by default.
16843ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of each detected socket.
16853ab64341SOlivier Matz 	 *
16863ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
16873ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
16883ab64341SOlivier Matz 	 */
16893ab64341SOlivier Matz 	if (param_total_num_mbufs)
16903ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
16913ab64341SOlivier Matz 	else {
16923ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
16933ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
16943ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
16953ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
16963ab64341SOlivier Matz 	}
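
	/*
	 * Worked example (illustrative values): with
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, 4 lcores,
	 * a 250-mbuf cache and RTE_MAX_ETHPORTS = 32, this yields
	 * (2048 + 4 * 250 + 2048 + 512) * 32 = 179456 mbufs per pool.
	 */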
16973ab64341SOlivier Matz 
1698b6ea6408SIntel 	if (numa_support) {
169926cbb419SViacheslav Ovsiienko 		uint8_t i, j;
1700ce8d5614SIntel 
1701c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
170226cbb419SViacheslav Ovsiienko 			for (j = 0; j < mbuf_data_size_n; j++)
170326cbb419SViacheslav Ovsiienko 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
170426cbb419SViacheslav Ovsiienko 					mbuf_pool_create(mbuf_data_size[j],
1705401b744dSShahaf Shuler 							  nb_mbuf_per_pool,
170626cbb419SViacheslav Ovsiienko 							  socket_ids[i], j);
17073ab64341SOlivier Matz 	} else {
170826cbb419SViacheslav Ovsiienko 		uint8_t i;
170926cbb419SViacheslav Ovsiienko 
171026cbb419SViacheslav Ovsiienko 		for (i = 0; i < mbuf_data_size_n; i++)
171126cbb419SViacheslav Ovsiienko 			mempools[i] = mbuf_pool_create
171226cbb419SViacheslav Ovsiienko 					(mbuf_data_size[i],
1713401b744dSShahaf Shuler 					 nb_mbuf_per_pool,
171426cbb419SViacheslav Ovsiienko 					 socket_num == UMA_NO_CONFIG ?
171526cbb419SViacheslav Ovsiienko 					 0 : socket_num, i);
17163ab64341SOlivier Matz 	}
1717b6ea6408SIntel 
1718b6ea6408SIntel 	init_port_config();
17195886ae07SAdrien Mazarguil 
172052f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1721aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
17225886ae07SAdrien Mazarguil 	/*
17235886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
17245886ae07SAdrien Mazarguil 	 */
17255886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
17268fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
172726cbb419SViacheslav Ovsiienko 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
17288fd8bebcSAdrien Mazarguil 
17295886ae07SAdrien Mazarguil 		if (mbp == NULL)
173026cbb419SViacheslav Ovsiienko 			mbp = mbuf_pool_find(0, 0);
17315886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
173252f38a20SJiayu Hu 		/* initialize GSO context */
173352f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
173452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
173552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
173635b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
173735b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
173852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
17395886ae07SAdrien Mazarguil 	}
17405886ae07SAdrien Mazarguil 
17410c0db76fSBernard Iremonger 	fwd_config_setup();
1742b7091f1dSJiayu Hu 
1743b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1744b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1745b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1746b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1747b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1748b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1749b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1750b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1751b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1752b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1753b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1754b7091f1dSJiayu Hu 		}
1755b7091f1dSJiayu Hu 	}
1756ce8d5614SIntel }
1757ce8d5614SIntel 
17582950a769SDeclan Doherty 
17592950a769SDeclan Doherty void
1760a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
17612950a769SDeclan Doherty {
17622950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
1763b6b8a1ebSViacheslav Ovsiienko 	init_config_port_offloads(new_port_id, socket_id);
17642950a769SDeclan Doherty 	init_port_config();
17652950a769SDeclan Doherty }
17662950a769SDeclan Doherty 
17672950a769SDeclan Doherty 
1768ce8d5614SIntel int
1769ce8d5614SIntel init_fwd_streams(void)
1770ce8d5614SIntel {
1771ce8d5614SIntel 	portid_t pid;
1772ce8d5614SIntel 	struct rte_port *port;
1773ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
17745a8fb55cSReshma Pattan 	queueid_t q;
1775ce8d5614SIntel 
1776ce8d5614SIntel 	/* set socket id according to numa or not */
17777d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1778ce8d5614SIntel 		port = &ports[pid];
1779ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
178061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
178161a3b0e5SAndrew Rybchenko 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
178261a3b0e5SAndrew Rybchenko 				nb_rxq, port->dev_info.max_rx_queues);
1783ce8d5614SIntel 			return -1;
1784ce8d5614SIntel 		}
1785ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
178661a3b0e5SAndrew Rybchenko 			fprintf(stderr,
178761a3b0e5SAndrew Rybchenko 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
178861a3b0e5SAndrew Rybchenko 				nb_txq, port->dev_info.max_tx_queues);
1789ce8d5614SIntel 			return -1;
1790ce8d5614SIntel 		}
179120a0286fSLiu Xiaofeng 		if (numa_support) {
179220a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
179320a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
179420a0286fSLiu Xiaofeng 			else {
1795b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
179620a0286fSLiu Xiaofeng 
179729841336SPhil Yang 				/*
179829841336SPhil Yang 				 * if socket_id is invalid,
179929841336SPhil Yang 				 * set to the first available socket.
180029841336SPhil Yang 				 */
180120a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
180229841336SPhil Yang 					port->socket_id = socket_ids[0];
180320a0286fSLiu Xiaofeng 			}
180420a0286fSLiu Xiaofeng 		}
1805b6ea6408SIntel 		else {
1806b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1807af75078fSIntel 				port->socket_id = 0;
1808b6ea6408SIntel 			else
1809b6ea6408SIntel 				port->socket_id = socket_num;
1810b6ea6408SIntel 		}
1811af75078fSIntel 	}
1812af75078fSIntel 
18135a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
18145a8fb55cSReshma Pattan 	if (q == 0) {
181561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
181661a3b0e5SAndrew Rybchenko 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
18175a8fb55cSReshma Pattan 		return -1;
18185a8fb55cSReshma Pattan 	}
18195a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1820ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1821ce8d5614SIntel 		return 0;
1822ce8d5614SIntel 	/* clear the old */
1823ce8d5614SIntel 	if (fwd_streams != NULL) {
1824ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1825ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1826ce8d5614SIntel 				continue;
1827ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1828ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1829af75078fSIntel 		}
1830ce8d5614SIntel 		rte_free(fwd_streams);
1831ce8d5614SIntel 		fwd_streams = NULL;
1832ce8d5614SIntel 	}
1833ce8d5614SIntel 
1834ce8d5614SIntel 	/* init new */
1835ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
18361f84c469SMatan Azrad 	if (nb_fwd_streams) {
1837ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
18381f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
18391f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1840ce8d5614SIntel 		if (fwd_streams == NULL)
18411f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
18421f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
18431f84c469SMatan Azrad 				 nb_fwd_streams);
1844ce8d5614SIntel 
1845af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
18461f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
18471f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
18481f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1849ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
18501f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
18511f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
18521f84c469SMatan Azrad 		}
1853af75078fSIntel 	}
1854ce8d5614SIntel 
1855ce8d5614SIntel 	return 0;
1856af75078fSIntel }
1857af75078fSIntel 
1858af75078fSIntel static void
1859af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1860af75078fSIntel {
18617569b8c1SHonnappa Nagarahalli 	uint64_t total_burst, sburst;
186285de481aSHonnappa Nagarahalli 	uint64_t nb_burst;
18637569b8c1SHonnappa Nagarahalli 	uint64_t burst_stats[4];
18647569b8c1SHonnappa Nagarahalli 	uint16_t pktnb_stats[4];
1865af75078fSIntel 	uint16_t nb_pkt;
18667569b8c1SHonnappa Nagarahalli 	int burst_percent[4], sburstp;
18677569b8c1SHonnappa Nagarahalli 	int i;
1868af75078fSIntel 
1869af75078fSIntel 	/*
1870af75078fSIntel 	 * First compute the total number of packet bursts, then find
1871af75078fSIntel 	 * the two most frequent burst sizes besides the zero-size bucket.
1872af75078fSIntel 	 */
18737569b8c1SHonnappa Nagarahalli 	memset(&burst_stats, 0x0, sizeof(burst_stats));
18747569b8c1SHonnappa Nagarahalli 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
18757569b8c1SHonnappa Nagarahalli 
18767569b8c1SHonnappa Nagarahalli 	/* Show stats for 0 burst size always */
18777569b8c1SHonnappa Nagarahalli 	total_burst = pbs->pkt_burst_spread[0];
18787569b8c1SHonnappa Nagarahalli 	burst_stats[0] = pbs->pkt_burst_spread[0];
18797569b8c1SHonnappa Nagarahalli 	pktnb_stats[0] = 0;
18807569b8c1SHonnappa Nagarahalli 
18817569b8c1SHonnappa Nagarahalli 	/* Find the next 2 burst sizes with highest occurrences. */
1882*6a8b64fdSEli Britstein 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1883af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
18847569b8c1SHonnappa Nagarahalli 
1885af75078fSIntel 		if (nb_burst == 0)
1886af75078fSIntel 			continue;
18877569b8c1SHonnappa Nagarahalli 
1888af75078fSIntel 		total_burst += nb_burst;
18897569b8c1SHonnappa Nagarahalli 
18907569b8c1SHonnappa Nagarahalli 		if (nb_burst > burst_stats[1]) {
18917569b8c1SHonnappa Nagarahalli 			burst_stats[2] = burst_stats[1];
18927569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = pktnb_stats[1];
1893fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1894fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
18957569b8c1SHonnappa Nagarahalli 		} else if (nb_burst > burst_stats[2]) {
18967569b8c1SHonnappa Nagarahalli 			burst_stats[2] = nb_burst;
18977569b8c1SHonnappa Nagarahalli 			pktnb_stats[2] = nb_pkt;
1898af75078fSIntel 		}
1899af75078fSIntel 	}
1900af75078fSIntel 	if (total_burst == 0)
1901af75078fSIntel 		return;
19027569b8c1SHonnappa Nagarahalli 
19037569b8c1SHonnappa Nagarahalli 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
19047569b8c1SHonnappa Nagarahalli 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
19057569b8c1SHonnappa Nagarahalli 		if (i == 3) {
19067569b8c1SHonnappa Nagarahalli 			printf("%d%% of other]\n", 100 - sburstp);
1907af75078fSIntel 			return;
1908af75078fSIntel 		}
19097569b8c1SHonnappa Nagarahalli 
19107569b8c1SHonnappa Nagarahalli 		sburst += burst_stats[i];
19117569b8c1SHonnappa Nagarahalli 		if (sburst == total_burst) {
19127569b8c1SHonnappa Nagarahalli 			printf("%d%% of %d pkts]\n",
19137569b8c1SHonnappa Nagarahalli 				100 - sburstp, (int) pktnb_stats[i]);
1914af75078fSIntel 			return;
1915af75078fSIntel 		}
19167569b8c1SHonnappa Nagarahalli 
19177569b8c1SHonnappa Nagarahalli 		burst_percent[i] =
19187569b8c1SHonnappa Nagarahalli 			(double)burst_stats[i] / total_burst * 100;
19197569b8c1SHonnappa Nagarahalli 		printf("%d%% of %d pkts + ",
19207569b8c1SHonnappa Nagarahalli 			burst_percent[i], (int) pktnb_stats[i]);
19217569b8c1SHonnappa Nagarahalli 		sburstp += burst_percent[i];
1922af75078fSIntel 	}
1923af75078fSIntel }
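
/*
 * Sample output (hypothetical counts):
 *   RX-bursts : 100000 [1% of 0 pkts + 75% of 32 pkts + 20% of 16 pkts + 4% of other]
 * i.e. the always-shown zero-size bucket, the two most frequent burst
 * sizes, and a remainder bucket.
 */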
1924af75078fSIntel 
1925af75078fSIntel static void
1926af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1927af75078fSIntel {
1928af75078fSIntel 	struct fwd_stream *fs;
1929af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1930af75078fSIntel 
1931af75078fSIntel 	fs = fwd_streams[stream_id];
1932af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1933af75078fSIntel 	    (fs->fwd_dropped == 0))
1934af75078fSIntel 		return;
1935af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1936af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1937af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1938af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1939c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1940c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1941af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1942af75078fSIntel 
1943af75078fSIntel 	/* if checksum mode */
1944af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1945c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1946c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1947c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
194858d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
194958d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
1950d139cf23SLance Richardson 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1951d139cf23SLance Richardson 			fs->rx_bad_outer_ip_csum);
195294d65546SDavid Marchand 	} else {
195394d65546SDavid Marchand 		printf("\n");
1954af75078fSIntel 	}
1955af75078fSIntel 
19560e4b1963SDharmik Thakkar 	if (record_burst_stats) {
1957af75078fSIntel 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1958af75078fSIntel 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
19590e4b1963SDharmik Thakkar 	}
1960af75078fSIntel }
1961af75078fSIntel 
196253324971SDavid Marchand void
196353324971SDavid Marchand fwd_stats_display(void)
196453324971SDavid Marchand {
196553324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
196653324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
196753324971SDavid Marchand 	struct {
196853324971SDavid Marchand 		struct fwd_stream *rx_stream;
196953324971SDavid Marchand 		struct fwd_stream *tx_stream;
197053324971SDavid Marchand 		uint64_t tx_dropped;
197153324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
197253324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
197353324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
1974d139cf23SLance Richardson 		uint64_t rx_bad_outer_ip_csum;
197553324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
197653324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
197753324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
197853324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
197953324971SDavid Marchand 	struct rte_eth_stats stats;
198053324971SDavid Marchand 	uint64_t fwd_cycles = 0;
198153324971SDavid Marchand 	uint64_t total_recv = 0;
198253324971SDavid Marchand 	uint64_t total_xmit = 0;
198353324971SDavid Marchand 	struct rte_port *port;
198453324971SDavid Marchand 	streamid_t sm_id;
198553324971SDavid Marchand 	portid_t pt_id;
198653324971SDavid Marchand 	int i;
198753324971SDavid Marchand 
198853324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
198953324971SDavid Marchand 
199053324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
199153324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
199253324971SDavid Marchand 
199353324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
199453324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
199553324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
199653324971SDavid Marchand 		} else {
199753324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
199853324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
199953324971SDavid Marchand 		}
200053324971SDavid Marchand 
200153324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
200253324971SDavid Marchand 
200353324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
200453324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
200553324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
200653324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
2007d139cf23SLance Richardson 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2008d139cf23SLance Richardson 				fs->rx_bad_outer_ip_csum;
200953324971SDavid Marchand 
2010bc700b67SDharmik Thakkar 		if (record_core_cycles)
201153324971SDavid Marchand 			fwd_cycles += fs->core_cycles;
201253324971SDavid Marchand 	}
201353324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
201453324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
201553324971SDavid Marchand 		port = &ports[pt_id];
201653324971SDavid Marchand 
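		/*
		 * Subtract the snapshot taken by fwd_stats_reset() so the
		 * figures below cover only the current forwarding run.
		 */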
201753324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
201853324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
201953324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
202053324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
202153324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
202253324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
202353324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
202453324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
202553324971SDavid Marchand 
202653324971SDavid Marchand 		total_recv += stats.ipackets;
202753324971SDavid Marchand 		total_xmit += stats.opackets;
202853324971SDavid Marchand 		total_rx_dropped += stats.imissed;
202953324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
203053324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
203153324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
203253324971SDavid Marchand 
203353324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
203453324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
203553324971SDavid Marchand 
203608dcd187SHuisong Li 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
203708dcd187SHuisong Li 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
203853324971SDavid Marchand 		       stats.ipackets + stats.imissed);
203953324971SDavid Marchand 
2040d139cf23SLance Richardson 		if (cur_fwd_eng == &csum_fwd_engine) {
204153324971SDavid Marchand 			printf("  Bad-ipcsum: %-14"PRIu64
204253324971SDavid Marchand 			       " Bad-l4csum: %-14"PRIu64
204353324971SDavid Marchand 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
204453324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_ip_csum,
204553324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_l4_csum,
204653324971SDavid Marchand 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
2047d139cf23SLance Richardson 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
2048d139cf23SLance Richardson 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
2049d139cf23SLance Richardson 		}
205053324971SDavid Marchand 		if (stats.ierrors + stats.rx_nombuf > 0) {
205108dcd187SHuisong Li 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
205208dcd187SHuisong Li 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
205353324971SDavid Marchand 		}
205453324971SDavid Marchand 
205508dcd187SHuisong Li 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
205653324971SDavid Marchand 		       "TX-total: %-"PRIu64"\n",
205753324971SDavid Marchand 		       stats.opackets, ports_stats[pt_id].tx_dropped,
205853324971SDavid Marchand 		       stats.opackets + ports_stats[pt_id].tx_dropped);
205953324971SDavid Marchand 
20600e4b1963SDharmik Thakkar 		if (record_burst_stats) {
206153324971SDavid Marchand 			if (ports_stats[pt_id].rx_stream)
206253324971SDavid Marchand 				pkt_burst_stats_display("RX",
206353324971SDavid Marchand 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
206453324971SDavid Marchand 			if (ports_stats[pt_id].tx_stream)
206553324971SDavid Marchand 				pkt_burst_stats_display("TX",
206653324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
20670e4b1963SDharmik Thakkar 		}
206853324971SDavid Marchand 
206953324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
207053324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
207153324971SDavid Marchand 	}
207253324971SDavid Marchand 
207353324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
207453324971SDavid Marchand 	       "%s\n",
207553324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
207653324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
207753324971SDavid Marchand 	       "%-"PRIu64"\n"
207853324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
207953324971SDavid Marchand 	       "%-"PRIu64"\n",
208053324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
208153324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
208253324971SDavid Marchand 	if (total_rx_nombuf > 0)
208353324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
208453324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
208553324971SDavid Marchand 	       "%s\n",
208653324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
2087bc700b67SDharmik Thakkar 	if (record_core_cycles) {
20884c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
20893a164e00SPhil Yang 		if (total_recv > 0 || total_xmit > 0) {
20903a164e00SPhil Yang 			uint64_t total_pkts = 0;
20913a164e00SPhil Yang 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
20923a164e00SPhil Yang 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
20933a164e00SPhil Yang 				total_pkts = total_xmit;
20943a164e00SPhil Yang 			else
20953a164e00SPhil Yang 				total_pkts = total_recv;
20963a164e00SPhil Yang 
20971920832aSDharmik Thakkar 			printf("\n  CPU cycles/packet=%.2F (total cycles="
20983a164e00SPhil Yang 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
20994c0497b1SDharmik Thakkar 			       " MHz Clock\n",
21003a164e00SPhil Yang 			       (double) fwd_cycles / total_pkts,
21013a164e00SPhil Yang 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
21024c0497b1SDharmik Thakkar 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
21033a164e00SPhil Yang 		}
2104bc700b67SDharmik Thakkar 	}
210553324971SDavid Marchand }
210653324971SDavid Marchand 
210753324971SDavid Marchand void
210853324971SDavid Marchand fwd_stats_reset(void)
210953324971SDavid Marchand {
211053324971SDavid Marchand 	streamid_t sm_id;
211153324971SDavid Marchand 	portid_t pt_id;
211253324971SDavid Marchand 	int i;
211353324971SDavid Marchand 
211453324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
211553324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
211653324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
211753324971SDavid Marchand 	}
211853324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
211953324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
212053324971SDavid Marchand 
212153324971SDavid Marchand 		fs->rx_packets = 0;
212253324971SDavid Marchand 		fs->tx_packets = 0;
212353324971SDavid Marchand 		fs->fwd_dropped = 0;
212453324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
212553324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
212653324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
2127d139cf23SLance Richardson 		fs->rx_bad_outer_ip_csum = 0;
212853324971SDavid Marchand 
212953324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
213053324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
213153324971SDavid Marchand 		fs->core_cycles = 0;
213253324971SDavid Marchand 	}
213353324971SDavid Marchand }
213453324971SDavid Marchand 
2135af75078fSIntel static void
21367741e4cfSIntel flush_fwd_rx_queues(void)
2137af75078fSIntel {
2138af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2139af75078fSIntel 	portid_t  rxp;
21407741e4cfSIntel 	portid_t port_id;
2141af75078fSIntel 	queueid_t rxq;
2142af75078fSIntel 	uint16_t  nb_rx;
2143af75078fSIntel 	uint16_t  i;
2144af75078fSIntel 	uint8_t   j;
2145f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2146594302c7SJames Poole 	uint64_t timer_period;
2147f487715fSReshma Pattan 
2148a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
2149a550baf2SMin Hu (Connor) 		printf("multi-process does not support flushing fwd Rx queues, skipping.\n");
2150a550baf2SMin Hu (Connor) 		return;
2151a550baf2SMin Hu (Connor) 	}
2152a550baf2SMin Hu (Connor) 
2153f487715fSReshma Pattan 	/* convert to number of cycles */
2154594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2155af75078fSIntel 
2156af75078fSIntel 	for (j = 0; j < 2; j++) {
21577741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2158af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
21597741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2160f487715fSReshma Pattan 				/*
2161f487715fSReshma Pattan 				 * testpmd can get stuck in this do/while loop
2162f487715fSReshma Pattan 				 * if rte_eth_rx_burst() keeps returning
2163f487715fSReshma Pattan 				 * packets, so a timer is used to exit it
2164f487715fSReshma Pattan 				 * after a one second timeout.
2165f487715fSReshma Pattan 				 */
2166f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2167af75078fSIntel 				do {
21687741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2169013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2170af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2171af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2172f487715fSReshma Pattan 
2173f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2174f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2175f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2176f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2177f487715fSReshma Pattan 					(timer_tsc < timer_period));
2178f487715fSReshma Pattan 				timer_tsc = 0;
2179af75078fSIntel 			}
2180af75078fSIntel 		}
2181af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2182af75078fSIntel 	}
2183af75078fSIntel }
2184af75078fSIntel 
2185af75078fSIntel static void
2186af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2187af75078fSIntel {
2188af75078fSIntel 	struct fwd_stream **fsm;
2189af75078fSIntel 	streamid_t nb_fs;
2190af75078fSIntel 	streamid_t sm_id;
2191a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
21927e4441c8SRemy Horton 	uint64_t tics_per_1sec;
21937e4441c8SRemy Horton 	uint64_t tics_datum;
21947e4441c8SRemy Horton 	uint64_t tics_current;
21954918a357SXiaoyun Li 	uint16_t i, cnt_ports;
2196af75078fSIntel 
21974918a357SXiaoyun Li 	cnt_ports = nb_ports;
21987e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
21997e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
22007e4441c8SRemy Horton #endif
2201af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
2202af75078fSIntel 	nb_fs = fc->stream_nb;
2203af75078fSIntel 	do {
2204af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2205af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
2206a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
2207e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
2208e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
22097e4441c8SRemy Horton 			tics_current = rte_rdtsc();
22107e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
22117e4441c8SRemy Horton 				/* Periodic bitrate calculation */
22124918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
2213e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
22144918a357SXiaoyun Li 						ports_ids[i]);
22157e4441c8SRemy Horton 				tics_datum = tics_current;
22167e4441c8SRemy Horton 			}
2217e25e6c70SRemy Horton 		}
22187e4441c8SRemy Horton #endif
2219a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
222065eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
222165eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
222262d3216dSReshma Pattan 			rte_latencystats_update();
222362d3216dSReshma Pattan #endif
222462d3216dSReshma Pattan 
2225af75078fSIntel 	} while (! fc->stopped);
2226af75078fSIntel }
2227af75078fSIntel 
2228af75078fSIntel static int
2229af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2230af75078fSIntel {
2231af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2232af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2233af75078fSIntel 	return 0;
2234af75078fSIntel }
2235af75078fSIntel 
2236af75078fSIntel /*
2237af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2238af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2239af75078fSIntel  */
2240af75078fSIntel static int
2241af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2242af75078fSIntel {
2243af75078fSIntel 	struct fwd_lcore *fwd_lc;
2244af75078fSIntel 	struct fwd_lcore tmp_lcore;
2245af75078fSIntel 
2246af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2247af75078fSIntel 	tmp_lcore = *fwd_lc;
2248af75078fSIntel 	tmp_lcore.stopped = 1;
2249af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2250af75078fSIntel 	return 0;
2251af75078fSIntel }
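
/*
 * Note: copying the lcore context and setting "stopped" before calling
 * run_pkt_fwd_on_lcore() makes its do/while body execute exactly once, so
 * each stream transmits a single burst and the lcore returns immediately.
 */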
2252af75078fSIntel 
2253af75078fSIntel /*
2254af75078fSIntel  * Launch packet forwarding:
2255af75078fSIntel  *     - Setup per-port forwarding context.
2256af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2257af75078fSIntel  */
2258af75078fSIntel static void
2259af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2260af75078fSIntel {
2261af75078fSIntel 	unsigned int i;
2262af75078fSIntel 	unsigned int lc_id;
2263af75078fSIntel 	int diag;
2264af75078fSIntel 
2265af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2266af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2267af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2268af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2269af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2270af75078fSIntel 						     fwd_lcores[i], lc_id);
2271af75078fSIntel 			if (diag != 0)
227261a3b0e5SAndrew Rybchenko 				fprintf(stderr,
227361a3b0e5SAndrew Rybchenko 					"launch lcore %u failed - diag=%d\n",
2274af75078fSIntel 					lc_id, diag);
2275af75078fSIntel 		}
2276af75078fSIntel 	}
2277af75078fSIntel }
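
/*
 * rte_eal_remote_launch() hands one worker lcore a function to run; the
 * caller later joins with rte_eal_mp_wait_lcore(). The pattern in isolation
 * (a generic sketch; testpmd launches only on the configured fwd lcores,
 * and "worker" is a placeholder name):
 *
 *	static int worker(void *arg) { RTE_SET_USED(arg); return 0; }
 *
 *	unsigned int lc_id;
 *
 *	RTE_LCORE_FOREACH_WORKER(lc_id)
 *		rte_eal_remote_launch(worker, NULL, lc_id);
 *	rte_eal_mp_wait_lcore();
 */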
2278af75078fSIntel 
2279af75078fSIntel /*
2280af75078fSIntel  * Launch packet forwarding configuration.
2281af75078fSIntel  */
2282af75078fSIntel void
2283af75078fSIntel start_packet_forwarding(int with_tx_first)
2284af75078fSIntel {
2285af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2286af75078fSIntel 	port_fwd_end_t  port_fwd_end;
2287af75078fSIntel 	unsigned int i;
2288af75078fSIntel 
22895a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
22905a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
22915a8fb55cSReshma Pattan 
22925a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
22935a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
22945a8fb55cSReshma Pattan 
22955a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
22965a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
22975a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
22985a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
22995a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
23005a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
23015a8fb55cSReshma Pattan 
2302ce8d5614SIntel 	if (all_ports_started() == 0) {
230361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Not all ports were started\n");
2304ce8d5614SIntel 		return;
2305ce8d5614SIntel 	}
2306af75078fSIntel 	if (test_done == 0) {
230761a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding already started\n");
2308af75078fSIntel 		return;
2309af75078fSIntel 	}
23107741e4cfSIntel 
231147a767b2SMatan Azrad 	fwd_config_setup();
231247a767b2SMatan Azrad 
231365744833SXueming Li 	pkt_fwd_config_display(&cur_fwd_config);
231465744833SXueming Li 	if (!pkt_fwd_shared_rxq_check())
231565744833SXueming Li 		return;
231665744833SXueming Li 
2317a78040c9SAlvin Zhang 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2318a78040c9SAlvin Zhang 	if (port_fwd_begin != NULL) {
2319a78040c9SAlvin Zhang 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2320a78040c9SAlvin Zhang 			if (port_fwd_begin(fwd_ports_ids[i])) {
2321a78040c9SAlvin Zhang 				fprintf(stderr,
2322a78040c9SAlvin Zhang 					"Packet forwarding is not ready\n");
2323a78040c9SAlvin Zhang 				return;
2324a78040c9SAlvin Zhang 			}
2325a78040c9SAlvin Zhang 		}
2326a78040c9SAlvin Zhang 	}
2327a78040c9SAlvin Zhang 
2328a78040c9SAlvin Zhang 	if (with_tx_first) {
2329a78040c9SAlvin Zhang 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2330a78040c9SAlvin Zhang 		if (port_fwd_begin != NULL) {
2331a78040c9SAlvin Zhang 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2332a78040c9SAlvin Zhang 				if (port_fwd_begin(fwd_ports_ids[i])) {
2333a78040c9SAlvin Zhang 					fprintf(stderr,
2334a78040c9SAlvin Zhang 						"Packet forwarding is not ready\n");
2335a78040c9SAlvin Zhang 					return;
2336a78040c9SAlvin Zhang 				}
2337a78040c9SAlvin Zhang 			}
2338a78040c9SAlvin Zhang 		}
2339a78040c9SAlvin Zhang 	}
2340a78040c9SAlvin Zhang 
2341a78040c9SAlvin Zhang 	test_done = 0;
2342a78040c9SAlvin Zhang 
23437741e4cfSIntel 	if (!no_flush_rx)
23447741e4cfSIntel 		flush_fwd_rx_queues();
23457741e4cfSIntel 
2346af75078fSIntel 	rxtx_config_display();
2347af75078fSIntel 
234853324971SDavid Marchand 	fwd_stats_reset();
2349af75078fSIntel 	if (with_tx_first) {
2350acbf77a6SZhihong Wang 		while (with_tx_first--) {
2351acbf77a6SZhihong Wang 			launch_packet_forwarding(
2352acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
2353af75078fSIntel 			rte_eal_mp_wait_lcore();
2354acbf77a6SZhihong Wang 		}
2355af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
2356af75078fSIntel 		if (port_fwd_end != NULL) {
2357af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2358af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
2359af75078fSIntel 		}
2360af75078fSIntel 	}
2361af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
2362af75078fSIntel }
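
/*
 * In interactive mode this function backs the "start" command; the
 * "start tx_first [n]" variant sets with_tx_first so that n Tx-only bursts
 * are sent before the configured engine takes over, which is what seeds
 * traffic in loopback test setups.
 */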
2363af75078fSIntel 
2364af75078fSIntel void
2365af75078fSIntel stop_packet_forwarding(void)
2366af75078fSIntel {
2367af75078fSIntel 	port_fwd_end_t port_fwd_end;
2368af75078fSIntel 	lcoreid_t lc_id;
236953324971SDavid Marchand 	portid_t pt_id;
237053324971SDavid Marchand 	int i;
2371af75078fSIntel 
2372af75078fSIntel 	if (test_done) {
237361a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Packet forwarding not started\n");
2374af75078fSIntel 		return;
2375af75078fSIntel 	}
2376af75078fSIntel 	printf("Telling cores to stop...");
2377af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2378af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2379af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2380af75078fSIntel 	rte_eal_mp_wait_lcore();
2381af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2382af75078fSIntel 	if (port_fwd_end != NULL) {
2383af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2384af75078fSIntel 			pt_id = fwd_ports_ids[i];
2385af75078fSIntel 			(*port_fwd_end)(pt_id);
2386af75078fSIntel 		}
2387af75078fSIntel 	}
2388c185d42cSDavid Marchand 
238953324971SDavid Marchand 	fwd_stats_display();
239058d475b7SJerin Jacob 
2391af75078fSIntel 	printf("\nDone.\n");
2392af75078fSIntel 	test_done = 1;
2393af75078fSIntel }
2394af75078fSIntel 
2395cfae07fdSOuyang Changchun void
2396cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2397cfae07fdSOuyang Changchun {
2398492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
239961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link up fail.\n");
2400cfae07fdSOuyang Changchun }
2401cfae07fdSOuyang Changchun 
2402cfae07fdSOuyang Changchun void
2403cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2404cfae07fdSOuyang Changchun {
2405492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
240661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSet link down fail.\n");
2407cfae07fdSOuyang Changchun }
2408cfae07fdSOuyang Changchun 
2409ce8d5614SIntel static int
2410ce8d5614SIntel all_ports_started(void)
2411ce8d5614SIntel {
2412ce8d5614SIntel 	portid_t pi;
2413ce8d5614SIntel 	struct rte_port *port;
2414ce8d5614SIntel 
24157d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2416ce8d5614SIntel 		port = &ports[pi];
2417ce8d5614SIntel 		/* Check if there is a port which is not started */
241841b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
241941b05095SBernard Iremonger 			(port->slave_flag == 0))
2420ce8d5614SIntel 			return 0;
2421ce8d5614SIntel 	}
2422ce8d5614SIntel 
2423ce8d5614SIntel 	/* All ports (bonding slaves aside) are started */
2424ce8d5614SIntel 	return 1;
2425ce8d5614SIntel }
2426ce8d5614SIntel 
2427148f963fSBruce Richardson int
24286018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
24296018eb8cSShahaf Shuler {
24306018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
24316018eb8cSShahaf Shuler 
24326018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
24336018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
24346018eb8cSShahaf Shuler 		return 0;
24356018eb8cSShahaf Shuler 	return 1;
24366018eb8cSShahaf Shuler }
24376018eb8cSShahaf Shuler 
24386018eb8cSShahaf Shuler int
2439edab33b1STetsuya Mukawa all_ports_stopped(void)
2440edab33b1STetsuya Mukawa {
2441edab33b1STetsuya Mukawa 	portid_t pi;
2442edab33b1STetsuya Mukawa 
24437d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
24446018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2445edab33b1STetsuya Mukawa 			return 0;
2446edab33b1STetsuya Mukawa 	}
2447edab33b1STetsuya Mukawa 
2448edab33b1STetsuya Mukawa 	return 1;
2449edab33b1STetsuya Mukawa }
2450edab33b1STetsuya Mukawa 
2451edab33b1STetsuya Mukawa int
2452edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2453edab33b1STetsuya Mukawa {
2454edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2455edab33b1STetsuya Mukawa 		return 0;
2456edab33b1STetsuya Mukawa 
2457edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2458edab33b1STetsuya Mukawa 		return 0;
2459edab33b1STetsuya Mukawa 
2460edab33b1STetsuya Mukawa 	return 1;
2461edab33b1STetsuya Mukawa }
2462edab33b1STetsuya Mukawa 
24631c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
24641c69df45SOri Kam static int
246501817b10SBing Zhao setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
24661c69df45SOri Kam {
24671c69df45SOri Kam 	queueid_t qi;
24681c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
24691c69df45SOri Kam 		.peer_count = 1,
24701c69df45SOri Kam 	};
24711c69df45SOri Kam 	int i;
24721c69df45SOri Kam 	int diag;
24731c69df45SOri Kam 	struct rte_port *port = &ports[pi];
247401817b10SBing Zhao 	uint16_t peer_rx_port = pi;
247501817b10SBing Zhao 	uint16_t peer_tx_port = pi;
247601817b10SBing Zhao 	uint32_t manual = 1;
247701817b10SBing Zhao 	uint32_t tx_exp = hairpin_mode & 0x10;
247801817b10SBing Zhao 
247901817b10SBing Zhao 	if (!(hairpin_mode & 0xf)) {
248001817b10SBing Zhao 		peer_rx_port = pi;
248101817b10SBing Zhao 		peer_tx_port = pi;
248201817b10SBing Zhao 		manual = 0;
248301817b10SBing Zhao 	} else if (hairpin_mode & 0x1) {
248401817b10SBing Zhao 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
248501817b10SBing Zhao 						       RTE_ETH_DEV_NO_OWNER);
248601817b10SBing Zhao 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
248701817b10SBing Zhao 			peer_tx_port = rte_eth_find_next_owned_by(0,
248801817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
248901817b10SBing Zhao 		if (p_pi != RTE_MAX_ETHPORTS) {
249001817b10SBing Zhao 			peer_rx_port = p_pi;
249101817b10SBing Zhao 		} else {
249201817b10SBing Zhao 			uint16_t next_pi;
249301817b10SBing Zhao 
249401817b10SBing Zhao 			/* Last port will be the peer RX port of the first. */
249501817b10SBing Zhao 			RTE_ETH_FOREACH_DEV(next_pi)
249601817b10SBing Zhao 				peer_rx_port = next_pi;
249701817b10SBing Zhao 		}
249801817b10SBing Zhao 		manual = 1;
249901817b10SBing Zhao 	} else if (hairpin_mode & 0x2) {
250001817b10SBing Zhao 		if (cnt_pi & 0x1) {
250101817b10SBing Zhao 			peer_rx_port = p_pi;
250201817b10SBing Zhao 		} else {
250301817b10SBing Zhao 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
250401817b10SBing Zhao 						RTE_ETH_DEV_NO_OWNER);
250501817b10SBing Zhao 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
250601817b10SBing Zhao 				peer_rx_port = pi;
250701817b10SBing Zhao 		}
250801817b10SBing Zhao 		peer_tx_port = peer_rx_port;
250901817b10SBing Zhao 		manual = 1;
251001817b10SBing Zhao 	}
25111c69df45SOri Kam 
25121c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
251301817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_rx_port;
25141c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
251501817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
251601817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
25171c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
25181c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
25191c69df45SOri Kam 		i++;
25201c69df45SOri Kam 		if (diag == 0)
25211c69df45SOri Kam 			continue;
25221c69df45SOri Kam 
25231c69df45SOri Kam 		/* Failed to set up a Tx hairpin queue, return */
25241c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
25251c69df45SOri Kam 					RTE_PORT_HANDLING,
25261c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
252761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
252861a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
252961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
253061a3b0e5SAndrew Rybchenko 			pi);
25311c69df45SOri Kam 		/* try to reconfigure queues next time */
25321c69df45SOri Kam 		port->need_reconfig_queues = 1;
25331c69df45SOri Kam 		return -1;
25341c69df45SOri Kam 	}
25351c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
253601817b10SBing Zhao 		hairpin_conf.peers[0].port = peer_tx_port;
25371c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
253801817b10SBing Zhao 		hairpin_conf.manual_bind = !!manual;
253901817b10SBing Zhao 		hairpin_conf.tx_explicit = !!tx_exp;
25401c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
25411c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
25421c69df45SOri Kam 		i++;
25431c69df45SOri Kam 		if (diag == 0)
25441c69df45SOri Kam 			continue;
25451c69df45SOri Kam 
25461c69df45SOri Kam 		/* Failed to set up an Rx hairpin queue, return */
25471c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
25481c69df45SOri Kam 					RTE_PORT_HANDLING,
25491c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
255061a3b0e5SAndrew Rybchenko 			fprintf(stderr,
255161a3b0e5SAndrew Rybchenko 				"Port %d can not be set back to stopped\n", pi);
255261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
255361a3b0e5SAndrew Rybchenko 			pi);
25541c69df45SOri Kam 		/* try to reconfigure queues next time */
25551c69df45SOri Kam 		port->need_reconfig_queues = 1;
25561c69df45SOri Kam 		return -1;
25571c69df45SOri Kam 	}
25581c69df45SOri Kam 	return 0;
25591c69df45SOri Kam }
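
/*
 * Reading the hairpin_mode bits as used above (mirroring the hairpin-mode
 * command-line bits): a zero low nibble peers each port with itself and
 * lets the PMD bind the queues automatically; bit 0 chains the ports into
 * a loop (traffic received on port N is sent from port N+1, the last port
 * wrapping to the first); bit 1 binds ports in adjacent pairs; bit 4
 * (0x10) requests explicit Tx flow mode. Any manual topology defers the
 * actual binding to rte_eth_hairpin_bind() at port start time.
 */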
25601c69df45SOri Kam 
25612befc67fSViacheslav Ovsiienko /* Configure the Rx with optional split. */
25622befc67fSViacheslav Ovsiienko int
25632befc67fSViacheslav Ovsiienko rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
25642befc67fSViacheslav Ovsiienko 	       uint16_t nb_rx_desc, unsigned int socket_id,
25652befc67fSViacheslav Ovsiienko 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
25662befc67fSViacheslav Ovsiienko {
25672befc67fSViacheslav Ovsiienko 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
25682befc67fSViacheslav Ovsiienko 	unsigned int i, mp_n;
25692befc67fSViacheslav Ovsiienko 	int ret;
25702befc67fSViacheslav Ovsiienko 
25712befc67fSViacheslav Ovsiienko 	if (rx_pkt_nb_segs <= 1 ||
25722befc67fSViacheslav Ovsiienko 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
25732befc67fSViacheslav Ovsiienko 		rx_conf->rx_seg = NULL;
25742befc67fSViacheslav Ovsiienko 		rx_conf->rx_nseg = 0;
25752befc67fSViacheslav Ovsiienko 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
25762befc67fSViacheslav Ovsiienko 					     nb_rx_desc, socket_id,
25772befc67fSViacheslav Ovsiienko 					     rx_conf, mp);
25782befc67fSViacheslav Ovsiienko 		return ret;
25792befc67fSViacheslav Ovsiienko 	}
25802befc67fSViacheslav Ovsiienko 	for (i = 0; i < rx_pkt_nb_segs; i++) {
25812befc67fSViacheslav Ovsiienko 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
25822befc67fSViacheslav Ovsiienko 		struct rte_mempool *mpx;
25832befc67fSViacheslav Ovsiienko 		/*
25842befc67fSViacheslav Ovsiienko 		 * Use the last valid pool for any segment whose index
25852befc67fSViacheslav Ovsiienko 		 * exceeds the number of configured mempools.
25862befc67fSViacheslav Ovsiienko 		 */
25872befc67fSViacheslav Ovsiienko 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
25882befc67fSViacheslav Ovsiienko 		mpx = mbuf_pool_find(socket_id, mp_n);
25892befc67fSViacheslav Ovsiienko 		/* Handle zero as mbuf data buffer size. */
25902befc67fSViacheslav Ovsiienko 		rx_seg->length = rx_pkt_seg_lengths[i] ?
25912befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_lengths[i] :
25922befc67fSViacheslav Ovsiienko 				   mbuf_data_size[mp_n];
25932befc67fSViacheslav Ovsiienko 		rx_seg->offset = i < rx_pkt_nb_offs ?
25942befc67fSViacheslav Ovsiienko 				   rx_pkt_seg_offsets[i] : 0;
25952befc67fSViacheslav Ovsiienko 		rx_seg->mp = mpx ? mpx : mp;
25962befc67fSViacheslav Ovsiienko 	}
25972befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = rx_pkt_nb_segs;
25982befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = rx_useg;
25992befc67fSViacheslav Ovsiienko 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
26002befc67fSViacheslav Ovsiienko 				    socket_id, rx_conf, NULL);
26012befc67fSViacheslav Ovsiienko 	rx_conf->rx_seg = NULL;
26022befc67fSViacheslav Ovsiienko 	rx_conf->rx_nseg = 0;
26032befc67fSViacheslav Ovsiienko 	return ret;
26042befc67fSViacheslav Ovsiienko }
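
/*
 * Worked example of the mapping above, assuming two mempools with data
 * room sizes 2048 and 4096 and rx_pkt_seg_lengths = {128, 0}: segment 0
 * takes 128 bytes from pool 0, segment 1 falls back to its pool's full
 * data room size (4096) from pool 1, and any further segment reuses pool
 * 1, the last valid one. Note that rx_seg/rx_nseg are only borrowed by
 * rx_conf for the duration of the setup call and reset before returning.
 */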
26052befc67fSViacheslav Ovsiienko 
260663b72657SIvan Ilchenko static int
260763b72657SIvan Ilchenko alloc_xstats_display_info(portid_t pi)
260863b72657SIvan Ilchenko {
260963b72657SIvan Ilchenko 	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
261063b72657SIvan Ilchenko 	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
261163b72657SIvan Ilchenko 	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
261263b72657SIvan Ilchenko 
261363b72657SIvan Ilchenko 	if (xstats_display_num == 0)
261463b72657SIvan Ilchenko 		return 0;
261563b72657SIvan Ilchenko 
261663b72657SIvan Ilchenko 	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
261763b72657SIvan Ilchenko 	if (*ids_supp == NULL)
261863b72657SIvan Ilchenko 		goto fail_ids_supp;
261963b72657SIvan Ilchenko 
262063b72657SIvan Ilchenko 	*prev_values = calloc(xstats_display_num,
262163b72657SIvan Ilchenko 			      sizeof(**prev_values));
262263b72657SIvan Ilchenko 	if (*prev_values == NULL)
262363b72657SIvan Ilchenko 		goto fail_prev_values;
262463b72657SIvan Ilchenko 
262563b72657SIvan Ilchenko 	*curr_values = calloc(xstats_display_num,
262663b72657SIvan Ilchenko 			      sizeof(**curr_values));
262763b72657SIvan Ilchenko 	if (*curr_values == NULL)
262863b72657SIvan Ilchenko 		goto fail_curr_values;
262963b72657SIvan Ilchenko 
263063b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = true;
263163b72657SIvan Ilchenko 
263263b72657SIvan Ilchenko 	return 0;
263363b72657SIvan Ilchenko 
263463b72657SIvan Ilchenko fail_curr_values:
263563b72657SIvan Ilchenko 	free(*prev_values);
263663b72657SIvan Ilchenko fail_prev_values:
263763b72657SIvan Ilchenko 	free(*ids_supp);
263863b72657SIvan Ilchenko fail_ids_supp:
263963b72657SIvan Ilchenko 	return -ENOMEM;
264063b72657SIvan Ilchenko }
264163b72657SIvan Ilchenko 
264263b72657SIvan Ilchenko static void
264363b72657SIvan Ilchenko free_xstats_display_info(portid_t pi)
264463b72657SIvan Ilchenko {
264563b72657SIvan Ilchenko 	if (!ports[pi].xstats_info.allocated)
264663b72657SIvan Ilchenko 		return;
264763b72657SIvan Ilchenko 	free(ports[pi].xstats_info.ids_supp);
264863b72657SIvan Ilchenko 	free(ports[pi].xstats_info.prev_values);
264963b72657SIvan Ilchenko 	free(ports[pi].xstats_info.curr_values);
265063b72657SIvan Ilchenko 	ports[pi].xstats_info.allocated = false;
265163b72657SIvan Ilchenko }
265263b72657SIvan Ilchenko 
265363b72657SIvan Ilchenko /** Fill helper structures for specified port to show extended statistics. */
265463b72657SIvan Ilchenko static void
265563b72657SIvan Ilchenko fill_xstats_display_info_for_port(portid_t pi)
265663b72657SIvan Ilchenko {
265763b72657SIvan Ilchenko 	unsigned int stat, stat_supp;
265863b72657SIvan Ilchenko 	const char *xstat_name;
265963b72657SIvan Ilchenko 	struct rte_port *port;
266063b72657SIvan Ilchenko 	uint64_t *ids_supp;
266163b72657SIvan Ilchenko 	int rc;
266263b72657SIvan Ilchenko 
266363b72657SIvan Ilchenko 	if (xstats_display_num == 0)
266463b72657SIvan Ilchenko 		return;
266563b72657SIvan Ilchenko 
266663b72657SIvan Ilchenko 	if (pi == (portid_t)RTE_PORT_ALL) {
266763b72657SIvan Ilchenko 		fill_xstats_display_info();
266863b72657SIvan Ilchenko 		return;
266963b72657SIvan Ilchenko 	}
267063b72657SIvan Ilchenko 
267163b72657SIvan Ilchenko 	port = &ports[pi];
267263b72657SIvan Ilchenko 	if (port->port_status != RTE_PORT_STARTED)
267363b72657SIvan Ilchenko 		return;
267463b72657SIvan Ilchenko 
267563b72657SIvan Ilchenko 	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
267663b72657SIvan Ilchenko 		rte_exit(EXIT_FAILURE,
267763b72657SIvan Ilchenko 			 "Failed to allocate xstats display memory\n");
267863b72657SIvan Ilchenko 
267963b72657SIvan Ilchenko 	ids_supp = port->xstats_info.ids_supp;
268063b72657SIvan Ilchenko 	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
268163b72657SIvan Ilchenko 		xstat_name = xstats_display[stat].name;
268263b72657SIvan Ilchenko 		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
268363b72657SIvan Ilchenko 						   ids_supp + stat_supp);
268463b72657SIvan Ilchenko 		if (rc != 0) {
268563b72657SIvan Ilchenko 			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
268663b72657SIvan Ilchenko 				xstat_name, pi, stat);
268763b72657SIvan Ilchenko 			continue;
268863b72657SIvan Ilchenko 		}
268963b72657SIvan Ilchenko 		stat_supp++;
269063b72657SIvan Ilchenko 	}
269163b72657SIvan Ilchenko 
269263b72657SIvan Ilchenko 	port->xstats_info.ids_supp_sz = stat_supp;
269363b72657SIvan Ilchenko }
269463b72657SIvan Ilchenko 
269563b72657SIvan Ilchenko /** Fill helper structures for all ports to show extended statistics. */
269663b72657SIvan Ilchenko static void
269763b72657SIvan Ilchenko fill_xstats_display_info(void)
269863b72657SIvan Ilchenko {
269963b72657SIvan Ilchenko 	portid_t pi;
270063b72657SIvan Ilchenko 
270163b72657SIvan Ilchenko 	if (xstats_display_num == 0)
270263b72657SIvan Ilchenko 		return;
270363b72657SIvan Ilchenko 
270463b72657SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(pi)
270563b72657SIvan Ilchenko 		fill_xstats_display_info_for_port(pi);
270663b72657SIvan Ilchenko }
270763b72657SIvan Ilchenko 
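
/*
 * The ids_supp table built above is what keeps the periodic display cheap:
 * with the numeric ids resolved once, each refresh can fetch only the
 * selected counters, along the lines of (a sketch using the public API):
 *
 *	rte_eth_xstats_get_by_id(pi, ports[pi].xstats_info.ids_supp,
 *				 ports[pi].xstats_info.curr_values,
 *				 ports[pi].xstats_info.ids_supp_sz);
 */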
2708edab33b1STetsuya Mukawa int
2709ce8d5614SIntel start_port(portid_t pid)
2710ce8d5614SIntel {
271192d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2712ce8d5614SIntel 	portid_t pi;
271301817b10SBing Zhao 	portid_t p_pi = RTE_MAX_ETHPORTS;
271401817b10SBing Zhao 	portid_t pl[RTE_MAX_ETHPORTS];
271501817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
271601817b10SBing Zhao 	uint16_t cnt_pi = 0;
271701817b10SBing Zhao 	uint16_t cfg_pi = 0;
271801817b10SBing Zhao 	int peer_pi;
2719ce8d5614SIntel 	queueid_t qi;
2720ce8d5614SIntel 	struct rte_port *port;
27211c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
2722ce8d5614SIntel 
27234468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
27244468635fSMichael Qiu 		return 0;
27254468635fSMichael Qiu 
27267d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2727edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2728ce8d5614SIntel 			continue;
2729ce8d5614SIntel 
273092d2703eSMichael Qiu 		need_check_link_status = 0;
2731ce8d5614SIntel 		port = &ports[pi];
2732ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2733ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
273461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is now not stopped\n", pi);
2735ce8d5614SIntel 			continue;
2736ce8d5614SIntel 		}
2737ce8d5614SIntel 
2738ce8d5614SIntel 		if (port->need_reconfig > 0) {
2739655eae01SJie Wang 			struct rte_eth_conf dev_conf;
2740655eae01SJie Wang 			int k;
2741655eae01SJie Wang 
2742ce8d5614SIntel 			port->need_reconfig = 0;
2743ce8d5614SIntel 
27447ee3e944SVasily Philipov 			if (flow_isolate_all) {
27457ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
27467ee3e944SVasily Philipov 				if (ret) {
274761a3b0e5SAndrew Rybchenko 					fprintf(stderr,
274861a3b0e5SAndrew Rybchenko 						"Failed to apply isolated mode on port %d\n",
274961a3b0e5SAndrew Rybchenko 						pi);
27507ee3e944SVasily Philipov 					return -1;
27517ee3e944SVasily Philipov 				}
27527ee3e944SVasily Philipov 			}
2753b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
27545706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
275520a0286fSLiu Xiaofeng 					port->socket_id);
27561c69df45SOri Kam 			if (nb_hairpinq > 0 &&
27571c69df45SOri Kam 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
275861a3b0e5SAndrew Rybchenko 				fprintf(stderr,
275961a3b0e5SAndrew Rybchenko 					"Port %d doesn't support hairpin queues\n",
276061a3b0e5SAndrew Rybchenko 					pi);
27611c69df45SOri Kam 				return -1;
27621c69df45SOri Kam 			}
27631bb4a528SFerruh Yigit 
2764ce8d5614SIntel 			/* configure port */
2765a550baf2SMin Hu (Connor) 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
27661c69df45SOri Kam 						     nb_txq + nb_hairpinq,
2767ce8d5614SIntel 						     &(port->dev_conf));
2768ce8d5614SIntel 			if (diag != 0) {
2769ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2770ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
277161a3b0e5SAndrew Rybchenko 					fprintf(stderr,
277261a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
277361a3b0e5SAndrew Rybchenko 						pi);
277461a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Fail to configure port %d\n",
277561a3b0e5SAndrew Rybchenko 					pi);
2776ce8d5614SIntel 				/* try to reconfigure port next time */
2777ce8d5614SIntel 				port->need_reconfig = 1;
2778148f963fSBruce Richardson 				return -1;
2779ce8d5614SIntel 			}
2780655eae01SJie Wang 			/* get device configuration */
2781655eae01SJie Wang 			if (0 !=
2782655eae01SJie Wang 				eth_dev_conf_get_print_err(pi, &dev_conf)) {
2783655eae01SJie Wang 				fprintf(stderr,
2784655eae01SJie Wang 					"port %d can not get device configuration\n",
2785655eae01SJie Wang 					pi);
2786655eae01SJie Wang 				return -1;
2787655eae01SJie Wang 			}
2788655eae01SJie Wang 			/* Apply Rx offloads configuration */
2789655eae01SJie Wang 			if (dev_conf.rxmode.offloads !=
2790655eae01SJie Wang 			    port->dev_conf.rxmode.offloads) {
2791655eae01SJie Wang 				port->dev_conf.rxmode.offloads |=
2792655eae01SJie Wang 					dev_conf.rxmode.offloads;
2793655eae01SJie Wang 				for (k = 0;
2794655eae01SJie Wang 				     k < port->dev_info.max_rx_queues;
2795655eae01SJie Wang 				     k++)
2796655eae01SJie Wang 					port->rx_conf[k].offloads |=
2797655eae01SJie Wang 						dev_conf.rxmode.offloads;
2798655eae01SJie Wang 			}
2799655eae01SJie Wang 			/* Apply Tx offloads configuration */
2800655eae01SJie Wang 			if (dev_conf.txmode.offloads !=
2801655eae01SJie Wang 			    port->dev_conf.txmode.offloads) {
2802655eae01SJie Wang 				port->dev_conf.txmode.offloads |=
2803655eae01SJie Wang 					dev_conf.txmode.offloads;
2804655eae01SJie Wang 				for (k = 0;
2805655eae01SJie Wang 				     k < port->dev_info.max_tx_queues;
2806655eae01SJie Wang 				     k++)
2807655eae01SJie Wang 					port->tx_conf[k].offloads |=
2808655eae01SJie Wang 						dev_conf.txmode.offloads;
2809655eae01SJie Wang 			}
2810ce8d5614SIntel 		}
2811a550baf2SMin Hu (Connor) 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2812ce8d5614SIntel 			port->need_reconfig_queues = 0;
2813ce8d5614SIntel 			/* setup tx queues */
2814ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2815b6ea6408SIntel 				if ((numa_support) &&
2816b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2817b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2818d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2819d44f8a48SQi Zhang 						txring_numa[pi],
2820d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2821b6ea6408SIntel 				else
2822b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2823d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2824d44f8a48SQi Zhang 						port->socket_id,
2825d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2826b6ea6408SIntel 
2827ce8d5614SIntel 				if (diag == 0)
2828ce8d5614SIntel 					continue;
2829ce8d5614SIntel 
2830ce8d5614SIntel 				/* Failed to set up a Tx queue, return */
2831ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2832ce8d5614SIntel 							RTE_PORT_HANDLING,
2833ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
283461a3b0e5SAndrew Rybchenko 					fprintf(stderr,
283561a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
283661a3b0e5SAndrew Rybchenko 						pi);
283761a3b0e5SAndrew Rybchenko 				fprintf(stderr,
283861a3b0e5SAndrew Rybchenko 					"Fail to configure port %d tx queues\n",
2839d44f8a48SQi Zhang 					pi);
2840ce8d5614SIntel 				/* try to reconfigure queues next time */
2841ce8d5614SIntel 				port->need_reconfig_queues = 1;
2842148f963fSBruce Richardson 				return -1;
2843ce8d5614SIntel 			}
2844ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2845d44f8a48SQi Zhang 				/* setup rx queues */
2846b6ea6408SIntel 				if ((numa_support) &&
2847b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2848b6ea6408SIntel 					struct rte_mempool *mp =
284926cbb419SViacheslav Ovsiienko 						mbuf_pool_find
285026cbb419SViacheslav Ovsiienko 							(rxring_numa[pi], 0);
2851b6ea6408SIntel 					if (mp == NULL) {
285261a3b0e5SAndrew Rybchenko 						fprintf(stderr,
285361a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
2854b6ea6408SIntel 							rxring_numa[pi]);
2855148f963fSBruce Richardson 						return -1;
2856b6ea6408SIntel 					}
2857b6ea6408SIntel 
28582befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2859d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2860d44f8a48SQi Zhang 					     rxring_numa[pi],
2861d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2862d44f8a48SQi Zhang 					     mp);
28631e1d6bddSBernard Iremonger 				} else {
28641e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
286526cbb419SViacheslav Ovsiienko 						mbuf_pool_find
286626cbb419SViacheslav Ovsiienko 							(port->socket_id, 0);
28671e1d6bddSBernard Iremonger 					if (mp == NULL) {
286861a3b0e5SAndrew Rybchenko 						fprintf(stderr,
286961a3b0e5SAndrew Rybchenko 							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
28701e1d6bddSBernard Iremonger 							port->socket_id);
28711e1d6bddSBernard Iremonger 						return -1;
2872b6ea6408SIntel 					}
28732befc67fSViacheslav Ovsiienko 					diag = rx_queue_setup(pi, qi,
2874d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2875d44f8a48SQi Zhang 					     port->socket_id,
2876d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2877d44f8a48SQi Zhang 					     mp);
28781e1d6bddSBernard Iremonger 				}
2879ce8d5614SIntel 				if (diag == 0)
2880ce8d5614SIntel 					continue;
2881ce8d5614SIntel 
2882ce8d5614SIntel 				/* Failed to set up an Rx queue, return */
2883ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2884ce8d5614SIntel 							RTE_PORT_HANDLING,
2885ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
288661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
288761a3b0e5SAndrew Rybchenko 						"Port %d can not be set back to stopped\n",
288861a3b0e5SAndrew Rybchenko 						pi);
288961a3b0e5SAndrew Rybchenko 				fprintf(stderr,
289061a3b0e5SAndrew Rybchenko 					"Fail to configure port %d rx queues\n",
2891d44f8a48SQi Zhang 					pi);
2892ce8d5614SIntel 				/* try to reconfigure queues next time */
2893ce8d5614SIntel 				port->need_reconfig_queues = 1;
2894148f963fSBruce Richardson 				return -1;
2895ce8d5614SIntel 			}
28961c69df45SOri Kam 			/* setup hairpin queues */
289701817b10SBing Zhao 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
28981c69df45SOri Kam 				return -1;
2899ce8d5614SIntel 		}
2900b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2901b0a9354aSPavan Nikhilesh 		if (clear_ptypes) {
2902b0a9354aSPavan Nikhilesh 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2903b0a9354aSPavan Nikhilesh 					NULL, 0);
2904b0a9354aSPavan Nikhilesh 			if (diag < 0)
290561a3b0e5SAndrew Rybchenko 				fprintf(stderr,
2906b0a9354aSPavan Nikhilesh 					"Port %d: Failed to disable Ptype parsing\n",
2907b0a9354aSPavan Nikhilesh 					pi);
2908b0a9354aSPavan Nikhilesh 		}
2909b0a9354aSPavan Nikhilesh 
291001817b10SBing Zhao 		p_pi = pi;
291101817b10SBing Zhao 		cnt_pi++;
291201817b10SBing Zhao 
2913ce8d5614SIntel 		/* start port */
2914a550baf2SMin Hu (Connor) 		diag = eth_dev_start_mp(pi);
291552f2c6f2SAndrew Rybchenko 		if (diag < 0) {
291661a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Fail to start port %d: %s\n",
291761a3b0e5SAndrew Rybchenko 				pi, rte_strerror(-diag));
2918ce8d5614SIntel 
2919ce8d5614SIntel 			/* Failed to start the port, revert it to stopped */
2920ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2921ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
292261a3b0e5SAndrew Rybchenko 				fprintf(stderr,
292361a3b0e5SAndrew Rybchenko 					"Port %d can not be set back to stopped\n",
292461a3b0e5SAndrew Rybchenko 					pi);
2925ce8d5614SIntel 			continue;
2926ce8d5614SIntel 		}
2927ce8d5614SIntel 
2928ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2929ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
293061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into started\n",
293161a3b0e5SAndrew Rybchenko 				pi);
2932ce8d5614SIntel 
29335ffc4a2aSYuying Zhang 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2934c2c4f87bSAman Deep Singh 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2935a7db3afcSAman Deep Singh 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2936d8c89163SZijie Pan 
2937ce8d5614SIntel 		/* at least one port started, need checking link status */
2938ce8d5614SIntel 		need_check_link_status = 1;
293901817b10SBing Zhao 
294001817b10SBing Zhao 		pl[cfg_pi++] = pi;
2941ce8d5614SIntel 	}
2942ce8d5614SIntel 
294392d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2944edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
294592d2703eSMichael Qiu 	else if (need_check_link_status == 0)
294661a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Please stop the ports first\n");
2947ce8d5614SIntel 
294801817b10SBing Zhao 	if (hairpin_mode & 0xf) {
294901817b10SBing Zhao 		uint16_t i;
295001817b10SBing Zhao 		int j;
295101817b10SBing Zhao 
295201817b10SBing Zhao 		/* bind all started hairpin ports */
295301817b10SBing Zhao 		for (i = 0; i < cfg_pi; i++) {
295401817b10SBing Zhao 			pi = pl[i];
295501817b10SBing Zhao 			/* bind current Tx to all peer Rx */
295601817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
295701817b10SBing Zhao 							RTE_MAX_ETHPORTS, 1);
295801817b10SBing Zhao 			if (peer_pi < 0)
295901817b10SBing Zhao 				return peer_pi;
296001817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
296101817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
296201817b10SBing Zhao 					continue;
296301817b10SBing Zhao 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
296401817b10SBing Zhao 				if (diag < 0) {
296561a3b0e5SAndrew Rybchenko 					fprintf(stderr,
296661a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
296701817b10SBing Zhao 						pi, peer_pl[j],
296801817b10SBing Zhao 						rte_strerror(-diag));
296901817b10SBing Zhao 					return -1;
297001817b10SBing Zhao 				}
297101817b10SBing Zhao 			}
297201817b10SBing Zhao 			/* bind all peer Tx to current Rx */
297301817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
297401817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
297501817b10SBing Zhao 			if (peer_pi < 0)
297601817b10SBing Zhao 				return peer_pi;
297701817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
297801817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
297901817b10SBing Zhao 					continue;
298001817b10SBing Zhao 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
298101817b10SBing Zhao 				if (diag < 0) {
298261a3b0e5SAndrew Rybchenko 					fprintf(stderr,
298361a3b0e5SAndrew Rybchenko 						"Error during binding hairpin Tx port %u to %u: %s\n",
298401817b10SBing Zhao 						peer_pl[j], pi,
298501817b10SBing Zhao 						rte_strerror(-diag));
298601817b10SBing Zhao 					return -1;
298701817b10SBing Zhao 				}
298801817b10SBing Zhao 			}
298901817b10SBing Zhao 		}
299001817b10SBing Zhao 	}
299101817b10SBing Zhao 
299263b72657SIvan Ilchenko 	fill_xstats_display_info_for_port(pid);
299363b72657SIvan Ilchenko 
2994ce8d5614SIntel 	printf("Done\n");
2995148f963fSBruce Richardson 	return 0;
2996ce8d5614SIntel }
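
/*
 * Stripped of testpmd bookkeeping, the per-port bring-up above follows the
 * canonical ethdev sequence, roughly (a sketch with a single queue pair;
 * "conf", "sock" and "mp" stand in for locally prepared values):
 *
 *	rte_eth_dev_configure(pid, 1, 1, &conf);
 *	rte_eth_tx_queue_setup(pid, 0, nb_txd, sock, NULL);
 *	rte_eth_rx_queue_setup(pid, 0, nb_rxd, sock, NULL, mp);
 *	rte_eth_dev_start(pid);
 */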
2997ce8d5614SIntel 
2998ce8d5614SIntel void
2999ce8d5614SIntel stop_port(portid_t pid)
3000ce8d5614SIntel {
3001ce8d5614SIntel 	portid_t pi;
3002ce8d5614SIntel 	struct rte_port *port;
3003ce8d5614SIntel 	int need_check_link_status = 0;
300401817b10SBing Zhao 	portid_t peer_pl[RTE_MAX_ETHPORTS];
300501817b10SBing Zhao 	int peer_pi;
3006ce8d5614SIntel 
30074468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
30084468635fSMichael Qiu 		return;
30094468635fSMichael Qiu 
3010ce8d5614SIntel 	printf("Stopping ports...\n");
3011ce8d5614SIntel 
30127d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
30134468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3014ce8d5614SIntel 			continue;
3015ce8d5614SIntel 
3016a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
301761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
301861a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
301961a3b0e5SAndrew Rybchenko 				pi);
3020a8ef3e3aSBernard Iremonger 			continue;
3021a8ef3e3aSBernard Iremonger 		}
3022a8ef3e3aSBernard Iremonger 
30230e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
302461a3b0e5SAndrew Rybchenko 			fprintf(stderr,
302561a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
302661a3b0e5SAndrew Rybchenko 				pi);
30270e545d30SBernard Iremonger 			continue;
30280e545d30SBernard Iremonger 		}
30290e545d30SBernard Iremonger 
3030ce8d5614SIntel 		port = &ports[pi];
3031ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
3032ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
3033ce8d5614SIntel 			continue;
3034ce8d5614SIntel 
303501817b10SBing Zhao 		if (hairpin_mode & 0xf) {
303601817b10SBing Zhao 			int j;
303701817b10SBing Zhao 
303801817b10SBing Zhao 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
303901817b10SBing Zhao 			/* unbind all peer Tx from current Rx */
304001817b10SBing Zhao 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
304101817b10SBing Zhao 							RTE_MAX_ETHPORTS, 0);
304201817b10SBing Zhao 			if (peer_pi < 0)
304301817b10SBing Zhao 				continue;
304401817b10SBing Zhao 			for (j = 0; j < peer_pi; j++) {
304501817b10SBing Zhao 				if (!port_is_started(peer_pl[j]))
304601817b10SBing Zhao 					continue;
304701817b10SBing Zhao 				rte_eth_hairpin_unbind(peer_pl[j], pi);
304801817b10SBing Zhao 			}
304901817b10SBing Zhao 		}
305001817b10SBing Zhao 
30510f93edbfSGregory Etelson 		if (port->flow_list)
30520f93edbfSGregory Etelson 			port_flow_flush(pi);
30530f93edbfSGregory Etelson 
3054a550baf2SMin Hu (Connor) 		if (eth_dev_stop_mp(pi) != 0)
3055e62c5a12SIvan Ilchenko 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3056e62c5a12SIvan Ilchenko 				pi);
3057ce8d5614SIntel 
3058ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
3059ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
306061a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d can not be set into stopped\n",
306161a3b0e5SAndrew Rybchenko 				pi);
3062ce8d5614SIntel 		need_check_link_status = 1;
3063ce8d5614SIntel 	}
3064bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
3065edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
3066ce8d5614SIntel 
3067ce8d5614SIntel 	printf("Done\n");
3068ce8d5614SIntel }
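
/*
 * Note the teardown order above: hairpin peers are unbound and flow rules
 * flushed before the port is stopped, since both reference resources that
 * stopping the port may tear down underneath them.
 */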
3069ce8d5614SIntel 
3070ce6959bfSWisam Jaddo static void
30714f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
3072ce6959bfSWisam Jaddo {
30734f1de450SThomas Monjalon 	portid_t i;
30744f1de450SThomas Monjalon 	portid_t new_total = 0;
3075ce6959bfSWisam Jaddo 
30764f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
30774f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
30784f1de450SThomas Monjalon 			array[new_total] = array[i];
30794f1de450SThomas Monjalon 			new_total++;
3080ce6959bfSWisam Jaddo 		}
30814f1de450SThomas Monjalon 	*total = new_total;
30824f1de450SThomas Monjalon }
30834f1de450SThomas Monjalon 
30844f1de450SThomas Monjalon static void
30854f1de450SThomas Monjalon remove_invalid_ports(void)
30864f1de450SThomas Monjalon {
30874f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
30884f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
30894f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
3090ce6959bfSWisam Jaddo }
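
/*
 * After a detach the port id space can have holes; the compaction above
 * keeps ports_ids[] and fwd_ports_ids[] dense and their counters in sync,
 * so iteration code elsewhere can keep using plain indices.
 */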
3091ce6959bfSWisam Jaddo 
3092ce8d5614SIntel void
3093ce8d5614SIntel close_port(portid_t pid)
3094ce8d5614SIntel {
3095ce8d5614SIntel 	portid_t pi;
3096ce8d5614SIntel 	struct rte_port *port;
3097ce8d5614SIntel 
30984468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
30994468635fSMichael Qiu 		return;
31004468635fSMichael Qiu 
3101ce8d5614SIntel 	printf("Closing ports...\n");
3102ce8d5614SIntel 
31037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
31044468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3105ce8d5614SIntel 			continue;
3106ce8d5614SIntel 
3107a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
310861a3b0e5SAndrew Rybchenko 			fprintf(stderr,
310961a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
311061a3b0e5SAndrew Rybchenko 				pi);
3111a8ef3e3aSBernard Iremonger 			continue;
3112a8ef3e3aSBernard Iremonger 		}
3113a8ef3e3aSBernard Iremonger 
31140e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
311561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
311661a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
311761a3b0e5SAndrew Rybchenko 				pi);
31180e545d30SBernard Iremonger 			continue;
31190e545d30SBernard Iremonger 		}
31200e545d30SBernard Iremonger 
3121ce8d5614SIntel 		port = &ports[pi];
3122ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
3123d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
312461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port %d is already closed\n", pi);
3125d4e8ad64SMichael Qiu 			continue;
3126d4e8ad64SMichael Qiu 		}
3127d4e8ad64SMichael Qiu 
3128a550baf2SMin Hu (Connor) 		if (is_proc_primary()) {
3129938a184aSAdrien Mazarguil 			port_flow_flush(pi);
313059f3a8acSGregory Etelson 			port_flex_item_flush(pi);
3131ce8d5614SIntel 			rte_eth_dev_close(pi);
3132ce8d5614SIntel 		}
313363b72657SIvan Ilchenko 
313463b72657SIvan Ilchenko 		free_xstats_display_info(pi);
3135a550baf2SMin Hu (Connor) 	}
3136ce8d5614SIntel 
313785c6571cSThomas Monjalon 	remove_invalid_ports();
3138ce8d5614SIntel 	printf("Done\n");
3139ce8d5614SIntel }
3140ce8d5614SIntel 
3141edab33b1STetsuya Mukawa void
314297f1e196SWei Dai reset_port(portid_t pid)
314397f1e196SWei Dai {
314497f1e196SWei Dai 	int diag;
314597f1e196SWei Dai 	portid_t pi;
314697f1e196SWei Dai 	struct rte_port *port;
314797f1e196SWei Dai 
314897f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
314997f1e196SWei Dai 		return;
315097f1e196SWei Dai 
31511cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
31521cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
315361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
315461a3b0e5SAndrew Rybchenko 			"Can not reset port(s), please stop port(s) first.\n");
31551cde1b9aSShougang Wang 		return;
31561cde1b9aSShougang Wang 	}
31571cde1b9aSShougang Wang 
315897f1e196SWei Dai 	printf("Resetting ports...\n");
315997f1e196SWei Dai 
316097f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
316197f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
316297f1e196SWei Dai 			continue;
316397f1e196SWei Dai 
316497f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
316561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
316661a3b0e5SAndrew Rybchenko 				"Please remove port %d from forwarding configuration.\n",
316761a3b0e5SAndrew Rybchenko 				pi);
316897f1e196SWei Dai 			continue;
316997f1e196SWei Dai 		}
317097f1e196SWei Dai 
317197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
317261a3b0e5SAndrew Rybchenko 			fprintf(stderr,
317361a3b0e5SAndrew Rybchenko 				"Please remove port %d from bonded device.\n",
317497f1e196SWei Dai 				pi);
317597f1e196SWei Dai 			continue;
317697f1e196SWei Dai 		}
317797f1e196SWei Dai 
317897f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
317997f1e196SWei Dai 		if (diag == 0) {
318097f1e196SWei Dai 			port = &ports[pi];
318197f1e196SWei Dai 			port->need_reconfig = 1;
318297f1e196SWei Dai 			port->need_reconfig_queues = 1;
318397f1e196SWei Dai 		} else {
318461a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
318561a3b0e5SAndrew Rybchenko 				pi, diag);
318697f1e196SWei Dai 		}
318797f1e196SWei Dai 	}
318897f1e196SWei Dai 
318997f1e196SWei Dai 	printf("Done\n");
319097f1e196SWei Dai }
319197f1e196SWei Dai 
319297f1e196SWei Dai void
3193edab33b1STetsuya Mukawa attach_port(char *identifier)
3194ce8d5614SIntel {
31954f1ed78eSThomas Monjalon 	portid_t pi;
3196c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
3197ce8d5614SIntel 
3198edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
3199edab33b1STetsuya Mukawa 
3200edab33b1STetsuya Mukawa 	if (identifier == NULL) {
320161a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Invalid parameters are specified\n");
3202edab33b1STetsuya Mukawa 		return;
3203ce8d5614SIntel 	}
3204ce8d5614SIntel 
320575b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
3206c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3207edab33b1STetsuya Mukawa 		return;
3208c9cce428SThomas Monjalon 	}
3209c9cce428SThomas Monjalon 
32104f1ed78eSThomas Monjalon 	/* first attach mode: event */
32114f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
32124f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
32134f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
32144f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
32154f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
32164f1ed78eSThomas Monjalon 				setup_attached_port(pi);
32174f1ed78eSThomas Monjalon 		return;
32184f1ed78eSThomas Monjalon 	}
32194f1ed78eSThomas Monjalon 
32204f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
322186fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
32224f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
322386fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
322486fa5de1SThomas Monjalon 			continue; /* port was already attached before */
3225c9cce428SThomas Monjalon 		setup_attached_port(pi);
3226c9cce428SThomas Monjalon 	}
322786fa5de1SThomas Monjalon }
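
/*
 * Two attach flows meet here: with setup_on_probe_event the new ports are
 * picked up asynchronously from the RTE_ETH_EVENT_NEW handler, otherwise
 * they are found synchronously by iterating the devargs match. From the
 * interactive prompt this is driven by, e.g.:
 *
 *	testpmd> port attach net_tap0,iface=tap0
 *
 * (the vdev identifier here is only an example).
 */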
3228c9cce428SThomas Monjalon 
3229c9cce428SThomas Monjalon static void
3230c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
3231c9cce428SThomas Monjalon {
3232c9cce428SThomas Monjalon 	unsigned int socket_id;
323334fc1051SIvan Ilchenko 	int ret;
3234edab33b1STetsuya Mukawa 
3235931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
323629841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
3237931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
323829841336SPhil Yang 		socket_id = socket_ids[0];
3239931126baSBernard Iremonger 	reconfig(pi, socket_id);
324034fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
324134fc1051SIvan Ilchenko 	if (ret != 0)
324261a3b0e5SAndrew Rybchenko 		fprintf(stderr,
324361a3b0e5SAndrew Rybchenko 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
324434fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
3245edab33b1STetsuya Mukawa 
32464f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
32474f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
32484f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
32494f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
3250edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
3251edab33b1STetsuya Mukawa 
3252edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3253edab33b1STetsuya Mukawa 	printf("Done\n");
3254edab33b1STetsuya Mukawa }
3255edab33b1STetsuya Mukawa 
32560654d4a8SThomas Monjalon static void
32570654d4a8SThomas Monjalon detach_device(struct rte_device *dev)
32585f4ec54fSChen Jing D(Mark) {
3259f8e5baa2SThomas Monjalon 	portid_t sibling;
3260f8e5baa2SThomas Monjalon 
3261f8e5baa2SThomas Monjalon 	if (dev == NULL) {
326261a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Device already removed\n");
3263f8e5baa2SThomas Monjalon 		return;
3264f8e5baa2SThomas Monjalon 	}
3265f8e5baa2SThomas Monjalon 
32660654d4a8SThomas Monjalon 	printf("Removing a device...\n");
3267938a184aSAdrien Mazarguil 
32682a449871SThomas Monjalon 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
32692a449871SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
32702a449871SThomas Monjalon 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
327161a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
327261a3b0e5SAndrew Rybchenko 					sibling);
32732a449871SThomas Monjalon 				return;
32742a449871SThomas Monjalon 			}
32752a449871SThomas Monjalon 			port_flow_flush(sibling);
32762a449871SThomas Monjalon 		}
32772a449871SThomas Monjalon 	}
32782a449871SThomas Monjalon 
327975b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
3280f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3281edab33b1STetsuya Mukawa 		return;
32823070419eSGaetan Rivet 	}
32834f1de450SThomas Monjalon 	remove_invalid_ports();
328403ce2c53SMatan Azrad 
32850654d4a8SThomas Monjalon 	printf("Device is detached\n");
3286f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
3287edab33b1STetsuya Mukawa 	printf("Done\n");
3288edab33b1STetsuya Mukawa 	return;
32895f4ec54fSChen Jing D(Mark) }
32905f4ec54fSChen Jing D(Mark) 
3291af75078fSIntel void
32920654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
32930654d4a8SThomas Monjalon {
32940a0821bcSPaulis Gributs 	int ret;
32950a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
32960a0821bcSPaulis Gributs 
32970654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
32980654d4a8SThomas Monjalon 		return;
32990654d4a8SThomas Monjalon 
33000654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
33010654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
330261a3b0e5SAndrew Rybchenko 			fprintf(stderr, "Port not stopped\n");
33030654d4a8SThomas Monjalon 			return;
33040654d4a8SThomas Monjalon 		}
330561a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Port was not closed\n");
33060654d4a8SThomas Monjalon 	}
33070654d4a8SThomas Monjalon 
33080a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
33090a0821bcSPaulis Gributs 	if (ret != 0) {
33100a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
33110a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
33120a0821bcSPaulis Gributs 			port_id);
33130a0821bcSPaulis Gributs 		return;
33140a0821bcSPaulis Gributs 	}
33150a0821bcSPaulis Gributs 	detach_device(dev_info.device);
33160654d4a8SThomas Monjalon }
33170654d4a8SThomas Monjalon 
33180654d4a8SThomas Monjalon void
33195edee5f6SThomas Monjalon detach_devargs(char *identifier)
332055e51c96SNithin Dabilpuram {
332155e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
332255e51c96SNithin Dabilpuram 	struct rte_devargs da;
332355e51c96SNithin Dabilpuram 	portid_t port_id;
332455e51c96SNithin Dabilpuram 
332555e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
332655e51c96SNithin Dabilpuram 
332755e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
332855e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
332961a3b0e5SAndrew Rybchenko 		fprintf(stderr, "cannot parse identifier\n");
333055e51c96SNithin Dabilpuram 		return;
333155e51c96SNithin Dabilpuram 	}
333255e51c96SNithin Dabilpuram 
333355e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
333455e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
333555e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
333661a3b0e5SAndrew Rybchenko 				fprintf(stderr, "Port %u not stopped\n",
333761a3b0e5SAndrew Rybchenko 					port_id);
3338149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
333964051bb1SXueming Li 				rte_devargs_reset(&da);
334055e51c96SNithin Dabilpuram 				return;
334155e51c96SNithin Dabilpuram 			}
334255e51c96SNithin Dabilpuram 			port_flow_flush(port_id);
334355e51c96SNithin Dabilpuram 		}
334455e51c96SNithin Dabilpuram 	}
334555e51c96SNithin Dabilpuram 
334655e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
334755e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
334855e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
334964051bb1SXueming Li 		rte_devargs_reset(&da);
335055e51c96SNithin Dabilpuram 		return;
335155e51c96SNithin Dabilpuram 	}
335255e51c96SNithin Dabilpuram 
335355e51c96SNithin Dabilpuram 	remove_invalid_ports();
335455e51c96SNithin Dabilpuram 
335555e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
335655e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
335755e51c96SNithin Dabilpuram 	printf("Done\n");
335864051bb1SXueming Li 	rte_devargs_reset(&da);
335955e51c96SNithin Dabilpuram }
336055e51c96SNithin Dabilpuram 
336155e51c96SNithin Dabilpuram void
3362af75078fSIntel pmd_test_exit(void)
3363af75078fSIntel {
3364af75078fSIntel 	portid_t pt_id;
336526cbb419SViacheslav Ovsiienko 	unsigned int i;
3366fb73e096SJeff Guo 	int ret;
3367af75078fSIntel 
33688210ec25SPablo de Lara 	if (test_done == 0)
33698210ec25SPablo de Lara 		stop_packet_forwarding();
33708210ec25SPablo de Lara 
3371761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
337226cbb419SViacheslav Ovsiienko 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
33733a0968c8SShahaf Shuler 		if (mempools[i]) {
33743a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
33753a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
33763a0968c8SShahaf Shuler 						     NULL);
33773a0968c8SShahaf Shuler 		}
33783a0968c8SShahaf Shuler 	}
3379761f7ae1SJie Zhou #endif
3380d3a274ceSZhihong Wang 	if (ports != NULL) {
3381d3a274ceSZhihong Wang 		no_link_check = 1;
33827d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
338308fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
3384af75078fSIntel 			fflush(stdout);
3385d3a274ceSZhihong Wang 			stop_port(pt_id);
338608fd782bSCristian Dumitrescu 		}
338708fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
338808fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
338908fd782bSCristian Dumitrescu 			fflush(stdout);
3390d3a274ceSZhihong Wang 			close_port(pt_id);
3391af75078fSIntel 		}
3392d3a274ceSZhihong Wang 	}
3393fb73e096SJeff Guo 
3394fb73e096SJeff Guo 	if (hot_plug) {
3395fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
33962049c511SJeff Guo 		if (ret) {
3397fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
3398fb73e096SJeff Guo 				"fail to stop device event monitor.\n");
33992049c511SJeff Guo 			return;
34002049c511SJeff Guo 		}
3401fb73e096SJeff Guo 
34022049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
3403cc1bf307SJeff Guo 			dev_event_callback, NULL);
34042049c511SJeff Guo 		if (ret < 0) {
3405fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
34062049c511SJeff Guo 				"fail to unregister device event callback.\n");
34072049c511SJeff Guo 			return;
34082049c511SJeff Guo 		}
34092049c511SJeff Guo 
34102049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
34112049c511SJeff Guo 		if (ret) {
34122049c511SJeff Guo 			RTE_LOG(ERR, EAL,
34132049c511SJeff Guo 				"Failed to disable hotplug handling.\n");
34142049c511SJeff Guo 			return;
34152049c511SJeff Guo 		}
3416fb73e096SJeff Guo 	}
341726cbb419SViacheslav Ovsiienko 	for (i = 0; i < RTE_DIM(mempools); i++) {
3418401b744dSShahaf Shuler 		if (mempools[i])
3419a550baf2SMin Hu (Connor) 			mempool_free_mp(mempools[i]);
3420401b744dSShahaf Shuler 	}
342163b72657SIvan Ilchenko 	free(xstats_display);
3422fb73e096SJeff Guo 
3423d3a274ceSZhihong Wang 	printf("\nBye...\n");
3424af75078fSIntel }
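
/*
 * Reduced sketch (editor's addition): the stop-then-close ordering used by
 * pmd_test_exit(), expressed directly with the ethdev API; assumes a DPDK
 * release where rte_eth_dev_stop() returns an error code. Illustrative
 * only; not referenced elsewhere.
 */
static void
stop_and_close_all_ports_sketch(void)
{
	portid_t pid;

	RTE_ETH_FOREACH_DEV(pid) {
		if (rte_eth_dev_stop(pid) != 0)
			TESTPMD_LOG(ERR, "Failed to stop port %u\n", pid);
	}
	RTE_ETH_FOREACH_DEV(pid)
		rte_eth_dev_close(pid);
}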
3425af75078fSIntel 
3426af75078fSIntel typedef void (*cmd_func_t)(void);
3427af75078fSIntel struct pmd_test_command {
3428af75078fSIntel 	const char *cmd_name;
3429af75078fSIntel 	cmd_func_t cmd_func;
3430af75078fSIntel };
3431af75078fSIntel 
3432ce8d5614SIntel /* Check the link status of all ports for up to 9 s and print the final status */
3433af75078fSIntel static void
3434edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
3435af75078fSIntel {
3436ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
3437ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3438f8244c63SZhiyong Yang 	portid_t portid;
3439f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
3440ce8d5614SIntel 	struct rte_eth_link link;
3441e661a08bSIgor Romanov 	int ret;
3442ba5509a6SIvan Dyukov 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3443ce8d5614SIntel 
3444ce8d5614SIntel 	printf("Checking link statuses...\n");
3445ce8d5614SIntel 	fflush(stdout);
3446ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3447ce8d5614SIntel 		all_ports_up = 1;
34487d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
3449ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
3450ce8d5614SIntel 				continue;
3451ce8d5614SIntel 			memset(&link, 0, sizeof(link));
3452e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
3453e661a08bSIgor Romanov 			if (ret < 0) {
3454e661a08bSIgor Romanov 				all_ports_up = 0;
3455e661a08bSIgor Romanov 				if (print_flag == 1)
345661a3b0e5SAndrew Rybchenko 					fprintf(stderr,
345761a3b0e5SAndrew Rybchenko 						"Port %u link get failed: %s\n",
3458e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
3459e661a08bSIgor Romanov 				continue;
3460e661a08bSIgor Romanov 			}
3461ce8d5614SIntel 			/* print link status if flag set */
3462ce8d5614SIntel 			if (print_flag == 1) {
3463ba5509a6SIvan Dyukov 				rte_eth_link_to_str(link_status,
3464ba5509a6SIvan Dyukov 					sizeof(link_status), &link);
3465ba5509a6SIvan Dyukov 				printf("Port %d %s\n", portid, link_status);
3466ce8d5614SIntel 				continue;
3467ce8d5614SIntel 			}
3468ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
346909419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
3470ce8d5614SIntel 				all_ports_up = 0;
3471ce8d5614SIntel 				break;
3472ce8d5614SIntel 			}
3473ce8d5614SIntel 		}
3474ce8d5614SIntel 		/* after finally printing all link status, get out */
3475ce8d5614SIntel 		if (print_flag == 1)
3476ce8d5614SIntel 			break;
3477ce8d5614SIntel 
3478ce8d5614SIntel 		if (all_ports_up == 0) {
3479ce8d5614SIntel 			fflush(stdout);
3480ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
3481ce8d5614SIntel 		}
3482ce8d5614SIntel 
3483ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
3484ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3485ce8d5614SIntel 			print_flag = 1;
3486ce8d5614SIntel 		}
34878ea656f8SGaetan Rivet 
34888ea656f8SGaetan Rivet 		if (lsc_interrupt)
34898ea656f8SGaetan Rivet 			break;
3490ce8d5614SIntel 	}
3491af75078fSIntel }
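
/*
 * Minimal sketch (editor's addition): the same two calls the polling loop
 * above relies on, applied to a single port. Illustrative only.
 */
static void
show_port_link_sketch(portid_t pid)
{
	struct rte_eth_link link;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	memset(&link, 0, sizeof(link));
	if (rte_eth_link_get_nowait(pid, &link) < 0)
		return;
	rte_eth_link_to_str(link_status, sizeof(link_status), &link);
	printf("Port %u %s\n", pid, link_status);
}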
3492af75078fSIntel 
3493284c908cSGaetan Rivet static void
3494cc1bf307SJeff Guo rmv_port_callback(void *arg)
3495284c908cSGaetan Rivet {
34963b97888aSMatan Azrad 	int need_to_start = 0;
34970da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
349828caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
34990a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
35000a0821bcSPaulis Gributs 	int ret;
3501284c908cSGaetan Rivet 
3502284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3503284c908cSGaetan Rivet 
35043b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
35053b97888aSMatan Azrad 		need_to_start = 1;
35063b97888aSMatan Azrad 		stop_packet_forwarding();
35073b97888aSMatan Azrad 	}
35080da2a62bSMatan Azrad 	no_link_check = 1;
3509284c908cSGaetan Rivet 	stop_port(port_id);
35100da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
35110654d4a8SThomas Monjalon 
35120a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
35130a0821bcSPaulis Gributs 	if (ret != 0)
35140a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
35150a0821bcSPaulis Gributs 			"Failed to get device info for port %d, not detaching\n",
35160a0821bcSPaulis Gributs 			port_id);
3517e1d38504SPaulis Gributs 	else {
3518e1d38504SPaulis Gributs 		struct rte_device *device = dev_info.device;
3519e1d38504SPaulis Gributs 		close_port(port_id);
3520e1d38504SPaulis Gributs 		detach_device(device); /* might be already removed or have more ports */
3521e1d38504SPaulis Gributs 	}
35223b97888aSMatan Azrad 	if (need_to_start)
35233b97888aSMatan Azrad 		start_packet_forwarding(0);
3524284c908cSGaetan Rivet }
3525284c908cSGaetan Rivet 
352676ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
3527d6af1a13SBernard Iremonger static int
3528f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3529d6af1a13SBernard Iremonger 		  void *ret_param)
353076ad4a2dSGaetan Rivet {
353176ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
3532d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
353376ad4a2dSGaetan Rivet 
353476ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
353561a3b0e5SAndrew Rybchenko 		fprintf(stderr,
353661a3b0e5SAndrew Rybchenko 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
353776ad4a2dSGaetan Rivet 			port_id, __func__, type);
353876ad4a2dSGaetan Rivet 		fflush(stderr);
35393af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3540f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
354197b5d8b5SThomas Monjalon 			eth_event_desc[type]);
354276ad4a2dSGaetan Rivet 		fflush(stdout);
354376ad4a2dSGaetan Rivet 	}
3544284c908cSGaetan Rivet 
3545284c908cSGaetan Rivet 	switch (type) {
35464f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
35474f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
35484f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
35494f1ed78eSThomas Monjalon 		break;
3550284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
35514f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
35524f1ed78eSThomas Monjalon 			break;
3553284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
3554cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
355561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
355661a3b0e5SAndrew Rybchenko 				"Could not set up deferred device removal\n");
3557284c908cSGaetan Rivet 		break;
355885c6571cSThomas Monjalon 	case RTE_ETH_EVENT_DESTROY:
355985c6571cSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_CLOSED;
356085c6571cSThomas Monjalon 		printf("Port %u is closed\n", port_id);
356185c6571cSThomas Monjalon 		break;
3562284c908cSGaetan Rivet 	default:
3563284c908cSGaetan Rivet 		break;
3564284c908cSGaetan Rivet 	}
3565d6af1a13SBernard Iremonger 	return 0;
356676ad4a2dSGaetan Rivet }
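
/*
 * Illustrative note (editor's addition, not in the original source):
 * event_print_mask holds one bit per rte_eth_event_type, so the test
 * event_print_mask & (UINT32_C(1) << type) selects which event types are
 * printed. For example, printing only link-state-change events would use:
 *
 *	event_print_mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC;
 */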
356776ad4a2dSGaetan Rivet 
356897b5d8b5SThomas Monjalon static int
356997b5d8b5SThomas Monjalon register_eth_event_callback(void)
357097b5d8b5SThomas Monjalon {
357197b5d8b5SThomas Monjalon 	int ret;
357297b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
357397b5d8b5SThomas Monjalon 
357497b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
357597b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
357697b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
357797b5d8b5SThomas Monjalon 				event,
357897b5d8b5SThomas Monjalon 				eth_event_callback,
357997b5d8b5SThomas Monjalon 				NULL);
358097b5d8b5SThomas Monjalon 		if (ret != 0) {
358197b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
358297b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
358397b5d8b5SThomas Monjalon 			return -1;
358497b5d8b5SThomas Monjalon 		}
358597b5d8b5SThomas Monjalon 	}
358697b5d8b5SThomas Monjalon 
358797b5d8b5SThomas Monjalon 	return 0;
358897b5d8b5SThomas Monjalon }
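
/*
 * Counterpart sketch (editor's addition): unregistering the same callback
 * from every event type mirrors the registration loop above. Illustrative
 * only; not referenced elsewhere.
 */
static void
unregister_eth_event_callbacks_sketch(void)
{
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++)
		rte_eth_dev_callback_unregister(RTE_ETH_ALL, event,
				eth_event_callback, NULL);
}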
358997b5d8b5SThomas Monjalon 
3590fb73e096SJeff Guo /* This function is used by the interrupt thread */
3591fb73e096SJeff Guo static void
3592cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3593fb73e096SJeff Guo 			     __rte_unused void *arg)
3594fb73e096SJeff Guo {
35952049c511SJeff Guo 	uint16_t port_id;
35962049c511SJeff Guo 	int ret;
35972049c511SJeff Guo 
3598fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
3599fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
3600fb73e096SJeff Guo 			__func__, type);
3601fb73e096SJeff Guo 		fflush(stderr);
3602fb73e096SJeff Guo 	}
3603fb73e096SJeff Guo 
3604fb73e096SJeff Guo 	switch (type) {
3605fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
3606cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device %s has been removed!\n",
3607fb73e096SJeff Guo 			device_name);
36082049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
36092049c511SJeff Guo 		if (ret) {
36102049c511SJeff Guo 			RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
36112049c511SJeff Guo 				device_name);
36122049c511SJeff Guo 			return;
36132049c511SJeff Guo 		}
3614cc1bf307SJeff Guo 		/*
3615cc1bf307SJeff Guo 		 * Because the user's callback is invoked from the EAL
3616cc1bf307SJeff Guo 		 * interrupt callback, and an interrupt callback must finish
3617cc1bf307SJeff Guo 		 * before it can be unregistered, the device cannot be
3618cc1bf307SJeff Guo 		 * detached directly here. Finish the callback quickly and
3619cc1bf307SJeff Guo 		 * detach the device via a deferred removal instead. This is
3620cc1bf307SJeff Guo 		 * a workaround; once device detaching moves into the EAL,
3621cc1bf307SJeff Guo 		 * the deferred removal can be deleted.
3622cc1bf307SJeff Guo 		 */
3623cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
3624cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
3625cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
3626cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
3627fb73e096SJeff Guo 		break;
3628fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
3629fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
3630fb73e096SJeff Guo 			device_name);
3631fb73e096SJeff Guo 		/* TODO: After the kernel driver binding finishes,
3632fb73e096SJeff Guo 		 * begin to attach the port.
3633fb73e096SJeff Guo 		 */
3634fb73e096SJeff Guo 		break;
3635fb73e096SJeff Guo 	default:
3636fb73e096SJeff Guo 		break;
3637fb73e096SJeff Guo 	}
3638fb73e096SJeff Guo }
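
/*
 * Pattern sketch (editor's addition): dev_event_callback() defers the heavy
 * work to an EAL alarm so the notification context returns quickly. The
 * same pattern in isolation, reusing the 100000 us delay from above;
 * illustrative only, not referenced elsewhere.
 */
static void
deferred_work_cb(void *arg)
{
	portid_t port_id = (intptr_t)arg;

	/* Heavy work (stop/close/detach) runs here, outside the
	 * notification context.
	 */
	stop_port(port_id);
}

static void
schedule_deferred_work_sketch(portid_t port_id)
{
	if (rte_eal_alarm_set(100000, deferred_work_cb,
			(void *)(intptr_t)port_id) != 0)
		TESTPMD_LOG(ERR, "Could not schedule deferred work\n");
}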
3639fb73e096SJeff Guo 
3640f2c5125aSPablo de Lara static void
3641f4d178c1SXueming Li rxtx_port_config(portid_t pid)
3642f2c5125aSPablo de Lara {
3643d44f8a48SQi Zhang 	uint16_t qid;
36445e91aeefSWei Zhao 	uint64_t offloads;
3645f4d178c1SXueming Li 	struct rte_port *port = &ports[pid];
3646f2c5125aSPablo de Lara 
3647d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
36485e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3649d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3650f4d178c1SXueming Li 
3651f4d178c1SXueming Li 		if (rxq_share > 0 &&
3652f4d178c1SXueming Li 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3653f4d178c1SXueming Li 			/* Non-zero share group to enable RxQ share. */
3654f4d178c1SXueming Li 			port->rx_conf[qid].share_group = pid / rxq_share + 1;
3655f4d178c1SXueming Li 			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
3656f4d178c1SXueming Li 		}
3657f4d178c1SXueming Li 
3658575e0fd1SWei Zhao 		if (offloads != 0)
3659575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3660d44f8a48SQi Zhang 
3661d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3662f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3663d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3664f2c5125aSPablo de Lara 
3665f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3666d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3667f2c5125aSPablo de Lara 
3668f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3669d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3670f2c5125aSPablo de Lara 
3671f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3672d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3673f2c5125aSPablo de Lara 
3674f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3675d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3676f2c5125aSPablo de Lara 
3677d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3678d44f8a48SQi Zhang 	}
3679d44f8a48SQi Zhang 
3680d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
36815e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3682d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3683575e0fd1SWei Zhao 		if (offloads != 0)
3684575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3685d44f8a48SQi Zhang 
3686d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3687f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3688d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3689f2c5125aSPablo de Lara 
3690f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3691d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3692f2c5125aSPablo de Lara 
3693f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3694d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3695f2c5125aSPablo de Lara 
3696f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3697d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3698f2c5125aSPablo de Lara 
3699f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3700d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3701d44f8a48SQi Zhang 
3702d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3703d44f8a48SQi Zhang 	}
3704f2c5125aSPablo de Lara }
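
/*
 * Worked example (editor's addition): with rxq_share = 2 the code above
 * computes share_group = pid / rxq_share + 1, so ports 0-1 fall in share
 * group 1, ports 2-3 in group 2, and so on; within a group, queue qid of
 * every member port shares share_qid = qid.
 */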
3705f2c5125aSPablo de Lara 
37060c4abd36SSteve Yang /*
3707b563c142SFerruh Yigit  * Helper function to set MTU from frame size
37080c4abd36SSteve Yang  *
37090c4abd36SSteve Yang  * port->dev_info should be set before calling this function.
37100c4abd36SSteve Yang  *
37110c4abd36SSteve Yang  * return 0 on success, negative on error
37120c4abd36SSteve Yang  */
37130c4abd36SSteve Yang int
3714b563c142SFerruh Yigit update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
37150c4abd36SSteve Yang {
37160c4abd36SSteve Yang 	struct rte_port *port = &ports[portid];
37170c4abd36SSteve Yang 	uint32_t eth_overhead;
37181bb4a528SFerruh Yigit 	uint16_t mtu, new_mtu;
37190c4abd36SSteve Yang 
37201bb4a528SFerruh Yigit 	eth_overhead = get_eth_overhead(&port->dev_info);
37211bb4a528SFerruh Yigit 
37221bb4a528SFerruh Yigit 	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
37231bb4a528SFerruh Yigit 		fprintf(stderr, "Failed to get MTU for port %u\n", portid);
37241bb4a528SFerruh Yigit 		return -1;
37251bb4a528SFerruh Yigit 	}
37261bb4a528SFerruh Yigit 
37271bb4a528SFerruh Yigit 	new_mtu = max_rx_pktlen - eth_overhead;
37280c4abd36SSteve Yang 
37291bb4a528SFerruh Yigit 	if (mtu == new_mtu)
37301bb4a528SFerruh Yigit 		return 0;
37311bb4a528SFerruh Yigit 
37321bb4a528SFerruh Yigit 	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
373361a3b0e5SAndrew Rybchenko 		fprintf(stderr,
373461a3b0e5SAndrew Rybchenko 			"Failed to set MTU to %u for port %u\n",
37351bb4a528SFerruh Yigit 			new_mtu, portid);
37361bb4a528SFerruh Yigit 		return -1;
37370c4abd36SSteve Yang 	}
37380c4abd36SSteve Yang 
37391bb4a528SFerruh Yigit 	port->dev_conf.rxmode.mtu = new_mtu;
37401bb4a528SFerruh Yigit 
37410c4abd36SSteve Yang 	return 0;
37420c4abd36SSteve Yang }
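
/*
 * Worked example (editor's addition): for plain Ethernet the overhead from
 * get_eth_overhead() is typically 18 bytes (14-byte header + 4-byte CRC;
 * more when the device counts VLAN tags), so max_rx_pktlen = 1518 yields
 * new_mtu = 1518 - 18 = 1500, the classic Ethernet MTU.
 */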
37430c4abd36SSteve Yang 
3744013af9b6SIntel void
3745013af9b6SIntel init_port_config(void)
3746013af9b6SIntel {
3747013af9b6SIntel 	portid_t pid;
3748013af9b6SIntel 	struct rte_port *port;
3749655eae01SJie Wang 	int ret, i;
3750013af9b6SIntel 
37517d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
3752013af9b6SIntel 		port = &ports[pid];
3753013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
37546f51deb9SIvan Ilchenko 
37556f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
37566f51deb9SIvan Ilchenko 		if (ret != 0)
37576f51deb9SIvan Ilchenko 			return;
37586f51deb9SIvan Ilchenko 
37593ce690d3SBruce Richardson 		if (nb_rxq > 1) {
3760013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
376190892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3762422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
3763af75078fSIntel 		} else {
3764013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3765013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3766af75078fSIntel 		}
37673ce690d3SBruce Richardson 
37685f592039SJingjing Wu 		if (port->dcb_flag == 0) {
3769655eae01SJie Wang 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3770f9295aa2SXiaoyu Min 				port->dev_conf.rxmode.mq_mode =
3771f9295aa2SXiaoyu Min 					(enum rte_eth_rx_mq_mode)
3772f9295aa2SXiaoyu Min 						(rx_mq_mode & ETH_MQ_RX_RSS);
3773655eae01SJie Wang 			} else {
37743ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3775655eae01SJie Wang 				port->dev_conf.rxmode.offloads &=
3776655eae01SJie Wang 						~DEV_RX_OFFLOAD_RSS_HASH;
3777655eae01SJie Wang 
3778655eae01SJie Wang 				for (i = 0;
3779655eae01SJie Wang 				     i < port->dev_info.nb_rx_queues;
3780655eae01SJie Wang 				     i++)
3781655eae01SJie Wang 					port->rx_conf[i].offloads &=
3782655eae01SJie Wang 						~DEV_RX_OFFLOAD_RSS_HASH;
3783655eae01SJie Wang 			}
37843ce690d3SBruce Richardson 		}
37853ce690d3SBruce Richardson 
3786f4d178c1SXueming Li 		rxtx_port_config(pid);
3787013af9b6SIntel 
3788a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3789a5279d25SIgor Romanov 		if (ret != 0)
3790a5279d25SIgor Romanov 			return;
3791013af9b6SIntel 
3792a8d0d473SBruce Richardson #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3793e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
37947b7e5ba7SIntel #endif
37958ea656f8SGaetan Rivet 
37960a0821bcSPaulis Gributs 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
37978ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
37980a0821bcSPaulis Gributs 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3799284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3800013af9b6SIntel 	}
3801013af9b6SIntel }
3802013af9b6SIntel 
380341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
380441b05095SBernard Iremonger {
380541b05095SBernard Iremonger 	struct rte_port *port;
380641b05095SBernard Iremonger 
380741b05095SBernard Iremonger 	port = &ports[slave_pid];
380841b05095SBernard Iremonger 	port->slave_flag = 1;
380941b05095SBernard Iremonger }
381041b05095SBernard Iremonger 
381141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
381241b05095SBernard Iremonger {
381341b05095SBernard Iremonger 	struct rte_port *port;
381441b05095SBernard Iremonger 
381541b05095SBernard Iremonger 	port = &ports[slave_pid];
381641b05095SBernard Iremonger 	port->slave_flag = 0;
381741b05095SBernard Iremonger }
381841b05095SBernard Iremonger 
38190e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
38200e545d30SBernard Iremonger {
38210e545d30SBernard Iremonger 	struct rte_port *port;
38220a0821bcSPaulis Gributs 	struct rte_eth_dev_info dev_info;
38230a0821bcSPaulis Gributs 	int ret;
38240e545d30SBernard Iremonger 
38250e545d30SBernard Iremonger 	port = &ports[slave_pid];
38260a0821bcSPaulis Gributs 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
38270a0821bcSPaulis Gributs 	if (ret != 0) {
38280a0821bcSPaulis Gributs 		TESTPMD_LOG(ERR,
38290a0821bcSPaulis Gributs 			"Failed to get device info for port id %d, "
38300a0821bcSPaulis Gributs 			"cannot determine if the port is a bonded slave\n",
38310a0821bcSPaulis Gributs 			slave_pid);
38320a0821bcSPaulis Gributs 		return 0;
38330a0821bcSPaulis Gributs 	}
38340a0821bcSPaulis Gributs 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3835b8b8b344SMatan Azrad 		return 1;
3836b8b8b344SMatan Azrad 	return 0;
38370e545d30SBernard Iremonger }
38380e545d30SBernard Iremonger 
3839013af9b6SIntel const uint16_t vlan_tags[] = {
3840013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
3841013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
3842013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
3843013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
3844013af9b6SIntel };
3845013af9b6SIntel 
3846013af9b6SIntel static  int
3847ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
38481a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
38491a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
38501a572499SJingjing Wu 		 uint8_t pfc_en)
3851013af9b6SIntel {
3852013af9b6SIntel 	uint8_t i;
3853ac7c491cSKonstantin Ananyev 	int32_t rc;
3854ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3855af75078fSIntel 
3856af75078fSIntel 	/*
3857013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the vlan tags array
3858013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3859af75078fSIntel 	 */
38601a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
38611a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
38621a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
38631a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
38641a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3865013af9b6SIntel 
3866547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
38671a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
38681a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
38691a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
38701a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
38711a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
38721a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3873013af9b6SIntel 
38741a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
38751a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
38761a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
38771a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
38781a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3879af75078fSIntel 		}
3880013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3881f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3882f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3883013af9b6SIntel 		}
3884013af9b6SIntel 
3885013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
3886f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3887f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3888f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
388932e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
38901a572499SJingjing Wu 	} else {
38911a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
38921a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
38931a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
38941a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3895013af9b6SIntel 
38965139bc12STing Xu 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
38975139bc12STing Xu 
3898ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3899ac7c491cSKonstantin Ananyev 		if (rc != 0)
3900ac7c491cSKonstantin Ananyev 			return rc;
3901ac7c491cSKonstantin Ananyev 
39021a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
39031a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
39041a572499SJingjing Wu 
3905bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3906bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3907bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3908013af9b6SIntel 		}
3909ac7c491cSKonstantin Ananyev 
3910f9295aa2SXiaoyu Min 		eth_conf->rxmode.mq_mode =
3911f9295aa2SXiaoyu Min 				(enum rte_eth_rx_mq_mode)
3912f9295aa2SXiaoyu Min 					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3913ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
391432e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
39151a572499SJingjing Wu 	}
39161a572499SJingjing Wu 
39171a572499SJingjing Wu 	if (pfc_en)
39181a572499SJingjing Wu 		eth_conf->dcb_capability_en =
39191a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3920013af9b6SIntel 	else
3921013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3922013af9b6SIntel 
3923013af9b6SIntel 	return 0;
3924013af9b6SIntel }
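
/*
 * Worked example (editor's addition): in DCB_VT_ENABLED mode with
 * num_tcs = ETH_4_TCS, nb_queue_pools = ETH_32_POOLS, so pool_map[i] maps
 * vlan_tags[i] to pool i (pools = 1 << (i % 32)) and user priority i is
 * assigned to traffic class i % 4 through dcb_tc[i].
 */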
3925013af9b6SIntel 
3926013af9b6SIntel int
39271a572499SJingjing Wu init_port_dcb_config(portid_t pid,
39281a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
39291a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
39301a572499SJingjing Wu 		     uint8_t pfc_en)
3931013af9b6SIntel {
3932013af9b6SIntel 	struct rte_eth_conf port_conf;
3933013af9b6SIntel 	struct rte_port *rte_port;
3934013af9b6SIntel 	int retval;
3935013af9b6SIntel 	uint16_t i;
3936013af9b6SIntel 
3937a550baf2SMin Hu (Connor) 	if (num_procs > 1) {
3938a550baf2SMin Hu (Connor) 		printf("The multi-process feature doesn't support DCB.\n");
3939a550baf2SMin Hu (Connor) 		return -ENOTSUP;
3940a550baf2SMin Hu (Connor) 	}
39412a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3942013af9b6SIntel 
3943c1ba6c32SHuisong Li 	/* retain the original device configuration. */
3944c1ba6c32SHuisong Li 	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3945d5354e89SYanglong Wu 
3946013af9b6SIntel 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
3947ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3948013af9b6SIntel 	if (retval < 0)
3949013af9b6SIntel 		return retval;
39500074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3951013af9b6SIntel 
39522f203d44SQi Zhang 	/* re-configure the device. */
39532b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
39542b0e0ebaSChenbo Xia 	if (retval < 0)
39552b0e0ebaSChenbo Xia 		return retval;
39566f51deb9SIvan Ilchenko 
39576f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
39586f51deb9SIvan Ilchenko 	if (retval != 0)
39596f51deb9SIvan Ilchenko 		return retval;
39602a977b89SWenzhuo Lu 
39612a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
39622a977b89SWenzhuo Lu 	 * the queue ids of the VMDQ pools start after the PF queues.
39632a977b89SWenzhuo Lu 	 */
39642a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
39652a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
396661a3b0e5SAndrew Rybchenko 		fprintf(stderr,
396761a3b0e5SAndrew Rybchenko 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
396861a3b0e5SAndrew Rybchenko 			pid);
39692a977b89SWenzhuo Lu 		return -1;
39702a977b89SWenzhuo Lu 	}
39712a977b89SWenzhuo Lu 
39722a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same DCB capability
39732a977b89SWenzhuo Lu 	 * and the same number of rxq and txq in DCB mode.
39742a977b89SWenzhuo Lu 	 */
39752a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
397686ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
397786ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
397886ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
397986ef65eeSBernard Iremonger 		} else {
39802a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
39812a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
398286ef65eeSBernard Iremonger 		}
39832a977b89SWenzhuo Lu 	} else {
39842a977b89SWenzhuo Lu 		/* if VT is disabled, use all PF queues */
39852a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
39862a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
39872a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
39882a977b89SWenzhuo Lu 		} else {
39892a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
39902a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
39912a977b89SWenzhuo Lu 
39932a977b89SWenzhuo Lu 	}
39942a977b89SWenzhuo Lu 	rx_free_thresh = 64;
39952a977b89SWenzhuo Lu 
3996013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3997013af9b6SIntel 
3998f4d178c1SXueming Li 	rxtx_port_config(pid);
3999013af9b6SIntel 	/* VLAN filter */
40000074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
40011a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
4002013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
4003013af9b6SIntel 
4004a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4005a5279d25SIgor Romanov 	if (retval != 0)
4006a5279d25SIgor Romanov 		return retval;
4007a5279d25SIgor Romanov 
40087741e4cfSIntel 	rte_port->dcb_flag = 1;
40097741e4cfSIntel 
4010a690a070SHuisong Li 	/* Enter DCB configuration status */
4011a690a070SHuisong Li 	dcb_config = 1;
4012a690a070SHuisong Li 
4013013af9b6SIntel 	return 0;
4014af75078fSIntel }
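
/*
 * Usage sketch (editor's addition): testpmd's command line reaches this
 * function through the "port config ... dcb" command; a representative
 * call enabling four traffic classes with priority flow control would be
 * roughly:
 *
 *	init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1);
 */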
4015af75078fSIntel 
4016ffc468ffSTetsuya Mukawa static void
4017ffc468ffSTetsuya Mukawa init_port(void)
4018ffc468ffSTetsuya Mukawa {
40191b9f2746SGregory Etelson 	int i;
40201b9f2746SGregory Etelson 
4021ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
4022ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
4023ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4024ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
4025ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
4026ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
4027ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
4028ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
4029ffc468ffSTetsuya Mukawa 	}
40301b9f2746SGregory Etelson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
403163b72657SIvan Ilchenko 		ports[i].xstats_info.allocated = false;
403263b72657SIvan Ilchenko 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
40331b9f2746SGregory Etelson 		LIST_INIT(&ports[i].flow_tunnel_list);
403429841336SPhil Yang 	/* Initialize ports NUMA structures */
403529841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
403629841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
403729841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4038ffc468ffSTetsuya Mukawa }
4039ffc468ffSTetsuya Mukawa 
4040d3a274ceSZhihong Wang static void
4041d3a274ceSZhihong Wang force_quit(void)
4042d3a274ceSZhihong Wang {
4043d3a274ceSZhihong Wang 	pmd_test_exit();
4044d3a274ceSZhihong Wang 	prompt_exit();
4045d3a274ceSZhihong Wang }
4046d3a274ceSZhihong Wang 
4047d3a274ceSZhihong Wang static void
4048cfea1f30SPablo de Lara print_stats(void)
4049cfea1f30SPablo de Lara {
4050cfea1f30SPablo de Lara 	uint8_t i;
4051cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
4052cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4053cfea1f30SPablo de Lara 
4054cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
4055cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
4056cfea1f30SPablo de Lara 
4057cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
4058cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4059cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
4060683d1e82SIgor Romanov 
4061683d1e82SIgor Romanov 	fflush(stdout);
4062cfea1f30SPablo de Lara }
4063cfea1f30SPablo de Lara 
4064cfea1f30SPablo de Lara static void
4065d3a274ceSZhihong Wang signal_handler(int signum)
4066d3a274ceSZhihong Wang {
4067d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
406861a3b0e5SAndrew Rybchenko 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4069d3a274ceSZhihong Wang 			signum);
4070a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
4071102b7329SReshma Pattan 		/* uninitialize packet capture framework */
4072102b7329SReshma Pattan 		rte_pdump_uninit();
4073102b7329SReshma Pattan #endif
4074a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
40758b36297dSAmit Gupta 		if (latencystats_enabled != 0)
407662d3216dSReshma Pattan 			rte_latencystats_uninit();
407762d3216dSReshma Pattan #endif
4078d3a274ceSZhihong Wang 		force_quit();
4079d9a191a0SPhil Yang 		/* Set flag to indicate forced termination. */
4080d9a191a0SPhil Yang 		f_quit = 1;
4081d3a274ceSZhihong Wang 		/* exit with the expected status */
4082761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4083d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
4084d3a274ceSZhihong Wang 		kill(getpid(), signum);
4085761f7ae1SJie Zhou #endif
4086d3a274ceSZhihong Wang 	}
4087d3a274ceSZhihong Wang }
4088d3a274ceSZhihong Wang 
4089af75078fSIntel int
4090af75078fSIntel main(int argc, char** argv)
4091af75078fSIntel {
4092af75078fSIntel 	int diag;
4093f8244c63SZhiyong Yang 	portid_t port_id;
40944918a357SXiaoyun Li 	uint16_t count;
4095fb73e096SJeff Guo 	int ret;
4096af75078fSIntel 
4097d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
4098d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
4099d3a274ceSZhihong Wang 
4100285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
4101285fd101SOlivier Matz 	if (testpmd_logtype < 0)
410216267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register log type\n");
4103285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4104285fd101SOlivier Matz 
41059201806eSStephen Hemminger 	diag = rte_eal_init(argc, argv);
41069201806eSStephen Hemminger 	if (diag < 0)
410716267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
410816267ceeSStephen Hemminger 			 rte_strerror(rte_errno));
41099201806eSStephen Hemminger 
411097b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
411197b5d8b5SThomas Monjalon 	if (ret != 0)
411216267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
411397b5d8b5SThomas Monjalon 
4114a8d0d473SBruce Richardson #ifdef RTE_LIB_PDUMP
41154aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
4116e9436f54STiwei Bie 	rte_pdump_init();
41174aa0d012SAnatoly Burakov #endif
41184aa0d012SAnatoly Burakov 
41194918a357SXiaoyun Li 	count = 0;
41204918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
41214918a357SXiaoyun Li 		ports_ids[count] = port_id;
41224918a357SXiaoyun Li 		count++;
41234918a357SXiaoyun Li 	}
41244918a357SXiaoyun Li 	nb_ports = (portid_t) count;
41254aa0d012SAnatoly Burakov 	if (nb_ports == 0)
41264aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
41274aa0d012SAnatoly Burakov 
41284aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
41294aa0d012SAnatoly Burakov 	init_port();
41304aa0d012SAnatoly Burakov 
41314aa0d012SAnatoly Burakov 	set_def_fwd_config();
41324aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
413316267ceeSStephen Hemminger 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
413416267ceeSStephen Hemminger 			 "Check the core mask argument\n");
41354aa0d012SAnatoly Burakov 
4136e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
4137a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4138e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
4139e505d84cSAnatoly Burakov #endif
4140a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
4141e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
4142e505d84cSAnatoly Burakov #endif
4143e505d84cSAnatoly Burakov 
4144fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
41455fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
4146fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
4147fb7b8b32SAnatoly Burakov #else
4148fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
4149fb7b8b32SAnatoly Burakov #endif
4150fb7b8b32SAnatoly Burakov 
4151e505d84cSAnatoly Burakov 	argc -= diag;
4152e505d84cSAnatoly Burakov 	argv += diag;
4153e505d84cSAnatoly Burakov 	if (argc > 1)
4154e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
4155e505d84cSAnatoly Burakov 
4156761f7ae1SJie Zhou #ifndef RTE_EXEC_ENV_WINDOWS
4157e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4158285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
41591c036b16SEelco Chaudron 			strerror(errno));
41601c036b16SEelco Chaudron 	}
4161761f7ae1SJie Zhou #endif
41621c036b16SEelco Chaudron 
416399cabef0SPablo de Lara 	if (tx_first && interactive)
415499cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
416599cabef0SPablo de Lara 				"interactive mode.\n");
41668820cba4SDavid Hunt 
41678820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
416861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
416961a3b0e5SAndrew Rybchenko 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
41708820cba4SDavid Hunt 		lsc_interrupt = 0;
41718820cba4SDavid Hunt 	}
41728820cba4SDavid Hunt 
41735a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
417461a3b0e5SAndrew Rybchenko 		fprintf(stderr,
417561a3b0e5SAndrew Rybchenko 			"Warning: Either rx or tx queues should be non-zero\n");
41765a8fb55cSReshma Pattan 
41775a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
417861a3b0e5SAndrew Rybchenko 		fprintf(stderr,
417961a3b0e5SAndrew Rybchenko 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4180af75078fSIntel 			nb_rxq, nb_txq);
4181af75078fSIntel 
4182af75078fSIntel 	init_config();
4183fb73e096SJeff Guo 
4184fb73e096SJeff Guo 	if (hot_plug) {
41852049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
4186fb73e096SJeff Guo 		if (ret) {
41872049c511SJeff Guo 			RTE_LOG(ERR, EAL,
41882049c511SJeff Guo 				"Failed to enable hotplug handling.\n");
4189fb73e096SJeff Guo 			return -1;
4190fb73e096SJeff Guo 		}
4191fb73e096SJeff Guo 
41922049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
41932049c511SJeff Guo 		if (ret) {
41942049c511SJeff Guo 			RTE_LOG(ERR, EAL,
41952049c511SJeff Guo 				"Failed to start device event monitoring.\n");
41962049c511SJeff Guo 			return -1;
41972049c511SJeff Guo 		}
41982049c511SJeff Guo 
41992049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
4200cc1bf307SJeff Guo 			dev_event_callback, NULL);
42012049c511SJeff Guo 		if (ret) {
42022049c511SJeff Guo 			RTE_LOG(ERR, EAL,
42032049c511SJeff Guo 				"Failed to register the device event callback.\n");
42042049c511SJeff Guo 			return -1;
42052049c511SJeff Guo 		}
4206fb73e096SJeff Guo 	}
4207fb73e096SJeff Guo 
42086937d210SStephen Hemminger 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4209148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
4210af75078fSIntel 
4211ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
421234fc1051SIvan Ilchenko 	RTE_ETH_FOREACH_DEV(port_id) {
421334fc1051SIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
421434fc1051SIvan Ilchenko 		if (ret != 0)
421561a3b0e5SAndrew Rybchenko 			fprintf(stderr,
421661a3b0e5SAndrew Rybchenko 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
421734fc1051SIvan Ilchenko 				port_id, rte_strerror(-ret));
421834fc1051SIvan Ilchenko 	}
4219af75078fSIntel 
42207e4441c8SRemy Horton 	/* Init metrics library */
42217e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
42227e4441c8SRemy Horton 
4223a8d0d473SBruce Richardson #ifdef RTE_LIB_LATENCYSTATS
422462d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
422562d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
422662d3216dSReshma Pattan 		if (ret)
422761a3b0e5SAndrew Rybchenko 			fprintf(stderr,
422861a3b0e5SAndrew Rybchenko 				"Warning: latencystats init() returned error %d\n",
422961a3b0e5SAndrew Rybchenko 				ret);
423061a3b0e5SAndrew Rybchenko 		fprintf(stderr, "Latencystats running on lcore %d\n",
423162d3216dSReshma Pattan 			latencystats_lcore_id);
423262d3216dSReshma Pattan 	}
423362d3216dSReshma Pattan #endif
423462d3216dSReshma Pattan 
42357e4441c8SRemy Horton 	/* Setup bitrate stats */
4236a8d0d473SBruce Richardson #ifdef RTE_LIB_BITRATESTATS
4237e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
42387e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
42397e4441c8SRemy Horton 		if (bitrate_data == NULL)
4240e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
4241e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
42427e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
4243e25e6c70SRemy Horton 	}
42447e4441c8SRemy Horton #endif
4245a8d0d473SBruce Richardson #ifdef RTE_LIB_CMDLINE
424681ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
424781ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
424881ef862bSAllain Legacy 
4249ca7feb22SCyril Chemparathy 	if (interactive == 1) {
4250ca7feb22SCyril Chemparathy 		if (auto_start) {
4251ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
4252ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
4253ca7feb22SCyril Chemparathy 		}
4254af75078fSIntel 		prompt();
42550de738cfSJiayu Hu 		pmd_test_exit();
4256ca7feb22SCyril Chemparathy 	} else
42570d56cb81SThomas Monjalon #endif
42580d56cb81SThomas Monjalon 	{
4259af75078fSIntel 		char c;
4260af75078fSIntel 		int rc;
4261af75078fSIntel 
4262d9a191a0SPhil Yang 		f_quit = 0;
4263d9a191a0SPhil Yang 
4264af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
426599cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
4266cfea1f30SPablo de Lara 		if (stats_period != 0) {
4267cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4268cfea1f30SPablo de Lara 			uint64_t timer_period;
4269cfea1f30SPablo de Lara 
4270cfea1f30SPablo de Lara 			/* Convert to number of cycles */
4271cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
4272cfea1f30SPablo de Lara 
4273d9a191a0SPhil Yang 			while (f_quit == 0) {
4274cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
4275cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
4276cfea1f30SPablo de Lara 
4277cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
4278cfea1f30SPablo de Lara 					print_stats();
4279cfea1f30SPablo de Lara 					/* Reset the timer */
4280cfea1f30SPablo de Lara 					diff_time = 0;
4281cfea1f30SPablo de Lara 				}
4282cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
4283cfea1f30SPablo de Lara 				prev_time = cur_time;
4284761f7ae1SJie Zhou 				rte_delay_us_sleep(US_PER_S);
4285cfea1f30SPablo de Lara 			}
4286cfea1f30SPablo de Lara 		}
4287cfea1f30SPablo de Lara 
4288af75078fSIntel 		printf("Press enter to exit\n");
4289af75078fSIntel 		rc = read(0, &c, 1);
4290d3a274ceSZhihong Wang 		pmd_test_exit();
4291af75078fSIntel 		if (rc < 0)
4292af75078fSIntel 			return 1;
4293af75078fSIntel 	}
4294af75078fSIntel 
42955e516c89SStephen Hemminger 	ret = rte_eal_cleanup();
42965e516c89SStephen Hemminger 	if (ret != 0)
42975e516c89SStephen Hemminger 		rte_exit(EXIT_FAILURE,
42985e516c89SStephen Hemminger 			 "EAL cleanup failed: %s\n", strerror(-ret));
42995e516c89SStephen Hemminger 
43005e516c89SStephen Hemminger 	return EXIT_SUCCESS;
4301af75078fSIntel }
4302