xref: /dpdk/app/test-pmd/testpmd.c (revision 72512e1897b2ba9a36227b5ff919c5450ed5dc8c)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
577e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
81*72512e18SViacheslav Ovsiienko #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
172af75078fSIntel struct fwd_engine * fwd_engines[] = {
173af75078fSIntel 	&io_fwd_engine,
174af75078fSIntel 	&mac_fwd_engine,
175d47388f1SCyril Chemparathy 	&mac_swap_engine,
176e9e23a61SCyril Chemparathy 	&flow_gen_engine,
177af75078fSIntel 	&rx_only_engine,
178af75078fSIntel 	&tx_only_engine,
179af75078fSIntel 	&csum_fwd_engine,
180168dfa61SIvan Boule 	&icmp_echo_engine,
1813c156061SJens Freimann 	&noisy_vnf_engine,
1820ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
1830ad778b3SJasvinder Singh 	&softnic_fwd_engine,
1845b590fbeSJasvinder Singh #endif
185af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
186af75078fSIntel 	&ieee1588_fwd_engine,
187af75078fSIntel #endif
188af75078fSIntel 	NULL,
189af75078fSIntel };
190af75078fSIntel 
191401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
19259fcf854SShahaf Shuler uint16_t mempool_flags;
193401b744dSShahaf Shuler 
194af75078fSIntel struct fwd_config cur_fwd_config;
195af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196bf56fce1SZhihong Wang uint32_t retry_enabled;
197bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
199af75078fSIntel 
200af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
202c8798818SIntel                                       * specified on command-line. */
203cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
204d9a191a0SPhil Yang 
205d9a191a0SPhil Yang /*
206d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
207d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
208d9a191a0SPhil Yang  */
209d9a191a0SPhil Yang uint8_t f_quit;
210d9a191a0SPhil Yang 
211af75078fSIntel /*
212af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
213af75078fSIntel  */
214af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
217af75078fSIntel };
218af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
219af75078fSIntel 
22079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
22179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
22279bec05bSKonstantin Ananyev 
22382010ef5SYongseok Koh uint8_t txonly_multi_flow;
22482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22582010ef5SYongseok Koh 
226af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
228af75078fSIntel 
229900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
230900550deSIntel uint8_t dcb_config = 0;
231900550deSIntel 
232900550deSIntel /* Whether the dcb is in testing status */
233900550deSIntel uint8_t dcb_test = 0;
234900550deSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX queues.
237af75078fSIntel  */
2381c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
239af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
240af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
241af75078fSIntel 
242af75078fSIntel /*
243af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2448599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
245af75078fSIntel  */
2468599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2478599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
248af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
249af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
250af75078fSIntel 
251f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
252af75078fSIntel /*
253af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
254af75078fSIntel  */
255af75078fSIntel 
256f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
257f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
258f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259af75078fSIntel 
260f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
262f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
263af75078fSIntel 
264af75078fSIntel /*
265af75078fSIntel  * Configurable value of RX free threshold.
266af75078fSIntel  */
267f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
268af75078fSIntel 
269af75078fSIntel /*
270ce8d5614SIntel  * Configurable value of RX drop enable.
271ce8d5614SIntel  */
272f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
273ce8d5614SIntel 
274ce8d5614SIntel /*
275af75078fSIntel  * Configurable value of TX free threshold.
276af75078fSIntel  */
277f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
278af75078fSIntel 
279af75078fSIntel /*
280af75078fSIntel  * Configurable value of TX RS bit threshold.
281af75078fSIntel  */
282f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
283af75078fSIntel 
284af75078fSIntel /*
2853c156061SJens Freimann  * Configurable value of buffered packets before sending.
2863c156061SJens Freimann  */
2873c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2883c156061SJens Freimann 
2893c156061SJens Freimann /*
2903c156061SJens Freimann  * Configurable value of packet buffer timeout.
2913c156061SJens Freimann  */
2923c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2933c156061SJens Freimann 
2943c156061SJens Freimann /*
2953c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2963c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2973c156061SJens Freimann  */
2983c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2993c156061SJens Freimann 
3003c156061SJens Freimann /*
3013c156061SJens Freimann  * Configurable value of number of random writes done in
3023c156061SJens Freimann  * VNF simulation memory area.
3033c156061SJens Freimann  */
3043c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3053c156061SJens Freimann 
3063c156061SJens Freimann /*
3073c156061SJens Freimann  * Configurable value of number of random reads done in
3083c156061SJens Freimann  * VNF simulation memory area.
3093c156061SJens Freimann  */
3103c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3113c156061SJens Freimann 
3123c156061SJens Freimann /*
3133c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3143c156061SJens Freimann  * VNF simulation memory area.
3153c156061SJens Freimann  */
3163c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3173c156061SJens Freimann 
3183c156061SJens Freimann /*
319af75078fSIntel  * Receive Side Scaling (RSS) configuration.
320af75078fSIntel  */
3218a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
322af75078fSIntel 
323af75078fSIntel /*
324af75078fSIntel  * Port topology configuration
325af75078fSIntel  */
326af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
327af75078fSIntel 
3287741e4cfSIntel /*
3297741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3307741e4cfSIntel  */
3317741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3327741e4cfSIntel 
333af75078fSIntel /*
3347ee3e944SVasily Philipov  * Flow API isolated mode.
3357ee3e944SVasily Philipov  */
3367ee3e944SVasily Philipov uint8_t flow_isolate_all;
3377ee3e944SVasily Philipov 
3387ee3e944SVasily Philipov /*
339bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
340bc202406SDavid Marchand  */
341bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
342bc202406SDavid Marchand 
343bc202406SDavid Marchand /*
3446937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3456937d210SStephen Hemminger  */
3466937d210SStephen Hemminger uint8_t no_device_start = 0;
3476937d210SStephen Hemminger 
3486937d210SStephen Hemminger /*
3498ea656f8SGaetan Rivet  * Enable link status change notification
3508ea656f8SGaetan Rivet  */
3518ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3528ea656f8SGaetan Rivet 
3538ea656f8SGaetan Rivet /*
354284c908cSGaetan Rivet  * Enable device removal notification.
355284c908cSGaetan Rivet  */
356284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
357284c908cSGaetan Rivet 
358fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
359fb73e096SJeff Guo 
3604f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3614f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3624f1ed78eSThomas Monjalon 
363b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
364b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
365b0a9354aSPavan Nikhilesh 
36697b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
36797b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
36897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
36997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
37097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
37197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
37297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
37397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
37497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
37597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
37697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
37797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
37897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
37997b5d8b5SThomas Monjalon };
38097b5d8b5SThomas Monjalon 
381284c908cSGaetan Rivet /*
3823af72783SGaetan Rivet  * Display or mask ether events
3833af72783SGaetan Rivet  * Default to all events except VF_MBOX
3843af72783SGaetan Rivet  */
3853af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3863af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3873af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3883af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
389badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3903af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3913af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
392e505d84cSAnatoly Burakov /*
393e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
394e505d84cSAnatoly Burakov  */
395e505d84cSAnatoly Burakov int do_mlockall = 0;
3963af72783SGaetan Rivet 
3973af72783SGaetan Rivet /*
3987b7e5ba7SIntel  * NIC bypass mode configuration options.
3997b7e5ba7SIntel  */
4007b7e5ba7SIntel 
40150c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
4027b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
403e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4047b7e5ba7SIntel #endif
4057b7e5ba7SIntel 
406e261265eSRadu Nicolau 
40762d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
40862d3216dSReshma Pattan 
40962d3216dSReshma Pattan /*
41062d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
41162d3216dSReshma Pattan  */
41262d3216dSReshma Pattan uint8_t latencystats_enabled;
41362d3216dSReshma Pattan 
41462d3216dSReshma Pattan /*
41562d3216dSReshma Pattan  * Lcore ID to serive latency statistics.
41662d3216dSReshma Pattan  */
41762d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
41862d3216dSReshma Pattan 
41962d3216dSReshma Pattan #endif
42062d3216dSReshma Pattan 
4217b7e5ba7SIntel /*
422af75078fSIntel  * Ethernet device configuration.
423af75078fSIntel  */
424af75078fSIntel struct rte_eth_rxmode rx_mode = {
42535b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
42635b2d13fSOlivier Matz 		/**< Default maximum frame length. */
427af75078fSIntel };
428af75078fSIntel 
42907e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
43007e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
43107e5f7bdSShahaf Shuler };
432fd8c20aaSShahaf Shuler 
433af75078fSIntel struct rte_fdir_conf fdir_conf = {
434af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
435af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
436af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
437d9d5e6f2SJingjing Wu 	.mask = {
43826f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
439d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
440d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
441d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
442d9d5e6f2SJingjing Wu 		},
443d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
444d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
445d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
446d9d5e6f2SJingjing Wu 		},
447d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
448d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
44947b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
45047b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
45147b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
452d9d5e6f2SJingjing Wu 	},
453af75078fSIntel 	.drop_queue = 127,
454af75078fSIntel };
455af75078fSIntel 
4562950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
457af75078fSIntel 
458ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
459ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
460ed30d9b6SIntel 
461ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
462ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
463ed30d9b6SIntel 
464ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
465ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
466ed30d9b6SIntel 
467a4fd5eeeSElza Mathew /*
468a4fd5eeeSElza Mathew  * Display zero values by default for xstats
469a4fd5eeeSElza Mathew  */
470a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
471a4fd5eeeSElza Mathew 
472c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
473c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4747acf894dSStephen Hurd 
475e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4767e4441c8SRemy Horton /* Bitrate statistics */
4777e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
478e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
479e25e6c70SRemy Horton uint8_t bitrate_enabled;
480e25e6c70SRemy Horton #endif
4817e4441c8SRemy Horton 
482b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
483b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
484b40f8d78SJiayu Hu 
485ed30d9b6SIntel /* Forward function declarations */
486c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
48728caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
48828caa76aSZhiyong Yang 						   struct rte_port *port);
489edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
490f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
49176ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
492d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
493cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
494fb73e096SJeff Guo 				enum rte_dev_event_type type,
495fb73e096SJeff Guo 				void *param);
496ce8d5614SIntel 
497ce8d5614SIntel /*
498ce8d5614SIntel  * Check if all the ports are started.
499ce8d5614SIntel  * If yes, return positive value. If not, return zero.
500ce8d5614SIntel  */
501ce8d5614SIntel static int all_ports_started(void);
502ed30d9b6SIntel 
50352f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
50435b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
50552f38a20SJiayu Hu 
506b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
507b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
508b57b66a9SOri Kam 
509af75078fSIntel /*
51098a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
511c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
512c9cafcc8SShahaf Shuler  */
513c9cafcc8SShahaf Shuler int
514c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
515c9cafcc8SShahaf Shuler {
516c9cafcc8SShahaf Shuler 	unsigned int i;
517c9cafcc8SShahaf Shuler 
518c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
519c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
520c9cafcc8SShahaf Shuler 			return 0;
521c9cafcc8SShahaf Shuler 	}
522c9cafcc8SShahaf Shuler 	return 1;
523c9cafcc8SShahaf Shuler }
524c9cafcc8SShahaf Shuler 
525c9cafcc8SShahaf Shuler /*
526af75078fSIntel  * Setup default configuration.
527af75078fSIntel  */
528af75078fSIntel static void
529af75078fSIntel set_default_fwd_lcores_config(void)
530af75078fSIntel {
531af75078fSIntel 	unsigned int i;
532af75078fSIntel 	unsigned int nb_lc;
5337acf894dSStephen Hurd 	unsigned int sock_num;
534af75078fSIntel 
535af75078fSIntel 	nb_lc = 0;
536af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
537dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
538dbfb8ec7SPhil Yang 			continue;
539c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
540c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
541c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
542c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
543c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
544c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
545c9cafcc8SShahaf Shuler 			}
546c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5477acf894dSStephen Hurd 		}
548f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
549f54fe5eeSStephen Hurd 			continue;
550f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
551af75078fSIntel 	}
552af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
553af75078fSIntel 	nb_cfg_lcores = nb_lcores;
554af75078fSIntel 	nb_fwd_lcores = 1;
555af75078fSIntel }
556af75078fSIntel 
557af75078fSIntel static void
558af75078fSIntel set_def_peer_eth_addrs(void)
559af75078fSIntel {
560af75078fSIntel 	portid_t i;
561af75078fSIntel 
562af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
56335b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
564af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
565af75078fSIntel 	}
566af75078fSIntel }
567af75078fSIntel 
568af75078fSIntel static void
569af75078fSIntel set_default_fwd_ports_config(void)
570af75078fSIntel {
571af75078fSIntel 	portid_t pt_id;
57265a7360cSMatan Azrad 	int i = 0;
573af75078fSIntel 
574effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
57565a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
576af75078fSIntel 
577effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
578effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
579effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
580effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
581effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
582effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
583effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
584effdb8bbSPhil Yang 			}
585effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
586effdb8bbSPhil Yang 		}
587effdb8bbSPhil Yang 	}
588effdb8bbSPhil Yang 
589af75078fSIntel 	nb_cfg_ports = nb_ports;
590af75078fSIntel 	nb_fwd_ports = nb_ports;
591af75078fSIntel }
592af75078fSIntel 
/* Install the complete default forwarding configuration. */
void
set_def_fwd_config(void)
{
	/* Discover enabled lcores/sockets and choose the forwarding lcores. */
	set_default_fwd_lcores_config();
	/* Default peer MAC addresses, one per possible port. */
	set_def_peer_eth_addrs();
	/* Forward on every probed port; also records device sockets. */
	set_default_fwd_ports_config();
}
600af75078fSIntel 
601c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
602c7f5dba7SAnatoly Burakov static int
603c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
604c7f5dba7SAnatoly Burakov {
605c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
606c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
607c7f5dba7SAnatoly Burakov 
608c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
609c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
610c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
611c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
612c7f5dba7SAnatoly Burakov 	 */
613c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
614c7f5dba7SAnatoly Burakov 
615c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
616c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
617c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
618c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
619c7f5dba7SAnatoly Burakov 		return -1;
620c7f5dba7SAnatoly Burakov 	}
621c7f5dba7SAnatoly Burakov 
622c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
623c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
624c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
625c7f5dba7SAnatoly Burakov 
626c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
627c7f5dba7SAnatoly Burakov 
628c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
629c7f5dba7SAnatoly Burakov 
630c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
631c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
632c7f5dba7SAnatoly Burakov 		return -1;
633c7f5dba7SAnatoly Burakov 	}
634c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
635c7f5dba7SAnatoly Burakov 
636c7f5dba7SAnatoly Burakov 	return 0;
637c7f5dba7SAnatoly Burakov }
638c7f5dba7SAnatoly Burakov 
639c7f5dba7SAnatoly Burakov static int
640c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
641c7f5dba7SAnatoly Burakov {
642c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
643c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
644c7f5dba7SAnatoly Burakov 	 */
6459d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
646c7f5dba7SAnatoly Burakov 
647c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
648c7f5dba7SAnatoly Burakov }
649c7f5dba7SAnatoly Burakov 
650c7f5dba7SAnatoly Burakov static void *
651c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
652c7f5dba7SAnatoly Burakov {
653c7f5dba7SAnatoly Burakov 	void *addr;
654c7f5dba7SAnatoly Burakov 	int flags;
655c7f5dba7SAnatoly Burakov 
656c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
657c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
658c7f5dba7SAnatoly Burakov 	if (huge)
659c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
660c7f5dba7SAnatoly Burakov 
661c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
662c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
663c7f5dba7SAnatoly Burakov 		return NULL;
664c7f5dba7SAnatoly Burakov 
665c7f5dba7SAnatoly Burakov 	return addr;
666c7f5dba7SAnatoly Burakov }
667c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area. */
struct extmem_param {
	void *addr;          /* base virtual address of the area */
	size_t len;          /* total length of the area, in bytes */
	size_t pgsz;         /* page size used for the mapping */
	rte_iova_t *iova_table;      /* IOVA address of each page */
	unsigned int iova_table_len; /* number of entries in iova_table */
};
675c7f5dba7SAnatoly Burakov 
/*
 * Allocate an external (non-EAL) memory area large enough for 'nb_mbufs'
 * buffers of 'mbuf_sz' bytes, and collect the IOVA address of every page
 * in it. Page sizes are tried in the order of the 'pgsizes' table until
 * one succeeds; when 'huge' is false the system page size is used instead.
 *
 * On success fills *param (ownership of param->iova_table passes to the
 * caller, who must free() it) and returns 0; returns -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success with this page size - stop trying others */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	/* iovas != NULL implies addr/mem_sz/cur_pgsz/n_pages are all valid */
	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* only reachable after addr was successfully mapped */
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov static int
768c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
769c7f5dba7SAnatoly Burakov {
770c7f5dba7SAnatoly Burakov 	struct extmem_param param;
771c7f5dba7SAnatoly Burakov 	int socket_id, ret;
772c7f5dba7SAnatoly Burakov 
773c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
774c7f5dba7SAnatoly Burakov 
775c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
776c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
777c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
778c7f5dba7SAnatoly Burakov 		/* create our heap */
779c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
780c7f5dba7SAnatoly Burakov 		if (ret < 0) {
781c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
782c7f5dba7SAnatoly Burakov 			return -1;
783c7f5dba7SAnatoly Burakov 		}
784c7f5dba7SAnatoly Burakov 	}
785c7f5dba7SAnatoly Burakov 
786c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
787c7f5dba7SAnatoly Burakov 	if (ret < 0) {
788c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
789c7f5dba7SAnatoly Burakov 		return -1;
790c7f5dba7SAnatoly Burakov 	}
791c7f5dba7SAnatoly Burakov 
792c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
793c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
794c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
795c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
796c7f5dba7SAnatoly Burakov 
797c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
798c7f5dba7SAnatoly Burakov 
799c7f5dba7SAnatoly Burakov 	/* not needed any more */
800c7f5dba7SAnatoly Burakov 	free(param.iova_table);
801c7f5dba7SAnatoly Burakov 
802c7f5dba7SAnatoly Burakov 	if (ret < 0) {
803c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
804c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
805c7f5dba7SAnatoly Burakov 		return -1;
806c7f5dba7SAnatoly Burakov 	}
807c7f5dba7SAnatoly Burakov 
808c7f5dba7SAnatoly Burakov 	/* success */
809c7f5dba7SAnatoly Burakov 
810c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
811c7f5dba7SAnatoly Burakov 			param.len >> 20);
812c7f5dba7SAnatoly Burakov 
813c7f5dba7SAnatoly Burakov 	return 0;
814c7f5dba7SAnatoly Burakov }
8153a0968c8SShahaf Shuler static void
8163a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8173a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8183a0968c8SShahaf Shuler {
8193a0968c8SShahaf Shuler 	uint16_t pid = 0;
8203a0968c8SShahaf Shuler 	int ret;
8213a0968c8SShahaf Shuler 
8223a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8233a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8243a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8253a0968c8SShahaf Shuler 
8263a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8273a0968c8SShahaf Shuler 					memhdr->len);
8283a0968c8SShahaf Shuler 		if (ret) {
8293a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8303a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8313a0968c8SShahaf Shuler 				    "for device %s\n",
8323a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8333a0968c8SShahaf Shuler 		}
8343a0968c8SShahaf Shuler 	}
8353a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8363a0968c8SShahaf Shuler 	if (ret) {
8373a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8383a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8393a0968c8SShahaf Shuler 	}
8403a0968c8SShahaf Shuler }
8413a0968c8SShahaf Shuler 
8423a0968c8SShahaf Shuler static void
8433a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8443a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8453a0968c8SShahaf Shuler {
8463a0968c8SShahaf Shuler 	uint16_t pid = 0;
8473a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8483a0968c8SShahaf Shuler 	int ret;
8493a0968c8SShahaf Shuler 
8503a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8513a0968c8SShahaf Shuler 				  page_size);
8523a0968c8SShahaf Shuler 	if (ret) {
8533a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8543a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8553a0968c8SShahaf Shuler 		return;
8563a0968c8SShahaf Shuler 	}
8573a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8583a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8593a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8603a0968c8SShahaf Shuler 
8613a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8623a0968c8SShahaf Shuler 				      memhdr->len);
8633a0968c8SShahaf Shuler 		if (ret) {
8643a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8653a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8663a0968c8SShahaf Shuler 				    "for device %s\n",
8673a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8683a0968c8SShahaf Shuler 		}
8693a0968c8SShahaf Shuler 	}
8703a0968c8SShahaf Shuler }
871c7f5dba7SAnatoly Burakov 
/*
 * Reserve memzones to serve as pinned external data buffers for
 * 'nb_mbufs' mbufs of 'mbuf_sz' bytes each, and build a descriptor
 * table for them.
 *
 * On success, stores the table in *ext_mem (caller must free() it) and
 * returns the number of descriptors. On failure, sets *ext_mem to NULL
 * (or frees the partial table), sets errno (ENAMETOOLONG or ENOMEM)
 * and returns 0. Memzones reserved before a mid-loop failure are not
 * released - the caller is expected to exit on error.
 */
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	/* elements per zone, then number of zones needed (rounded up) */
	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			/* ext_num = 0 signals failure to the code below */
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		/* prefer IOVA-contiguous 1GB pages, but accept smaller */
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	/* ext_num == 0 here means the loop above failed (or zone_num == 0) */
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
931*72512e18SViacheslav Ovsiienko 
/*
 * Create the mbuf pool for one socket, honouring the allocation mode
 * selected on the command line (mp_alloc_type). Exits the application
 * on failure; returns the created pool otherwise.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* full object size = mbuf header + data segment */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* populate the pool from anonymous (non-EAL) memory */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			/* populate_anon returns the object count; 0 = failure */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			/* register/DMA-map each memory chunk on all ports */
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			/* create the pool on an externally allocated heap */
			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			/* data buffers pinned in dedicated memzones */
			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			/* descriptor table was consumed by the pool ctor */
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* fallthrough from the switch on success; jumped to on failure */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
1036af75078fSIntel 
103720a0286fSLiu Xiaofeng /*
103820a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
103920a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
104020a0286fSLiu Xiaofeng  */
104120a0286fSLiu Xiaofeng static int
104220a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
104320a0286fSLiu Xiaofeng {
104420a0286fSLiu Xiaofeng 	static int warning_once = 0;
104520a0286fSLiu Xiaofeng 
1046c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
104720a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
104820a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
104920a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
105020a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
105120a0286fSLiu Xiaofeng 			       " --numa.\n");
105220a0286fSLiu Xiaofeng 		warning_once = 1;
105320a0286fSLiu Xiaofeng 		return -1;
105420a0286fSLiu Xiaofeng 	}
105520a0286fSLiu Xiaofeng 	return 0;
105620a0286fSLiu Xiaofeng }
105720a0286fSLiu Xiaofeng 
10583f7311baSWei Dai /*
10593f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10603f7311baSWei Dai  * *pid return the port id which has minimal value of
10613f7311baSWei Dai  * max_rx_queues in all ports.
10623f7311baSWei Dai  */
10633f7311baSWei Dai queueid_t
10643f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10653f7311baSWei Dai {
10669e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
10676f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
10683f7311baSWei Dai 	portid_t pi;
10693f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10703f7311baSWei Dai 
10713f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10726f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10736f51deb9SIvan Ilchenko 			continue;
10746f51deb9SIvan Ilchenko 
10756f51deb9SIvan Ilchenko 		max_rxq_valid = true;
10763f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
10773f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
10783f7311baSWei Dai 			*pid = pi;
10793f7311baSWei Dai 		}
10803f7311baSWei Dai 	}
10816f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
10823f7311baSWei Dai }
10833f7311baSWei Dai 
10843f7311baSWei Dai /*
10853f7311baSWei Dai  * Check input rxq is valid or not.
10863f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10873f7311baSWei Dai  * of RX queues of all ports, it is valid.
10883f7311baSWei Dai  * if valid, return 0, else return -1
10893f7311baSWei Dai  */
10903f7311baSWei Dai int
10913f7311baSWei Dai check_nb_rxq(queueid_t rxq)
10923f7311baSWei Dai {
10933f7311baSWei Dai 	queueid_t allowed_max_rxq;
10943f7311baSWei Dai 	portid_t pid = 0;
10953f7311baSWei Dai 
10963f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
10973f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
10983f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
10993f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
11003f7311baSWei Dai 		       rxq,
11013f7311baSWei Dai 		       allowed_max_rxq,
11023f7311baSWei Dai 		       pid);
11033f7311baSWei Dai 		return -1;
11043f7311baSWei Dai 	}
11053f7311baSWei Dai 	return 0;
11063f7311baSWei Dai }
11073f7311baSWei Dai 
110836db4f6cSWei Dai /*
110936db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
111036db4f6cSWei Dai  * *pid return the port id which has minimal value of
111136db4f6cSWei Dai  * max_tx_queues in all ports.
111236db4f6cSWei Dai  */
111336db4f6cSWei Dai queueid_t
111436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
111536db4f6cSWei Dai {
11169e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11176f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
111836db4f6cSWei Dai 	portid_t pi;
111936db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
112036db4f6cSWei Dai 
112136db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11226f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11236f51deb9SIvan Ilchenko 			continue;
11246f51deb9SIvan Ilchenko 
11256f51deb9SIvan Ilchenko 		max_txq_valid = true;
112636db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
112736db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
112836db4f6cSWei Dai 			*pid = pi;
112936db4f6cSWei Dai 		}
113036db4f6cSWei Dai 	}
11316f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
113236db4f6cSWei Dai }
113336db4f6cSWei Dai 
113436db4f6cSWei Dai /*
113536db4f6cSWei Dai  * Check input txq is valid or not.
113636db4f6cSWei Dai  * If input txq is not greater than any of maximum number
113736db4f6cSWei Dai  * of TX queues of all ports, it is valid.
113836db4f6cSWei Dai  * if valid, return 0, else return -1
113936db4f6cSWei Dai  */
114036db4f6cSWei Dai int
114136db4f6cSWei Dai check_nb_txq(queueid_t txq)
114236db4f6cSWei Dai {
114336db4f6cSWei Dai 	queueid_t allowed_max_txq;
114436db4f6cSWei Dai 	portid_t pid = 0;
114536db4f6cSWei Dai 
114636db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
114736db4f6cSWei Dai 	if (txq > allowed_max_txq) {
114836db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
114936db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
115036db4f6cSWei Dai 		       txq,
115136db4f6cSWei Dai 		       allowed_max_txq,
115236db4f6cSWei Dai 		       pid);
115336db4f6cSWei Dai 		return -1;
115436db4f6cSWei Dai 	}
115536db4f6cSWei Dai 	return 0;
115636db4f6cSWei Dai }
115736db4f6cSWei Dai 
11581c69df45SOri Kam /*
11591c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
11601c69df45SOri Kam  * *pid return the port id which has minimal value of
11611c69df45SOri Kam  * max_hairpin_queues in all ports.
11621c69df45SOri Kam  */
11631c69df45SOri Kam queueid_t
11641c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
11651c69df45SOri Kam {
11669e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
11671c69df45SOri Kam 	portid_t pi;
11681c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
11691c69df45SOri Kam 
11701c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
11711c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
11721c69df45SOri Kam 			*pid = pi;
11731c69df45SOri Kam 			return 0;
11741c69df45SOri Kam 		}
11751c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
11761c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
11771c69df45SOri Kam 			*pid = pi;
11781c69df45SOri Kam 		}
11791c69df45SOri Kam 	}
11801c69df45SOri Kam 	return allowed_max_hairpinq;
11811c69df45SOri Kam }
11821c69df45SOri Kam 
11831c69df45SOri Kam /*
11841c69df45SOri Kam  * Check input hairpin is valid or not.
11851c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
11861c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
11871c69df45SOri Kam  * if valid, return 0, else return -1
11881c69df45SOri Kam  */
11891c69df45SOri Kam int
11901c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
11911c69df45SOri Kam {
11921c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
11931c69df45SOri Kam 	portid_t pid = 0;
11941c69df45SOri Kam 
11951c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
11961c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
11971c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
11981c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
11991c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
12001c69df45SOri Kam 		return -1;
12011c69df45SOri Kam 	}
12021c69df45SOri Kam 	return 0;
12031c69df45SOri Kam }
12041c69df45SOri Kam 
1205af75078fSIntel static void
1206af75078fSIntel init_config(void)
1207af75078fSIntel {
1208ce8d5614SIntel 	portid_t pid;
1209af75078fSIntel 	struct rte_port *port;
1210af75078fSIntel 	struct rte_mempool *mbp;
1211af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1212af75078fSIntel 	lcoreid_t  lc_id;
12137acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1214b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
121552f38a20SJiayu Hu 	uint32_t gso_types;
121633f9630fSSunil Kumar Kori 	uint16_t data_size;
121733f9630fSSunil Kumar Kori 	bool warning = 0;
1218c73a9071SWei Dai 	int k;
12196f51deb9SIvan Ilchenko 	int ret;
1220af75078fSIntel 
12217acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1222487f9a59SYulong Pei 
1223af75078fSIntel 	/* Configuration of logical cores. */
1224af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1225af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1226fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1227af75078fSIntel 	if (fwd_lcores == NULL) {
1228ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1229ce8d5614SIntel 							"failed\n", nb_lcores);
1230af75078fSIntel 	}
1231af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1232af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1233af75078fSIntel 					       sizeof(struct fwd_lcore),
1234fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1235af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1236ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1237ce8d5614SIntel 								"failed\n");
1238af75078fSIntel 		}
1239af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1240af75078fSIntel 	}
1241af75078fSIntel 
12427d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1243ce8d5614SIntel 		port = &ports[pid];
12448b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1245fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1246384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
12476f51deb9SIvan Ilchenko 
12486f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
12496f51deb9SIvan Ilchenko 		if (ret != 0)
12506f51deb9SIvan Ilchenko 			rte_exit(EXIT_FAILURE,
12516f51deb9SIvan Ilchenko 				 "rte_eth_dev_info_get() failed\n");
12527c45f6c0SFerruh Yigit 
125307e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
125407e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
125507e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
125607e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1257b6ea6408SIntel 		if (numa_support) {
1258b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1259b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1260b6ea6408SIntel 			else {
1261b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
126220a0286fSLiu Xiaofeng 
126329841336SPhil Yang 				/*
126429841336SPhil Yang 				 * if socket_id is invalid,
126529841336SPhil Yang 				 * set to the first available socket.
126629841336SPhil Yang 				 */
126720a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
126829841336SPhil Yang 					socket_id = socket_ids[0];
1269b6ea6408SIntel 				port_per_socket[socket_id]++;
1270b6ea6408SIntel 			}
1271b6ea6408SIntel 		}
1272b6ea6408SIntel 
1273c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1274c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1275c73a9071SWei Dai 			port->rx_conf[k].offloads =
1276c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1277c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1278c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1279c73a9071SWei Dai 			port->tx_conf[k].offloads =
1280c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1281c73a9071SWei Dai 
1282ce8d5614SIntel 		/* set flag to initialize port/queue */
1283ce8d5614SIntel 		port->need_reconfig = 1;
1284ce8d5614SIntel 		port->need_reconfig_queues = 1;
1285c18feafaSDekel Peled 		port->tx_metadata = 0;
128633f9630fSSunil Kumar Kori 
128733f9630fSSunil Kumar Kori 		/* Check for maximum number of segments per MTU. Accordingly
128833f9630fSSunil Kumar Kori 		 * update the mbuf data size.
128933f9630fSSunil Kumar Kori 		 */
1290163fbaafSFerruh Yigit 		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1291163fbaafSFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
129233f9630fSSunil Kumar Kori 			data_size = rx_mode.max_rx_pkt_len /
129333f9630fSSunil Kumar Kori 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
129433f9630fSSunil Kumar Kori 
129533f9630fSSunil Kumar Kori 			if ((data_size + RTE_PKTMBUF_HEADROOM) >
129633f9630fSSunil Kumar Kori 							mbuf_data_size) {
129733f9630fSSunil Kumar Kori 				mbuf_data_size = data_size +
129833f9630fSSunil Kumar Kori 						 RTE_PKTMBUF_HEADROOM;
129933f9630fSSunil Kumar Kori 				warning = 1;
1300ce8d5614SIntel 			}
130133f9630fSSunil Kumar Kori 		}
130233f9630fSSunil Kumar Kori 	}
130333f9630fSSunil Kumar Kori 
130433f9630fSSunil Kumar Kori 	if (warning)
130533f9630fSSunil Kumar Kori 		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
130633f9630fSSunil Kumar Kori 			    mbuf_data_size);
1307ce8d5614SIntel 
13083ab64341SOlivier Matz 	/*
13093ab64341SOlivier Matz 	 * Create pools of mbuf.
13103ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
13113ab64341SOlivier Matz 	 * socket 0 memory by default.
13123ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
13133ab64341SOlivier Matz 	 *
13143ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
13153ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
13163ab64341SOlivier Matz 	 */
13173ab64341SOlivier Matz 	if (param_total_num_mbufs)
13183ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
13193ab64341SOlivier Matz 	else {
13203ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
13213ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
13223ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
13233ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
13243ab64341SOlivier Matz 	}
13253ab64341SOlivier Matz 
1326b6ea6408SIntel 	if (numa_support) {
1327b6ea6408SIntel 		uint8_t i;
1328ce8d5614SIntel 
1329c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
1330401b744dSShahaf Shuler 			mempools[i] = mbuf_pool_create(mbuf_data_size,
1331401b744dSShahaf Shuler 						       nb_mbuf_per_pool,
1332c9cafcc8SShahaf Shuler 						       socket_ids[i]);
13333ab64341SOlivier Matz 	} else {
13343ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
1335401b744dSShahaf Shuler 			mempools[0] = mbuf_pool_create(mbuf_data_size,
1336401b744dSShahaf Shuler 						       nb_mbuf_per_pool, 0);
13373ab64341SOlivier Matz 		else
1338401b744dSShahaf Shuler 			mempools[socket_num] = mbuf_pool_create
1339401b744dSShahaf Shuler 							(mbuf_data_size,
1340401b744dSShahaf Shuler 							 nb_mbuf_per_pool,
13413ab64341SOlivier Matz 							 socket_num);
13423ab64341SOlivier Matz 	}
1343b6ea6408SIntel 
1344b6ea6408SIntel 	init_port_config();
13455886ae07SAdrien Mazarguil 
134652f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1347aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
13485886ae07SAdrien Mazarguil 	/*
13495886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
13505886ae07SAdrien Mazarguil 	 */
13515886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
13528fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
13538fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
13548fd8bebcSAdrien Mazarguil 
13555886ae07SAdrien Mazarguil 		if (mbp == NULL)
13565886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
13575886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
135852f38a20SJiayu Hu 		/* initialize GSO context */
135952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
136052f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
136152f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
136235b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
136335b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
136452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
13655886ae07SAdrien Mazarguil 	}
13665886ae07SAdrien Mazarguil 
1367ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1368ce8d5614SIntel 	if (init_fwd_streams() < 0)
1369ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
13700c0db76fSBernard Iremonger 
13710c0db76fSBernard Iremonger 	fwd_config_setup();
1372b7091f1dSJiayu Hu 
1373b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1374b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1375b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1376b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1377b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1378b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1379b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1380b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1381b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1382b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1383b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1384b7091f1dSJiayu Hu 		}
1385b7091f1dSJiayu Hu 	}
13860ad778b3SJasvinder Singh 
13870ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
13880ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
13890ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
13900ad778b3SJasvinder Singh 			port = &ports[pid];
13910ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
13920ad778b3SJasvinder Singh 
13930ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
13940ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
13950ad778b3SJasvinder Singh 		}
13960ad778b3SJasvinder Singh 	}
13970ad778b3SJasvinder Singh #endif
13980ad778b3SJasvinder Singh 
1399ce8d5614SIntel }
1400ce8d5614SIntel 
14012950a769SDeclan Doherty 
14022950a769SDeclan Doherty void
1403a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
14042950a769SDeclan Doherty {
14052950a769SDeclan Doherty 	struct rte_port *port;
14066f51deb9SIvan Ilchenko 	int ret;
14072950a769SDeclan Doherty 
14082950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
14092950a769SDeclan Doherty 	port = &ports[new_port_id];
14106f51deb9SIvan Ilchenko 
14116f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
14126f51deb9SIvan Ilchenko 	if (ret != 0)
14136f51deb9SIvan Ilchenko 		return;
14142950a769SDeclan Doherty 
14152950a769SDeclan Doherty 	/* set flag to initialize port/queue */
14162950a769SDeclan Doherty 	port->need_reconfig = 1;
14172950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1418a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
14192950a769SDeclan Doherty 
14202950a769SDeclan Doherty 	init_port_config();
14212950a769SDeclan Doherty }
14222950a769SDeclan Doherty 
14232950a769SDeclan Doherty 
1424ce8d5614SIntel int
1425ce8d5614SIntel init_fwd_streams(void)
1426ce8d5614SIntel {
1427ce8d5614SIntel 	portid_t pid;
1428ce8d5614SIntel 	struct rte_port *port;
1429ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
14305a8fb55cSReshma Pattan 	queueid_t q;
1431ce8d5614SIntel 
1432ce8d5614SIntel 	/* set socket id according to numa or not */
14337d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1434ce8d5614SIntel 		port = &ports[pid];
1435ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1436ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1437ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1438ce8d5614SIntel 				port->dev_info.max_rx_queues);
1439ce8d5614SIntel 			return -1;
1440ce8d5614SIntel 		}
1441ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1442ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1443ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1444ce8d5614SIntel 				port->dev_info.max_tx_queues);
1445ce8d5614SIntel 			return -1;
1446ce8d5614SIntel 		}
144720a0286fSLiu Xiaofeng 		if (numa_support) {
144820a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
144920a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
145020a0286fSLiu Xiaofeng 			else {
1451b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
145220a0286fSLiu Xiaofeng 
145329841336SPhil Yang 				/*
145429841336SPhil Yang 				 * if socket_id is invalid,
145529841336SPhil Yang 				 * set to the first available socket.
145629841336SPhil Yang 				 */
145720a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
145829841336SPhil Yang 					port->socket_id = socket_ids[0];
145920a0286fSLiu Xiaofeng 			}
146020a0286fSLiu Xiaofeng 		}
1461b6ea6408SIntel 		else {
1462b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1463af75078fSIntel 				port->socket_id = 0;
1464b6ea6408SIntel 			else
1465b6ea6408SIntel 				port->socket_id = socket_num;
1466b6ea6408SIntel 		}
1467af75078fSIntel 	}
1468af75078fSIntel 
14695a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
14705a8fb55cSReshma Pattan 	if (q == 0) {
14715a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
14725a8fb55cSReshma Pattan 		return -1;
14735a8fb55cSReshma Pattan 	}
14745a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1475ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1476ce8d5614SIntel 		return 0;
1477ce8d5614SIntel 	/* clear the old */
1478ce8d5614SIntel 	if (fwd_streams != NULL) {
1479ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1480ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1481ce8d5614SIntel 				continue;
1482ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1483ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1484af75078fSIntel 		}
1485ce8d5614SIntel 		rte_free(fwd_streams);
1486ce8d5614SIntel 		fwd_streams = NULL;
1487ce8d5614SIntel 	}
1488ce8d5614SIntel 
1489ce8d5614SIntel 	/* init new */
1490ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
14911f84c469SMatan Azrad 	if (nb_fwd_streams) {
1492ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
14931f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
14941f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1495ce8d5614SIntel 		if (fwd_streams == NULL)
14961f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
14971f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
14981f84c469SMatan Azrad 				 nb_fwd_streams);
1499ce8d5614SIntel 
1500af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
15011f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
15021f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
15031f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1504ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
15051f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
15061f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
15071f84c469SMatan Azrad 		}
1508af75078fSIntel 	}
1509ce8d5614SIntel 
1510ce8d5614SIntel 	return 0;
1511af75078fSIntel }
1512af75078fSIntel 
1513af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1514af75078fSIntel static void
1515af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1516af75078fSIntel {
1517af75078fSIntel 	unsigned int total_burst;
1518af75078fSIntel 	unsigned int nb_burst;
1519af75078fSIntel 	unsigned int burst_stats[3];
1520af75078fSIntel 	uint16_t pktnb_stats[3];
1521af75078fSIntel 	uint16_t nb_pkt;
1522af75078fSIntel 	int burst_percent[3];
1523af75078fSIntel 
1524af75078fSIntel 	/*
1525af75078fSIntel 	 * First compute the total number of packet bursts and the
1526af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1527af75078fSIntel 	 */
1528af75078fSIntel 	total_burst = 0;
1529af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1530af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1531af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1532af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1533af75078fSIntel 		if (nb_burst == 0)
1534af75078fSIntel 			continue;
1535af75078fSIntel 		total_burst += nb_burst;
1536af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1537af75078fSIntel 			burst_stats[1] = burst_stats[0];
1538af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1539af75078fSIntel 			burst_stats[0] = nb_burst;
1540af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1541fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1542fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1543fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1544af75078fSIntel 		}
1545af75078fSIntel 	}
1546af75078fSIntel 	if (total_burst == 0)
1547af75078fSIntel 		return;
1548af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1549af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1550af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1551af75078fSIntel 	if (burst_stats[0] == total_burst) {
1552af75078fSIntel 		printf("]\n");
1553af75078fSIntel 		return;
1554af75078fSIntel 	}
1555af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1556af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1557af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1558af75078fSIntel 		return;
1559af75078fSIntel 	}
1560af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1561af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1562af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1563af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1564af75078fSIntel 		return;
1565af75078fSIntel 	}
1566af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1567af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1568af75078fSIntel }
1569af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1570af75078fSIntel 
1571af75078fSIntel static void
1572af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1573af75078fSIntel {
1574af75078fSIntel 	struct fwd_stream *fs;
1575af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1576af75078fSIntel 
1577af75078fSIntel 	fs = fwd_streams[stream_id];
1578af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1579af75078fSIntel 	    (fs->fwd_dropped == 0))
1580af75078fSIntel 		return;
1581af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1582af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1583af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1584af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1585c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1586c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1587af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1588af75078fSIntel 
1589af75078fSIntel 	/* if checksum mode */
1590af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1591c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1592c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1593c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
159458d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
159558d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
159694d65546SDavid Marchand 	} else {
159794d65546SDavid Marchand 		printf("\n");
1598af75078fSIntel 	}
1599af75078fSIntel 
1600af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1601af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1602af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1603af75078fSIntel #endif
1604af75078fSIntel }
1605af75078fSIntel 
160653324971SDavid Marchand void
160753324971SDavid Marchand fwd_stats_display(void)
160853324971SDavid Marchand {
160953324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
161053324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
161153324971SDavid Marchand 	struct {
161253324971SDavid Marchand 		struct fwd_stream *rx_stream;
161353324971SDavid Marchand 		struct fwd_stream *tx_stream;
161453324971SDavid Marchand 		uint64_t tx_dropped;
161553324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
161653324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
161753324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
161853324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
161953324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
162053324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
162153324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
162253324971SDavid Marchand 	struct rte_eth_stats stats;
162353324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
162453324971SDavid Marchand 	uint64_t fwd_cycles = 0;
162553324971SDavid Marchand #endif
162653324971SDavid Marchand 	uint64_t total_recv = 0;
162753324971SDavid Marchand 	uint64_t total_xmit = 0;
162853324971SDavid Marchand 	struct rte_port *port;
162953324971SDavid Marchand 	streamid_t sm_id;
163053324971SDavid Marchand 	portid_t pt_id;
163153324971SDavid Marchand 	int i;
163253324971SDavid Marchand 
163353324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
163453324971SDavid Marchand 
163553324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
163653324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
163753324971SDavid Marchand 
163853324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
163953324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
164053324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
164153324971SDavid Marchand 		} else {
164253324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
164353324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
164453324971SDavid Marchand 		}
164553324971SDavid Marchand 
164653324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
164753324971SDavid Marchand 
164853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
164953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
165053324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
165153324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
165253324971SDavid Marchand 
165353324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
165453324971SDavid Marchand 		fwd_cycles += fs->core_cycles;
165553324971SDavid Marchand #endif
165653324971SDavid Marchand 	}
165753324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
165853324971SDavid Marchand 		uint8_t j;
165953324971SDavid Marchand 
166053324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
166153324971SDavid Marchand 		port = &ports[pt_id];
166253324971SDavid Marchand 
166353324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
166453324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
166553324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
166653324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
166753324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
166853324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
166953324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
167053324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
167153324971SDavid Marchand 
167253324971SDavid Marchand 		total_recv += stats.ipackets;
167353324971SDavid Marchand 		total_xmit += stats.opackets;
167453324971SDavid Marchand 		total_rx_dropped += stats.imissed;
167553324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
167653324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
167753324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
167853324971SDavid Marchand 
167953324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
168053324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
168153324971SDavid Marchand 
168253324971SDavid Marchand 		if (!port->rx_queue_stats_mapping_enabled &&
168353324971SDavid Marchand 		    !port->tx_queue_stats_mapping_enabled) {
168453324971SDavid Marchand 			printf("  RX-packets: %-14"PRIu64
168553324971SDavid Marchand 			       " RX-dropped: %-14"PRIu64
168653324971SDavid Marchand 			       "RX-total: %-"PRIu64"\n",
168753324971SDavid Marchand 			       stats.ipackets, stats.imissed,
168853324971SDavid Marchand 			       stats.ipackets + stats.imissed);
168953324971SDavid Marchand 
169053324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
169153324971SDavid Marchand 				printf("  Bad-ipcsum: %-14"PRIu64
169253324971SDavid Marchand 				       " Bad-l4csum: %-14"PRIu64
169353324971SDavid Marchand 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
169453324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
169553324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
169653324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
169753324971SDavid Marchand 			if (stats.ierrors + stats.rx_nombuf > 0) {
169853324971SDavid Marchand 				printf("  RX-error: %-"PRIu64"\n",
169953324971SDavid Marchand 				       stats.ierrors);
170053324971SDavid Marchand 				printf("  RX-nombufs: %-14"PRIu64"\n",
170153324971SDavid Marchand 				       stats.rx_nombuf);
170253324971SDavid Marchand 			}
170353324971SDavid Marchand 
170453324971SDavid Marchand 			printf("  TX-packets: %-14"PRIu64
170553324971SDavid Marchand 			       " TX-dropped: %-14"PRIu64
170653324971SDavid Marchand 			       "TX-total: %-"PRIu64"\n",
170753324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
170853324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
170953324971SDavid Marchand 		} else {
171053324971SDavid Marchand 			printf("  RX-packets:             %14"PRIu64
171153324971SDavid Marchand 			       "    RX-dropped:%14"PRIu64
171253324971SDavid Marchand 			       "    RX-total:%14"PRIu64"\n",
171353324971SDavid Marchand 			       stats.ipackets, stats.imissed,
171453324971SDavid Marchand 			       stats.ipackets + stats.imissed);
171553324971SDavid Marchand 
171653324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
171753324971SDavid Marchand 				printf("  Bad-ipcsum:%14"PRIu64
171853324971SDavid Marchand 				       "    Bad-l4csum:%14"PRIu64
171953324971SDavid Marchand 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
172053324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
172153324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
172253324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
172353324971SDavid Marchand 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
172453324971SDavid Marchand 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
172553324971SDavid Marchand 				printf("  RX-nombufs:             %14"PRIu64"\n",
172653324971SDavid Marchand 				       stats.rx_nombuf);
172753324971SDavid Marchand 			}
172853324971SDavid Marchand 
172953324971SDavid Marchand 			printf("  TX-packets:             %14"PRIu64
173053324971SDavid Marchand 			       "    TX-dropped:%14"PRIu64
173153324971SDavid Marchand 			       "    TX-total:%14"PRIu64"\n",
173253324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
173353324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
173453324971SDavid Marchand 		}
173553324971SDavid Marchand 
173653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
173753324971SDavid Marchand 		if (ports_stats[pt_id].rx_stream)
173853324971SDavid Marchand 			pkt_burst_stats_display("RX",
173953324971SDavid Marchand 				&ports_stats[pt_id].rx_stream->rx_burst_stats);
174053324971SDavid Marchand 		if (ports_stats[pt_id].tx_stream)
174153324971SDavid Marchand 			pkt_burst_stats_display("TX",
174253324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
174353324971SDavid Marchand #endif
174453324971SDavid Marchand 
174553324971SDavid Marchand 		if (port->rx_queue_stats_mapping_enabled) {
174653324971SDavid Marchand 			printf("\n");
174753324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
174853324971SDavid Marchand 				printf("  Stats reg %2d RX-packets:%14"PRIu64
174953324971SDavid Marchand 				       "     RX-errors:%14"PRIu64
175053324971SDavid Marchand 				       "    RX-bytes:%14"PRIu64"\n",
175153324971SDavid Marchand 				       j, stats.q_ipackets[j],
175253324971SDavid Marchand 				       stats.q_errors[j], stats.q_ibytes[j]);
175353324971SDavid Marchand 			}
175453324971SDavid Marchand 			printf("\n");
175553324971SDavid Marchand 		}
175653324971SDavid Marchand 		if (port->tx_queue_stats_mapping_enabled) {
175753324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
175853324971SDavid Marchand 				printf("  Stats reg %2d TX-packets:%14"PRIu64
175953324971SDavid Marchand 				       "                                 TX-bytes:%14"
176053324971SDavid Marchand 				       PRIu64"\n",
176153324971SDavid Marchand 				       j, stats.q_opackets[j],
176253324971SDavid Marchand 				       stats.q_obytes[j]);
176353324971SDavid Marchand 			}
176453324971SDavid Marchand 		}
176553324971SDavid Marchand 
176653324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
176753324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
176853324971SDavid Marchand 	}
176953324971SDavid Marchand 
177053324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
177153324971SDavid Marchand 	       "%s\n",
177253324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
177353324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
177453324971SDavid Marchand 	       "%-"PRIu64"\n"
177553324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
177653324971SDavid Marchand 	       "%-"PRIu64"\n",
177753324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
177853324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
177953324971SDavid Marchand 	if (total_rx_nombuf > 0)
178053324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
178153324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
178253324971SDavid Marchand 	       "%s\n",
178353324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
178453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
178553324971SDavid Marchand 	if (total_recv > 0)
178653324971SDavid Marchand 		printf("\n  CPU cycles/packet=%u (total cycles="
178753324971SDavid Marchand 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
178853324971SDavid Marchand 		       (unsigned int)(fwd_cycles / total_recv),
178953324971SDavid Marchand 		       fwd_cycles, total_recv);
179053324971SDavid Marchand #endif
179153324971SDavid Marchand }
179253324971SDavid Marchand 
179353324971SDavid Marchand void
179453324971SDavid Marchand fwd_stats_reset(void)
179553324971SDavid Marchand {
179653324971SDavid Marchand 	streamid_t sm_id;
179753324971SDavid Marchand 	portid_t pt_id;
179853324971SDavid Marchand 	int i;
179953324971SDavid Marchand 
180053324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
180153324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
180253324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
180353324971SDavid Marchand 	}
180453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
180553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
180653324971SDavid Marchand 
180753324971SDavid Marchand 		fs->rx_packets = 0;
180853324971SDavid Marchand 		fs->tx_packets = 0;
180953324971SDavid Marchand 		fs->fwd_dropped = 0;
181053324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
181153324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
181253324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
181353324971SDavid Marchand 
181453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
181553324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
181653324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
181753324971SDavid Marchand #endif
181853324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
181953324971SDavid Marchand 		fs->core_cycles = 0;
182053324971SDavid Marchand #endif
182153324971SDavid Marchand 	}
182253324971SDavid Marchand }
182353324971SDavid Marchand 
1824af75078fSIntel static void
18257741e4cfSIntel flush_fwd_rx_queues(void)
1826af75078fSIntel {
1827af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1828af75078fSIntel 	portid_t  rxp;
18297741e4cfSIntel 	portid_t port_id;
1830af75078fSIntel 	queueid_t rxq;
1831af75078fSIntel 	uint16_t  nb_rx;
1832af75078fSIntel 	uint16_t  i;
1833af75078fSIntel 	uint8_t   j;
1834f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1835594302c7SJames Poole 	uint64_t timer_period;
1836f487715fSReshma Pattan 
1837f487715fSReshma Pattan 	/* convert to number of cycles */
1838594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1839af75078fSIntel 
1840af75078fSIntel 	for (j = 0; j < 2; j++) {
18417741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1842af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
18437741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1844f487715fSReshma Pattan 				/**
1845f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1846f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1847f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1848f487715fSReshma Pattan 				* after 1sec timer expiry.
1849f487715fSReshma Pattan 				*/
1850f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1851af75078fSIntel 				do {
18527741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1853013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1854af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1855af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1856f487715fSReshma Pattan 
1857f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1858f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1859f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1860f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1861f487715fSReshma Pattan 					(timer_tsc < timer_period));
1862f487715fSReshma Pattan 				timer_tsc = 0;
1863af75078fSIntel 			}
1864af75078fSIntel 		}
1865af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1866af75078fSIntel 	}
1867af75078fSIntel }
1868af75078fSIntel 
1869af75078fSIntel static void
1870af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1871af75078fSIntel {
1872af75078fSIntel 	struct fwd_stream **fsm;
1873af75078fSIntel 	streamid_t nb_fs;
1874af75078fSIntel 	streamid_t sm_id;
18757e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
18767e4441c8SRemy Horton 	uint64_t tics_per_1sec;
18777e4441c8SRemy Horton 	uint64_t tics_datum;
18787e4441c8SRemy Horton 	uint64_t tics_current;
18794918a357SXiaoyun Li 	uint16_t i, cnt_ports;
1880af75078fSIntel 
18814918a357SXiaoyun Li 	cnt_ports = nb_ports;
18827e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
18837e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
18847e4441c8SRemy Horton #endif
1885af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
1886af75078fSIntel 	nb_fs = fc->stream_nb;
1887af75078fSIntel 	do {
1888af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1889af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
18907e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1891e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
1892e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
18937e4441c8SRemy Horton 			tics_current = rte_rdtsc();
18947e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
18957e4441c8SRemy Horton 				/* Periodic bitrate calculation */
18964918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
1897e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
18984918a357SXiaoyun Li 						ports_ids[i]);
18997e4441c8SRemy Horton 				tics_datum = tics_current;
19007e4441c8SRemy Horton 			}
1901e25e6c70SRemy Horton 		}
19027e4441c8SRemy Horton #endif
190362d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
190465eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
190565eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
190662d3216dSReshma Pattan 			rte_latencystats_update();
190762d3216dSReshma Pattan #endif
190862d3216dSReshma Pattan 
1909af75078fSIntel 	} while (! fc->stopped);
1910af75078fSIntel }
1911af75078fSIntel 
1912af75078fSIntel static int
1913af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1914af75078fSIntel {
1915af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1916af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1917af75078fSIntel 	return 0;
1918af75078fSIntel }
1919af75078fSIntel 
1920af75078fSIntel /*
1921af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1922af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1923af75078fSIntel  */
1924af75078fSIntel static int
1925af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1926af75078fSIntel {
1927af75078fSIntel 	struct fwd_lcore *fwd_lc;
1928af75078fSIntel 	struct fwd_lcore tmp_lcore;
1929af75078fSIntel 
1930af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1931af75078fSIntel 	tmp_lcore = *fwd_lc;
1932af75078fSIntel 	tmp_lcore.stopped = 1;
1933af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1934af75078fSIntel 	return 0;
1935af75078fSIntel }
1936af75078fSIntel 
1937af75078fSIntel /*
1938af75078fSIntel  * Launch packet forwarding:
1939af75078fSIntel  *     - Setup per-port forwarding context.
1940af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1941af75078fSIntel  */
1942af75078fSIntel static void
1943af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1944af75078fSIntel {
1945af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1946af75078fSIntel 	unsigned int i;
1947af75078fSIntel 	unsigned int lc_id;
1948af75078fSIntel 	int diag;
1949af75078fSIntel 
1950af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1951af75078fSIntel 	if (port_fwd_begin != NULL) {
1952af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1953af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1954af75078fSIntel 	}
1955af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1956af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1957af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1958af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1959af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1960af75078fSIntel 						     fwd_lcores[i], lc_id);
1961af75078fSIntel 			if (diag != 0)
1962af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1963af75078fSIntel 				       lc_id, diag);
1964af75078fSIntel 		}
1965af75078fSIntel 	}
1966af75078fSIntel }
1967af75078fSIntel 
/*
 * Launch packet forwarding configuration.
 *
 * Validates that the current forwarding mode has the Rx/Tx queues it
 * needs and that all ports are started, optionally sends `with_tx_first`
 * initial bursts with the TX-only engine, then launches the configured
 * forwarding engine on every forwarding lcore.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* Refuse mode/queue combinations that cannot forward anything. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must be DCB-configured and
	 * more than one forwarding lcore is required.
	 */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets left in the Rx queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Optionally send `with_tx_first` single bursts with the TX-only
	 * engine before starting the configured engine, waiting for each
	 * burst round to complete.
	 */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
2054af75078fSIntel 
2055af75078fSIntel void
2056af75078fSIntel stop_packet_forwarding(void)
2057af75078fSIntel {
2058af75078fSIntel 	port_fwd_end_t port_fwd_end;
2059af75078fSIntel 	lcoreid_t lc_id;
206053324971SDavid Marchand 	portid_t pt_id;
206153324971SDavid Marchand 	int i;
2062af75078fSIntel 
2063af75078fSIntel 	if (test_done) {
2064af75078fSIntel 		printf("Packet forwarding not started\n");
2065af75078fSIntel 		return;
2066af75078fSIntel 	}
2067af75078fSIntel 	printf("Telling cores to stop...");
2068af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2069af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2070af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2071af75078fSIntel 	rte_eal_mp_wait_lcore();
2072af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2073af75078fSIntel 	if (port_fwd_end != NULL) {
2074af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2075af75078fSIntel 			pt_id = fwd_ports_ids[i];
2076af75078fSIntel 			(*port_fwd_end)(pt_id);
2077af75078fSIntel 		}
2078af75078fSIntel 	}
2079c185d42cSDavid Marchand 
208053324971SDavid Marchand 	fwd_stats_display();
208158d475b7SJerin Jacob 
2082af75078fSIntel 	printf("\nDone.\n");
2083af75078fSIntel 	test_done = 1;
2084af75078fSIntel }
2085af75078fSIntel 
2086cfae07fdSOuyang Changchun void
2087cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2088cfae07fdSOuyang Changchun {
2089492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2090cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2091cfae07fdSOuyang Changchun }
2092cfae07fdSOuyang Changchun 
2093cfae07fdSOuyang Changchun void
2094cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2095cfae07fdSOuyang Changchun {
2096492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2097cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2098cfae07fdSOuyang Changchun }
2099cfae07fdSOuyang Changchun 
2100ce8d5614SIntel static int
2101ce8d5614SIntel all_ports_started(void)
2102ce8d5614SIntel {
2103ce8d5614SIntel 	portid_t pi;
2104ce8d5614SIntel 	struct rte_port *port;
2105ce8d5614SIntel 
21067d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2107ce8d5614SIntel 		port = &ports[pi];
2108ce8d5614SIntel 		/* Check if there is a port which is not started */
210941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
211041b05095SBernard Iremonger 			(port->slave_flag == 0))
2111ce8d5614SIntel 			return 0;
2112ce8d5614SIntel 	}
2113ce8d5614SIntel 
2114ce8d5614SIntel 	/* No port is not started */
2115ce8d5614SIntel 	return 1;
2116ce8d5614SIntel }
2117ce8d5614SIntel 
2118148f963fSBruce Richardson int
21196018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
21206018eb8cSShahaf Shuler {
21216018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
21226018eb8cSShahaf Shuler 
21236018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
21246018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
21256018eb8cSShahaf Shuler 		return 0;
21266018eb8cSShahaf Shuler 	return 1;
21276018eb8cSShahaf Shuler }
21286018eb8cSShahaf Shuler 
21296018eb8cSShahaf Shuler int
2130edab33b1STetsuya Mukawa all_ports_stopped(void)
2131edab33b1STetsuya Mukawa {
2132edab33b1STetsuya Mukawa 	portid_t pi;
2133edab33b1STetsuya Mukawa 
21347d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21356018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2136edab33b1STetsuya Mukawa 			return 0;
2137edab33b1STetsuya Mukawa 	}
2138edab33b1STetsuya Mukawa 
2139edab33b1STetsuya Mukawa 	return 1;
2140edab33b1STetsuya Mukawa }
2141edab33b1STetsuya Mukawa 
2142edab33b1STetsuya Mukawa int
2143edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2144edab33b1STetsuya Mukawa {
2145edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2146edab33b1STetsuya Mukawa 		return 0;
2147edab33b1STetsuya Mukawa 
2148edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2149edab33b1STetsuya Mukawa 		return 0;
2150edab33b1STetsuya Mukawa 
2151edab33b1STetsuya Mukawa 	return 1;
2152edab33b1STetsuya Mukawa }
2153edab33b1STetsuya Mukawa 
21541c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
21551c69df45SOri Kam static int
21561c69df45SOri Kam setup_hairpin_queues(portid_t pi)
21571c69df45SOri Kam {
21581c69df45SOri Kam 	queueid_t qi;
21591c69df45SOri Kam 	struct rte_eth_hairpin_conf hairpin_conf = {
21601c69df45SOri Kam 		.peer_count = 1,
21611c69df45SOri Kam 	};
21621c69df45SOri Kam 	int i;
21631c69df45SOri Kam 	int diag;
21641c69df45SOri Kam 	struct rte_port *port = &ports[pi];
21651c69df45SOri Kam 
21661c69df45SOri Kam 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
21671c69df45SOri Kam 		hairpin_conf.peers[0].port = pi;
21681c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_rxq;
21691c69df45SOri Kam 		diag = rte_eth_tx_hairpin_queue_setup
21701c69df45SOri Kam 			(pi, qi, nb_txd, &hairpin_conf);
21711c69df45SOri Kam 		i++;
21721c69df45SOri Kam 		if (diag == 0)
21731c69df45SOri Kam 			continue;
21741c69df45SOri Kam 
21751c69df45SOri Kam 		/* Fail to setup rx queue, return */
21761c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
21771c69df45SOri Kam 					RTE_PORT_HANDLING,
21781c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
21791c69df45SOri Kam 			printf("Port %d can not be set back "
21801c69df45SOri Kam 					"to stopped\n", pi);
21811c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
21821c69df45SOri Kam 				"queues\n", pi);
21831c69df45SOri Kam 		/* try to reconfigure queues next time */
21841c69df45SOri Kam 		port->need_reconfig_queues = 1;
21851c69df45SOri Kam 		return -1;
21861c69df45SOri Kam 	}
21871c69df45SOri Kam 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
21881c69df45SOri Kam 		hairpin_conf.peers[0].port = pi;
21891c69df45SOri Kam 		hairpin_conf.peers[0].queue = i + nb_txq;
21901c69df45SOri Kam 		diag = rte_eth_rx_hairpin_queue_setup
21911c69df45SOri Kam 			(pi, qi, nb_rxd, &hairpin_conf);
21921c69df45SOri Kam 		i++;
21931c69df45SOri Kam 		if (diag == 0)
21941c69df45SOri Kam 			continue;
21951c69df45SOri Kam 
21961c69df45SOri Kam 		/* Fail to setup rx queue, return */
21971c69df45SOri Kam 		if (rte_atomic16_cmpset(&(port->port_status),
21981c69df45SOri Kam 					RTE_PORT_HANDLING,
21991c69df45SOri Kam 					RTE_PORT_STOPPED) == 0)
22001c69df45SOri Kam 			printf("Port %d can not be set back "
22011c69df45SOri Kam 					"to stopped\n", pi);
22021c69df45SOri Kam 		printf("Fail to configure port %d hairpin "
22031c69df45SOri Kam 				"queues\n", pi);
22041c69df45SOri Kam 		/* try to reconfigure queues next time */
22051c69df45SOri Kam 		port->need_reconfig_queues = 1;
22061c69df45SOri Kam 		return -1;
22071c69df45SOri Kam 	}
22081c69df45SOri Kam 	return 0;
22091c69df45SOri Kam }
22101c69df45SOri Kam 
/*
 * Start the given port (or all ports when @pid is RTE_PORT_ALL).
 *
 * For each selected port that is currently stopped: reconfigure the
 * device if flagged (need_reconfig), re-create its Rx/Tx and hairpin
 * queues if flagged (need_reconfig_queues), then start the device and
 * move it to the RTE_PORT_STARTED state. On any configuration failure
 * the port is moved back to RTE_PORT_STOPPED, the relevant reconfigure
 * flag is re-armed for the next attempt, and -1 is returned.
 * Returns 0 otherwise.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Atomically claim the port; skip it when not stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* Hairpin queues were requested: the device must
			 * advertise the hairpin capability.
			 */
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				printf("Port %d doesn't support hairpin "
				       "queues\n", pi);
				return -1;
			}
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honour a per-port NUMA override for the
				 * Tx ring when one was configured.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* Rx queue needs an mbuf pool on the
					 * overridden NUMA socket.
					 */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				printf(
				"Port %d: Failed to disable Ptype parsing\n",
				pi);
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* need_check_link_status stays -1 when the loop matched no port. */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
2402ce8d5614SIntel 
2403ce8d5614SIntel void
2404ce8d5614SIntel stop_port(portid_t pid)
2405ce8d5614SIntel {
2406ce8d5614SIntel 	portid_t pi;
2407ce8d5614SIntel 	struct rte_port *port;
2408ce8d5614SIntel 	int need_check_link_status = 0;
2409ce8d5614SIntel 
2410ce8d5614SIntel 	if (dcb_test) {
2411ce8d5614SIntel 		dcb_test = 0;
2412ce8d5614SIntel 		dcb_config = 0;
2413ce8d5614SIntel 	}
24144468635fSMichael Qiu 
24154468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
24164468635fSMichael Qiu 		return;
24174468635fSMichael Qiu 
2418ce8d5614SIntel 	printf("Stopping ports...\n");
2419ce8d5614SIntel 
24207d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
24214468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2422ce8d5614SIntel 			continue;
2423ce8d5614SIntel 
2424a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2425a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2426a8ef3e3aSBernard Iremonger 			continue;
2427a8ef3e3aSBernard Iremonger 		}
2428a8ef3e3aSBernard Iremonger 
24290e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
24300e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
24310e545d30SBernard Iremonger 			continue;
24320e545d30SBernard Iremonger 		}
24330e545d30SBernard Iremonger 
2434ce8d5614SIntel 		port = &ports[pi];
2435ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2436ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2437ce8d5614SIntel 			continue;
2438ce8d5614SIntel 
2439ce8d5614SIntel 		rte_eth_dev_stop(pi);
2440ce8d5614SIntel 
2441ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2442ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2443ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2444ce8d5614SIntel 		need_check_link_status = 1;
2445ce8d5614SIntel 	}
2446bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2447edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2448ce8d5614SIntel 
2449ce8d5614SIntel 	printf("Done\n");
2450ce8d5614SIntel }
2451ce8d5614SIntel 
2452ce6959bfSWisam Jaddo static void
24534f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2454ce6959bfSWisam Jaddo {
24554f1de450SThomas Monjalon 	portid_t i;
24564f1de450SThomas Monjalon 	portid_t new_total = 0;
2457ce6959bfSWisam Jaddo 
24584f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
24594f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
24604f1de450SThomas Monjalon 			array[new_total] = array[i];
24614f1de450SThomas Monjalon 			new_total++;
2462ce6959bfSWisam Jaddo 		}
24634f1de450SThomas Monjalon 	*total = new_total;
24644f1de450SThomas Monjalon }
24654f1de450SThomas Monjalon 
24664f1de450SThomas Monjalon static void
24674f1de450SThomas Monjalon remove_invalid_ports(void)
24684f1de450SThomas Monjalon {
24694f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
24704f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
24714f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2472ce6959bfSWisam Jaddo }
2473ce6959bfSWisam Jaddo 
2474ce8d5614SIntel void
2475ce8d5614SIntel close_port(portid_t pid)
2476ce8d5614SIntel {
2477ce8d5614SIntel 	portid_t pi;
2478ce8d5614SIntel 	struct rte_port *port;
2479ce8d5614SIntel 
24804468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
24814468635fSMichael Qiu 		return;
24824468635fSMichael Qiu 
2483ce8d5614SIntel 	printf("Closing ports...\n");
2484ce8d5614SIntel 
24857d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
24864468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2487ce8d5614SIntel 			continue;
2488ce8d5614SIntel 
2489a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2490a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2491a8ef3e3aSBernard Iremonger 			continue;
2492a8ef3e3aSBernard Iremonger 		}
2493a8ef3e3aSBernard Iremonger 
24940e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
24950e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
24960e545d30SBernard Iremonger 			continue;
24970e545d30SBernard Iremonger 		}
24980e545d30SBernard Iremonger 
2499ce8d5614SIntel 		port = &ports[pi];
2500ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2501d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2502d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2503d4e8ad64SMichael Qiu 			continue;
2504d4e8ad64SMichael Qiu 		}
2505d4e8ad64SMichael Qiu 
2506d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2507ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2508ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2509ce8d5614SIntel 			continue;
2510ce8d5614SIntel 		}
2511ce8d5614SIntel 
2512938a184aSAdrien Mazarguil 		if (port->flow_list)
2513938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2514ce8d5614SIntel 		rte_eth_dev_close(pi);
2515ce8d5614SIntel 
25164f1de450SThomas Monjalon 		remove_invalid_ports();
251723ea57a2SThomas Monjalon 
2518ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2519ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2520b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2521ce8d5614SIntel 	}
2522ce8d5614SIntel 
2523ce8d5614SIntel 	printf("Done\n");
2524ce8d5614SIntel }
2525ce8d5614SIntel 
2526edab33b1STetsuya Mukawa void
252797f1e196SWei Dai reset_port(portid_t pid)
252897f1e196SWei Dai {
252997f1e196SWei Dai 	int diag;
253097f1e196SWei Dai 	portid_t pi;
253197f1e196SWei Dai 	struct rte_port *port;
253297f1e196SWei Dai 
253397f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
253497f1e196SWei Dai 		return;
253597f1e196SWei Dai 
25361cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
25371cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
25381cde1b9aSShougang Wang 		printf("Can not reset port(s), please stop port(s) first.\n");
25391cde1b9aSShougang Wang 		return;
25401cde1b9aSShougang Wang 	}
25411cde1b9aSShougang Wang 
254297f1e196SWei Dai 	printf("Resetting ports...\n");
254397f1e196SWei Dai 
254497f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
254597f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
254697f1e196SWei Dai 			continue;
254797f1e196SWei Dai 
254897f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
254997f1e196SWei Dai 			printf("Please remove port %d from forwarding "
255097f1e196SWei Dai 			       "configuration.\n", pi);
255197f1e196SWei Dai 			continue;
255297f1e196SWei Dai 		}
255397f1e196SWei Dai 
255497f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
255597f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
255697f1e196SWei Dai 			       pi);
255797f1e196SWei Dai 			continue;
255897f1e196SWei Dai 		}
255997f1e196SWei Dai 
256097f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
256197f1e196SWei Dai 		if (diag == 0) {
256297f1e196SWei Dai 			port = &ports[pi];
256397f1e196SWei Dai 			port->need_reconfig = 1;
256497f1e196SWei Dai 			port->need_reconfig_queues = 1;
256597f1e196SWei Dai 		} else {
256697f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
256797f1e196SWei Dai 		}
256897f1e196SWei Dai 	}
256997f1e196SWei Dai 
257097f1e196SWei Dai 	printf("Done\n");
257197f1e196SWei Dai }
257297f1e196SWei Dai 
257397f1e196SWei Dai void
2574edab33b1STetsuya Mukawa attach_port(char *identifier)
2575ce8d5614SIntel {
25764f1ed78eSThomas Monjalon 	portid_t pi;
2577c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2578ce8d5614SIntel 
2579edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2580edab33b1STetsuya Mukawa 
2581edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2582edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2583edab33b1STetsuya Mukawa 		return;
2584ce8d5614SIntel 	}
2585ce8d5614SIntel 
258675b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2587c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2588edab33b1STetsuya Mukawa 		return;
2589c9cce428SThomas Monjalon 	}
2590c9cce428SThomas Monjalon 
25914f1ed78eSThomas Monjalon 	/* first attach mode: event */
25924f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
25934f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
25944f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
25954f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
25964f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
25974f1ed78eSThomas Monjalon 				setup_attached_port(pi);
25984f1ed78eSThomas Monjalon 		return;
25994f1ed78eSThomas Monjalon 	}
26004f1ed78eSThomas Monjalon 
26014f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
260286fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
26034f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
260486fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
260586fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2606c9cce428SThomas Monjalon 		setup_attached_port(pi);
2607c9cce428SThomas Monjalon 	}
260886fa5de1SThomas Monjalon }
2609c9cce428SThomas Monjalon 
2610c9cce428SThomas Monjalon static void
2611c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2612c9cce428SThomas Monjalon {
2613c9cce428SThomas Monjalon 	unsigned int socket_id;
261434fc1051SIvan Ilchenko 	int ret;
2615edab33b1STetsuya Mukawa 
2616931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
261729841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2618931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
261929841336SPhil Yang 		socket_id = socket_ids[0];
2620931126baSBernard Iremonger 	reconfig(pi, socket_id);
262134fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
262234fc1051SIvan Ilchenko 	if (ret != 0)
262334fc1051SIvan Ilchenko 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
262434fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
2625edab33b1STetsuya Mukawa 
26264f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
26274f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
26284f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
26294f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2630edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2631edab33b1STetsuya Mukawa 
2632edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2633edab33b1STetsuya Mukawa 	printf("Done\n");
2634edab33b1STetsuya Mukawa }
2635edab33b1STetsuya Mukawa 
/*
 * Detach the rte_device backing "port_id" and force-close every sibling
 * port that shares that device.  The port must be stopped first; if it was
 * not yet closed, its flow rules are flushed before removal.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		/* removal is only allowed once the port is stopped */
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		/* drop any flow rules still attached to the port */
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	/* drop the now-dangling entries from ports_ids/fwd_ports_ids */
	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
26845f4ec54fSChen Jing D(Mark) 
2685af75078fSIntel void
268655e51c96SNithin Dabilpuram detach_device(char *identifier)
268755e51c96SNithin Dabilpuram {
268855e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
268955e51c96SNithin Dabilpuram 	struct rte_devargs da;
269055e51c96SNithin Dabilpuram 	portid_t port_id;
269155e51c96SNithin Dabilpuram 
269255e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
269355e51c96SNithin Dabilpuram 
269455e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
269555e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
269655e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
269755e51c96SNithin Dabilpuram 		if (da.args)
269855e51c96SNithin Dabilpuram 			free(da.args);
269955e51c96SNithin Dabilpuram 		return;
270055e51c96SNithin Dabilpuram 	}
270155e51c96SNithin Dabilpuram 
270255e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
270355e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
270455e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
270555e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
2706149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
270755e51c96SNithin Dabilpuram 				return;
270855e51c96SNithin Dabilpuram 			}
270955e51c96SNithin Dabilpuram 
271055e51c96SNithin Dabilpuram 			/* sibling ports are forced to be closed */
271155e51c96SNithin Dabilpuram 			if (ports[port_id].flow_list)
271255e51c96SNithin Dabilpuram 				port_flow_flush(port_id);
271355e51c96SNithin Dabilpuram 			ports[port_id].port_status = RTE_PORT_CLOSED;
271455e51c96SNithin Dabilpuram 			printf("Port %u is now closed\n", port_id);
271555e51c96SNithin Dabilpuram 		}
271655e51c96SNithin Dabilpuram 	}
271755e51c96SNithin Dabilpuram 
271855e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
271955e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
272055e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
272155e51c96SNithin Dabilpuram 		return;
272255e51c96SNithin Dabilpuram 	}
272355e51c96SNithin Dabilpuram 
272455e51c96SNithin Dabilpuram 	remove_invalid_ports();
272555e51c96SNithin Dabilpuram 
272655e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
272755e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
272855e51c96SNithin Dabilpuram 	printf("Done\n");
272955e51c96SNithin Dabilpuram }
273055e51c96SNithin Dabilpuram 
273155e51c96SNithin Dabilpuram void
2732af75078fSIntel pmd_test_exit(void)
2733af75078fSIntel {
2734af75078fSIntel 	portid_t pt_id;
2735fb73e096SJeff Guo 	int ret;
2736401b744dSShahaf Shuler 	int i;
2737af75078fSIntel 
27388210ec25SPablo de Lara 	if (test_done == 0)
27398210ec25SPablo de Lara 		stop_packet_forwarding();
27408210ec25SPablo de Lara 
27413a0968c8SShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
27423a0968c8SShahaf Shuler 		if (mempools[i]) {
27433a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
27443a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
27453a0968c8SShahaf Shuler 						     NULL);
27463a0968c8SShahaf Shuler 		}
27473a0968c8SShahaf Shuler 	}
2748d3a274ceSZhihong Wang 	if (ports != NULL) {
2749d3a274ceSZhihong Wang 		no_link_check = 1;
27507d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
275108fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
2752af75078fSIntel 			fflush(stdout);
2753d3a274ceSZhihong Wang 			stop_port(pt_id);
275408fd782bSCristian Dumitrescu 		}
275508fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
275608fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
275708fd782bSCristian Dumitrescu 			fflush(stdout);
2758d3a274ceSZhihong Wang 			close_port(pt_id);
2759af75078fSIntel 		}
2760d3a274ceSZhihong Wang 	}
2761fb73e096SJeff Guo 
2762fb73e096SJeff Guo 	if (hot_plug) {
2763fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
27642049c511SJeff Guo 		if (ret) {
2765fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2766fb73e096SJeff Guo 				"fail to stop device event monitor.");
27672049c511SJeff Guo 			return;
27682049c511SJeff Guo 		}
2769fb73e096SJeff Guo 
27702049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
2771cc1bf307SJeff Guo 			dev_event_callback, NULL);
27722049c511SJeff Guo 		if (ret < 0) {
2773fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
27742049c511SJeff Guo 				"fail to unregister device event callback.\n");
27752049c511SJeff Guo 			return;
27762049c511SJeff Guo 		}
27772049c511SJeff Guo 
27782049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
27792049c511SJeff Guo 		if (ret) {
27802049c511SJeff Guo 			RTE_LOG(ERR, EAL,
27812049c511SJeff Guo 				"fail to disable hotplug handling.\n");
27822049c511SJeff Guo 			return;
27832049c511SJeff Guo 		}
2784fb73e096SJeff Guo 	}
2785401b744dSShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2786401b744dSShahaf Shuler 		if (mempools[i])
2787401b744dSShahaf Shuler 			rte_mempool_free(mempools[i]);
2788401b744dSShahaf Shuler 	}
2789fb73e096SJeff Guo 
2790d3a274ceSZhihong Wang 	printf("\nBye...\n");
2791af75078fSIntel }
2792af75078fSIntel 
/* Handler signature for an interactive test command. */
typedef void (*cmd_func_t)(void);
/* One entry of the test command menu: name and its handler. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in pmd_test_menu (defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2800af75078fSIntel 
2801ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2802af75078fSIntel static void
2803edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2804af75078fSIntel {
2805ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2806ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2807f8244c63SZhiyong Yang 	portid_t portid;
2808f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2809ce8d5614SIntel 	struct rte_eth_link link;
2810e661a08bSIgor Romanov 	int ret;
2811ce8d5614SIntel 
2812ce8d5614SIntel 	printf("Checking link statuses...\n");
2813ce8d5614SIntel 	fflush(stdout);
2814ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2815ce8d5614SIntel 		all_ports_up = 1;
28167d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2817ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2818ce8d5614SIntel 				continue;
2819ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2820e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
2821e661a08bSIgor Romanov 			if (ret < 0) {
2822e661a08bSIgor Romanov 				all_ports_up = 0;
2823e661a08bSIgor Romanov 				if (print_flag == 1)
2824e661a08bSIgor Romanov 					printf("Port %u link get failed: %s\n",
2825e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
2826e661a08bSIgor Romanov 				continue;
2827e661a08bSIgor Romanov 			}
2828ce8d5614SIntel 			/* print link status if flag set */
2829ce8d5614SIntel 			if (print_flag == 1) {
2830ce8d5614SIntel 				if (link.link_status)
2831f8244c63SZhiyong Yang 					printf(
2832f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2833f8244c63SZhiyong Yang 					portid, link.link_speed,
2834ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2835ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2836ce8d5614SIntel 				else
2837f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2838ce8d5614SIntel 				continue;
2839ce8d5614SIntel 			}
2840ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
284109419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2842ce8d5614SIntel 				all_ports_up = 0;
2843ce8d5614SIntel 				break;
2844ce8d5614SIntel 			}
2845ce8d5614SIntel 		}
2846ce8d5614SIntel 		/* after finally printing all link status, get out */
2847ce8d5614SIntel 		if (print_flag == 1)
2848ce8d5614SIntel 			break;
2849ce8d5614SIntel 
2850ce8d5614SIntel 		if (all_ports_up == 0) {
2851ce8d5614SIntel 			fflush(stdout);
2852ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2853ce8d5614SIntel 		}
2854ce8d5614SIntel 
2855ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2856ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2857ce8d5614SIntel 			print_flag = 1;
2858ce8d5614SIntel 		}
28598ea656f8SGaetan Rivet 
28608ea656f8SGaetan Rivet 		if (lsc_interrupt)
28618ea656f8SGaetan Rivet 			break;
2862ce8d5614SIntel 	}
2863af75078fSIntel }
2864af75078fSIntel 
/*
 * Deferred-removal callback: removes the single port encoded in "arg".
 * Limitation: it does not handle removal of multiple ports per device.
 * TODO: the device detach invocation is planned to move from user side to
 * EAL, converting all PMDs to free port resources on ethdev close.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	/* port id was smuggled through the void* by the alarm setter */
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	/* pause forwarding if this port is part of the active config */
	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* the device is gone: skip link polling while stopping the port */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2892284c908cSGaetan Rivet 
289376ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2894d6af1a13SBernard Iremonger static int
2895f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2896d6af1a13SBernard Iremonger 		  void *ret_param)
289776ad4a2dSGaetan Rivet {
289876ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2899d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
290076ad4a2dSGaetan Rivet 
290176ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
2902f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
290376ad4a2dSGaetan Rivet 			port_id, __func__, type);
290476ad4a2dSGaetan Rivet 		fflush(stderr);
29053af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2906f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
290797b5d8b5SThomas Monjalon 			eth_event_desc[type]);
290876ad4a2dSGaetan Rivet 		fflush(stdout);
290976ad4a2dSGaetan Rivet 	}
2910284c908cSGaetan Rivet 
2911284c908cSGaetan Rivet 	switch (type) {
29124f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
29134f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
29144f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
29154f1ed78eSThomas Monjalon 		break;
2916284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
29174f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
29184f1ed78eSThomas Monjalon 			break;
2919284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2920cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2921284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2922284c908cSGaetan Rivet 		break;
2923284c908cSGaetan Rivet 	default:
2924284c908cSGaetan Rivet 		break;
2925284c908cSGaetan Rivet 	}
2926d6af1a13SBernard Iremonger 	return 0;
292776ad4a2dSGaetan Rivet }
292876ad4a2dSGaetan Rivet 
292997b5d8b5SThomas Monjalon static int
293097b5d8b5SThomas Monjalon register_eth_event_callback(void)
293197b5d8b5SThomas Monjalon {
293297b5d8b5SThomas Monjalon 	int ret;
293397b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
293497b5d8b5SThomas Monjalon 
293597b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
293697b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
293797b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
293897b5d8b5SThomas Monjalon 				event,
293997b5d8b5SThomas Monjalon 				eth_event_callback,
294097b5d8b5SThomas Monjalon 				NULL);
294197b5d8b5SThomas Monjalon 		if (ret != 0) {
294297b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
294397b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
294497b5d8b5SThomas Monjalon 			return -1;
294597b5d8b5SThomas Monjalon 		}
294697b5d8b5SThomas Monjalon 	}
294797b5d8b5SThomas Monjalon 
294897b5d8b5SThomas Monjalon 	return 0;
294997b5d8b5SThomas Monjalon }
295097b5d8b5SThomas Monjalon 
/*
 * Bus-level hotplug event callback, invoked from the interrupt thread.
 * On removal it resolves the port backing the device name and schedules a
 * deferred detach; add events are only logged for now.
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	/* unknown event types are reported but still fall through harmlessly */
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
3000fb73e096SJeff Guo 
3001013af9b6SIntel static int
300228caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3003af75078fSIntel {
3004013af9b6SIntel 	uint16_t i;
3005af75078fSIntel 	int diag;
3006013af9b6SIntel 	uint8_t mapping_found = 0;
3007af75078fSIntel 
3008013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3009013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3010013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3011013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3012013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
3013013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
3014013af9b6SIntel 			if (diag != 0)
3015013af9b6SIntel 				return diag;
3016013af9b6SIntel 			mapping_found = 1;
3017af75078fSIntel 		}
3018013af9b6SIntel 	}
3019013af9b6SIntel 	if (mapping_found)
3020013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
3021013af9b6SIntel 	return 0;
3022013af9b6SIntel }
3023013af9b6SIntel 
3024013af9b6SIntel static int
302528caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3026013af9b6SIntel {
3027013af9b6SIntel 	uint16_t i;
3028013af9b6SIntel 	int diag;
3029013af9b6SIntel 	uint8_t mapping_found = 0;
3030013af9b6SIntel 
3031013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3032013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3033013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3034013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3035013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
3036013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
3037013af9b6SIntel 			if (diag != 0)
3038013af9b6SIntel 				return diag;
3039013af9b6SIntel 			mapping_found = 1;
3040013af9b6SIntel 		}
3041013af9b6SIntel 	}
3042013af9b6SIntel 	if (mapping_found)
3043013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
3044013af9b6SIntel 	return 0;
3045013af9b6SIntel }
3046013af9b6SIntel 
3047013af9b6SIntel static void
304828caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3049013af9b6SIntel {
3050013af9b6SIntel 	int diag = 0;
3051013af9b6SIntel 
3052013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
3053af75078fSIntel 	if (diag != 0) {
3054013af9b6SIntel 		if (diag == -ENOTSUP) {
3055013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
3056013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
3057013af9b6SIntel 		}
3058013af9b6SIntel 		else
3059013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3060013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
3061013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3062af75078fSIntel 					pi, diag);
3063af75078fSIntel 	}
3064013af9b6SIntel 
3065013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
3066af75078fSIntel 	if (diag != 0) {
3067013af9b6SIntel 		if (diag == -ENOTSUP) {
3068013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
3069013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
3070013af9b6SIntel 		}
3071013af9b6SIntel 		else
3072013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3073013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
3074013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3075af75078fSIntel 					pi, diag);
3076af75078fSIntel 	}
3077af75078fSIntel }
3078af75078fSIntel 
/*
 * Reset every Rx/Tx queue configuration of "port" to the driver defaults,
 * then re-apply the offloads recorded per queue and any threshold values
 * supplied on the command line (RTE_PMD_PARAM_UNSET means "not supplied").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		/* preserve per-queue offloads across the struct copy below */
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		/* preserve per-queue offloads across the struct copy below */
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
3135f2c5125aSPablo de Lara 
/*
 * Build the initial rte_eth_conf for every detected port: flow-director
 * settings, RSS (only when more than one Rx queue), queue configuration,
 * MAC address, queue stats mappings, and LSC/RMV interrupt flags when the
 * device advertises support and the feature was requested.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			/* restrict requested RSS hash to what the device offers */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			/* single Rx queue: RSS would be pointless */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* DCB configures mq_mode itself; only set it here otherwise */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
3188013af9b6SIntel 
318941b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
319041b05095SBernard Iremonger {
319141b05095SBernard Iremonger 	struct rte_port *port;
319241b05095SBernard Iremonger 
319341b05095SBernard Iremonger 	port = &ports[slave_pid];
319441b05095SBernard Iremonger 	port->slave_flag = 1;
319541b05095SBernard Iremonger }
319641b05095SBernard Iremonger 
319741b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
319841b05095SBernard Iremonger {
319941b05095SBernard Iremonger 	struct rte_port *port;
320041b05095SBernard Iremonger 
320141b05095SBernard Iremonger 	port = &ports[slave_pid];
320241b05095SBernard Iremonger 	port->slave_flag = 0;
320341b05095SBernard Iremonger }
320441b05095SBernard Iremonger 
32050e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
32060e545d30SBernard Iremonger {
32070e545d30SBernard Iremonger 	struct rte_port *port;
32080e545d30SBernard Iremonger 
32090e545d30SBernard Iremonger 	port = &ports[slave_pid];
3210b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3211b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3212b8b8b344SMatan Azrad 		return 1;
3213b8b8b344SMatan Azrad 	return 0;
32140e545d30SBernard Iremonger }
32150e545d30SBernard Iremonger 
/* VLAN ids used to populate the VMDq+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
3222013af9b6SIntel 
3223013af9b6SIntel static  int
3224ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
32251a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
32261a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
32271a572499SJingjing Wu 		 uint8_t pfc_en)
3228013af9b6SIntel {
3229013af9b6SIntel 	uint8_t i;
3230ac7c491cSKonstantin Ananyev 	int32_t rc;
3231ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3232af75078fSIntel 
3233af75078fSIntel 	/*
3234013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3235013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3236af75078fSIntel 	 */
32371a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
32381a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
32391a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
32401a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
32411a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3242013af9b6SIntel 
3243547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
32441a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
32451a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
32461a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
32471a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
32481a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
32491a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3250013af9b6SIntel 
32511a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
32521a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
32531a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
32541a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
32551a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3256af75078fSIntel 		}
3257013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3258f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3259f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3260013af9b6SIntel 		}
3261013af9b6SIntel 
3262013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
326332e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
326432e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
32651a572499SJingjing Wu 	} else {
32661a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
32671a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
32681a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
32691a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3270013af9b6SIntel 
3271ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3272ac7c491cSKonstantin Ananyev 		if (rc != 0)
3273ac7c491cSKonstantin Ananyev 			return rc;
3274ac7c491cSKonstantin Ananyev 
32751a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
32761a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
32771a572499SJingjing Wu 
3278bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3279bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3280bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3281013af9b6SIntel 		}
3282ac7c491cSKonstantin Ananyev 
32831a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3284ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
328532e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
32861a572499SJingjing Wu 	}
32871a572499SJingjing Wu 
32881a572499SJingjing Wu 	if (pfc_en)
32891a572499SJingjing Wu 		eth_conf->dcb_capability_en =
32901a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3291013af9b6SIntel 	else
3292013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3293013af9b6SIntel 
3294013af9b6SIntel 	return 0;
3295013af9b6SIntel }
3296013af9b6SIntel 
/*
 * Reconfigure port "pid" for DCB operation: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), reconfigure the device with it, adjust the global
 * nb_rxq/nb_txq according to the device's capabilities, install the
 * vlan_tags[] filters and mark the port as DCB-enabled.
 *
 * Side effects: sets the globals dcb_config, nb_rxq, nb_txq and
 * rx_free_thresh, and rewrites rte_port->dev_conf.
 *
 * Returns 0 on success, a negative value on any failure.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* Start from the port's current RX/TX mode configuration. */
	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device . */
	/* NOTE(review): nb_rxq is passed for BOTH the RX and TX queue
	 * counts here; if the intent is to honor a differing nb_txq this
	 * should be (pid, nb_rxq, nb_txq, ...) -- confirm upstream.
	 */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	/* Re-read device info: configure above may have changed it. */
	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* With VFs present, only the queues already
			 * reserved for the PF are usable. */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* One queue per traffic class. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	/* Persist the freshly built configuration on the port. */
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
3384af75078fSIntel 
3385ffc468ffSTetsuya Mukawa static void
3386ffc468ffSTetsuya Mukawa init_port(void)
3387ffc468ffSTetsuya Mukawa {
3388ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3389ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3390ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3391ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3392ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3393ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3394ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3395ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3396ffc468ffSTetsuya Mukawa 	}
339729841336SPhil Yang 
339829841336SPhil Yang 	/* Initialize ports NUMA structures */
339929841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
340029841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
340129841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3402ffc468ffSTetsuya Mukawa }
3403ffc468ffSTetsuya Mukawa 
/*
 * Common teardown used from the signal handler: stop and close all
 * ports (pmd_test_exit), then terminate the interactive prompt.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3410d3a274ceSZhihong Wang 
3411d3a274ceSZhihong Wang static void
3412cfea1f30SPablo de Lara print_stats(void)
3413cfea1f30SPablo de Lara {
3414cfea1f30SPablo de Lara 	uint8_t i;
3415cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3416cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3417cfea1f30SPablo de Lara 
3418cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3419cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3420cfea1f30SPablo de Lara 
3421cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3422cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3423cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3424683d1e82SIgor Romanov 
3425683d1e82SIgor Romanov 	fflush(stdout);
3426cfea1f30SPablo de Lara }
3427cfea1f30SPablo de Lara 
/*
 * SIGINT/SIGTERM handler: tear down the capture and latency-stats
 * frameworks, stop all ports via force_quit(), flag termination for
 * main()'s stats loop (f_quit), then restore the default disposition
 * and re-raise the signal so the process exits with the status a
 * caller expects for that signal.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3450d3a274ceSZhihong Wang 
/*
 * testpmd entry point.
 *
 * Startup sequence: install signal handlers, register the log type,
 * initialize the EAL, enumerate probed ports, allocate port structures,
 * parse testpmd's own arguments (those after the EAL ones), configure
 * and start the ports, then either run the interactive prompt or start
 * packet forwarding until the user (or a signal) stops it.
 *
 * Returns EXIT_SUCCESS on clean shutdown; exits via rte_exit() on any
 * fatal initialization failure.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			 "Secondary process type not supported.\n");

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	/* Record the ids of all ports probed by the EAL. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Parse testpmd's own arguments: rte_eal_init() consumed the
	 * first "diag" entries of argv. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Lock pages in memory to avoid page faults in the datapath;
	 * failure is non-fatal. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	/* Optional hotplug support: any failure here aborts startup. */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		/* NOTE(review): this inner "ret" shadows the outer one;
		 * harmless here, but worth renaming upstream. */
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Replay a command file first, if one was given. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		/* Periodically display stats until a signal sets f_quit. */
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}
3664