xref: /dpdk/app/test-pmd/testpmd.c (revision 2b0e0eba2c37a1d53bf179c2e676eb920a6ac85c)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 * NULL-terminated table of all forwarding modes selectable at run time;
 * the first entry ("io") is the default engine (see cur_fwd_eng below).
 * Some engines are only compiled in when their library/PMD is enabled.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
190af75078fSIntel 
191401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
19259fcf854SShahaf Shuler uint16_t mempool_flags;
193401b744dSShahaf Shuler 
194af75078fSIntel struct fwd_config cur_fwd_config;
195af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196bf56fce1SZhihong Wang uint32_t retry_enabled;
197bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
199af75078fSIntel 
200af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
202c8798818SIntel                                       * specified on command-line. */
203cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
204d9a191a0SPhil Yang 
205d9a191a0SPhil Yang /*
206d9a191a0SPhil Yang  * In a container, a process running with the 'stats-period' option cannot be
207d9a191a0SPhil Yang  * terminated. Set this flag to exit the stats period loop once SIGINT/SIGTERM is received.
208d9a191a0SPhil Yang  */
209d9a191a0SPhil Yang uint8_t f_quit;
210d9a191a0SPhil Yang 
211af75078fSIntel /*
212af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
213af75078fSIntel  */
214af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
217af75078fSIntel };
218af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
219af75078fSIntel 
22079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
22179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
22279bec05bSKonstantin Ananyev 
22382010ef5SYongseok Koh uint8_t txonly_multi_flow;
22482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22582010ef5SYongseok Koh 
226af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
228af75078fSIntel 
229900550deSIntel /* whether the current configuration is in DCB mode or not; 0 means it is not in DCB mode */
230900550deSIntel uint8_t dcb_config = 0;
231900550deSIntel 
232900550deSIntel /* Whether the dcb is in testing status */
233900550deSIntel uint8_t dcb_test = 0;
234900550deSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX queues.
237af75078fSIntel  */
238af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
240af75078fSIntel 
241af75078fSIntel /*
242af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2438599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
244af75078fSIntel  */
2458599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2468599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
247af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
249af75078fSIntel 
250f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
251af75078fSIntel /*
252af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
253af75078fSIntel  */
254af75078fSIntel 
255f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
258af75078fSIntel 
259f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
262af75078fSIntel 
263af75078fSIntel /*
264af75078fSIntel  * Configurable value of RX free threshold.
265af75078fSIntel  */
266f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
267af75078fSIntel 
268af75078fSIntel /*
269ce8d5614SIntel  * Configurable value of RX drop enable.
270ce8d5614SIntel  */
271f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
272ce8d5614SIntel 
273ce8d5614SIntel /*
274af75078fSIntel  * Configurable value of TX free threshold.
275af75078fSIntel  */
276f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
277af75078fSIntel 
278af75078fSIntel /*
279af75078fSIntel  * Configurable value of TX RS bit threshold.
280af75078fSIntel  */
281f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
282af75078fSIntel 
283af75078fSIntel /*
2843c156061SJens Freimann  * Configurable value of buffered packets before sending.
2853c156061SJens Freimann  */
2863c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2873c156061SJens Freimann 
2883c156061SJens Freimann /*
2893c156061SJens Freimann  * Configurable value of packet buffer timeout.
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2953c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random writes done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
3123c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3133c156061SJens Freimann  * VNF simulation memory area.
3143c156061SJens Freimann  */
3153c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3163c156061SJens Freimann 
3173c156061SJens Freimann /*
318af75078fSIntel  * Receive Side Scaling (RSS) configuration.
319af75078fSIntel  */
3208a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
321af75078fSIntel 
322af75078fSIntel /*
323af75078fSIntel  * Port topology configuration
324af75078fSIntel  */
325af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
326af75078fSIntel 
3277741e4cfSIntel /*
3287741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3297741e4cfSIntel  */
3307741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3317741e4cfSIntel 
332af75078fSIntel /*
3337ee3e944SVasily Philipov  * Flow API isolated mode.
3347ee3e944SVasily Philipov  */
3357ee3e944SVasily Philipov uint8_t flow_isolate_all;
3367ee3e944SVasily Philipov 
3377ee3e944SVasily Philipov /*
338bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
339bc202406SDavid Marchand  */
340bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
341bc202406SDavid Marchand 
342bc202406SDavid Marchand /*
3438ea656f8SGaetan Rivet  * Enable link status change notification
3448ea656f8SGaetan Rivet  */
3458ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3468ea656f8SGaetan Rivet 
3478ea656f8SGaetan Rivet /*
348284c908cSGaetan Rivet  * Enable device removal notification.
349284c908cSGaetan Rivet  */
350284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
351284c908cSGaetan Rivet 
352fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
353fb73e096SJeff Guo 
3544f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3554f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3564f1ed78eSThomas Monjalon 
/* Pretty printing of ethdev events: maps each rte_eth_event_type value
 * to a human-readable name (indexed by the enum, NULL-terminated at
 * RTE_ETH_EVENT_MAX).
 */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_MAX] = NULL,
};
37197b5d8b5SThomas Monjalon 
372284c908cSGaetan Rivet /*
3733af72783SGaetan Rivet  * Display or mask ether events
3743af72783SGaetan Rivet  * Default to all events except VF_MBOX
3753af72783SGaetan Rivet  */
3763af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3773af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3783af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3793af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
380badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3813af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3823af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
383e505d84cSAnatoly Burakov /*
384e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
385e505d84cSAnatoly Burakov  */
386e505d84cSAnatoly Burakov int do_mlockall = 0;
3873af72783SGaetan Rivet 
3883af72783SGaetan Rivet /*
3897b7e5ba7SIntel  * NIC bypass mode configuration options.
3907b7e5ba7SIntel  */
3917b7e5ba7SIntel 
39250c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3937b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
394e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3957b7e5ba7SIntel #endif
3967b7e5ba7SIntel 
397e261265eSRadu Nicolau 
39862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
39962d3216dSReshma Pattan 
40062d3216dSReshma Pattan /*
40162d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
40262d3216dSReshma Pattan  */
40362d3216dSReshma Pattan uint8_t latencystats_enabled;
40462d3216dSReshma Pattan 
40562d3216dSReshma Pattan /*
40662d3216dSReshma Pattan  * Lcore ID to serve latency statistics.
40762d3216dSReshma Pattan  */
40862d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
40962d3216dSReshma Pattan 
41062d3216dSReshma Pattan #endif
41162d3216dSReshma Pattan 
4127b7e5ba7SIntel /*
413af75078fSIntel  * Ethernet device configuration.
414af75078fSIntel  */
415af75078fSIntel struct rte_eth_rxmode rx_mode = {
416af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
417af75078fSIntel };
418af75078fSIntel 
41907e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
42007e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
42107e5f7bdSShahaf Shuler };
422fd8c20aaSShahaf Shuler 
/*
 * Flow director configuration: disabled by default (RTE_FDIR_MODE_NONE),
 * with full match masks for the IPv4/IPv6/L4-port/MAC/tunnel fields and
 * drop queue index 127.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
445af75078fSIntel 
4462950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
447af75078fSIntel 
448ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
449ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
450ed30d9b6SIntel 
451ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
452ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
453ed30d9b6SIntel 
454ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
455ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
456ed30d9b6SIntel 
457a4fd5eeeSElza Mathew /*
458a4fd5eeeSElza Mathew  * Display zero values by default for xstats
459a4fd5eeeSElza Mathew  */
460a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
461a4fd5eeeSElza Mathew 
462c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
463c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4647acf894dSStephen Hurd 
465e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4667e4441c8SRemy Horton /* Bitrate statistics */
4677e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
468e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
469e25e6c70SRemy Horton uint8_t bitrate_enabled;
470e25e6c70SRemy Horton #endif
4717e4441c8SRemy Horton 
472b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
473b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
474b40f8d78SJiayu Hu 
/*
 * Default header fields used when building a VXLAN encapsulation flow
 * action: IPv4 selected by default, no VLAN tag, UDP destination port
 * 4789 (the standard VXLAN port).
 */
struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.select_tos_ttl = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.ip_tos = 0,
	.ip_ttl = 255,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
4941960be7dSNelio Laranjeiro 
/*
 * Default header fields used when building an NVGRE encapsulation flow
 * action: IPv4 selected by default, no VLAN tag, zero TNI.
 */
struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
509dcd962fcSNelio Laranjeiro 
510ed30d9b6SIntel /* Forward function declarations */
511c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
51228caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
51328caa76aSZhiyong Yang 						   struct rte_port *port);
514edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
515f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
51676ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
517d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
518cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
519fb73e096SJeff Guo 				enum rte_dev_event_type type,
520fb73e096SJeff Guo 				void *param);
521ce8d5614SIntel 
522ce8d5614SIntel /*
523ce8d5614SIntel  * Check if all the ports are started.
524ce8d5614SIntel  * If yes, return positive value. If not, return zero.
525ce8d5614SIntel  */
526ce8d5614SIntel static int all_ports_started(void);
527ed30d9b6SIntel 
52852f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
52952f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
53052f38a20SJiayu Hu 
531af75078fSIntel /*
53298a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
533c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
534c9cafcc8SShahaf Shuler  */
535c9cafcc8SShahaf Shuler int
536c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
537c9cafcc8SShahaf Shuler {
538c9cafcc8SShahaf Shuler 	unsigned int i;
539c9cafcc8SShahaf Shuler 
540c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
541c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
542c9cafcc8SShahaf Shuler 			return 0;
543c9cafcc8SShahaf Shuler 	}
544c9cafcc8SShahaf Shuler 	return 1;
545c9cafcc8SShahaf Shuler }
546c9cafcc8SShahaf Shuler 
547c9cafcc8SShahaf Shuler /*
548af75078fSIntel  * Setup default configuration.
549af75078fSIntel  */
550af75078fSIntel static void
551af75078fSIntel set_default_fwd_lcores_config(void)
552af75078fSIntel {
553af75078fSIntel 	unsigned int i;
554af75078fSIntel 	unsigned int nb_lc;
5557acf894dSStephen Hurd 	unsigned int sock_num;
556af75078fSIntel 
557af75078fSIntel 	nb_lc = 0;
558af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
559dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
560dbfb8ec7SPhil Yang 			continue;
561c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
562c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
563c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
564c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
565c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
566c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
567c9cafcc8SShahaf Shuler 			}
568c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5697acf894dSStephen Hurd 		}
570f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
571f54fe5eeSStephen Hurd 			continue;
572f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
573af75078fSIntel 	}
574af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
575af75078fSIntel 	nb_cfg_lcores = nb_lcores;
576af75078fSIntel 	nb_fwd_lcores = 1;
577af75078fSIntel }
578af75078fSIntel 
579af75078fSIntel static void
580af75078fSIntel set_def_peer_eth_addrs(void)
581af75078fSIntel {
582af75078fSIntel 	portid_t i;
583af75078fSIntel 
584af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
585af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
586af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
587af75078fSIntel 	}
588af75078fSIntel }
589af75078fSIntel 
590af75078fSIntel static void
591af75078fSIntel set_default_fwd_ports_config(void)
592af75078fSIntel {
593af75078fSIntel 	portid_t pt_id;
59465a7360cSMatan Azrad 	int i = 0;
595af75078fSIntel 
596effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
59765a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
598af75078fSIntel 
599effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
600effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
601effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
602effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
603effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
604effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
605effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
606effdb8bbSPhil Yang 			}
607effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
608effdb8bbSPhil Yang 		}
609effdb8bbSPhil Yang 	}
610effdb8bbSPhil Yang 
611af75078fSIntel 	nb_cfg_ports = nb_ports;
612af75078fSIntel 	nb_fwd_ports = nb_ports;
613af75078fSIntel }
614af75078fSIntel 
/*
 * Apply the complete default forwarding configuration: lcores first,
 * then peer MAC addresses, then ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
622af75078fSIntel 
623c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
624c7f5dba7SAnatoly Burakov static int
625c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
626c7f5dba7SAnatoly Burakov {
627c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
628c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
629c7f5dba7SAnatoly Burakov 
630c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
631c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
632c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
633c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
634c7f5dba7SAnatoly Burakov 	 */
635c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
636c7f5dba7SAnatoly Burakov 
637c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
638c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
639c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
640c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
641c7f5dba7SAnatoly Burakov 		return -1;
642c7f5dba7SAnatoly Burakov 	}
643c7f5dba7SAnatoly Burakov 
644c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
645c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
646c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
647c7f5dba7SAnatoly Burakov 
648c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
649c7f5dba7SAnatoly Burakov 
650c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
651c7f5dba7SAnatoly Burakov 
652c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
653c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
654c7f5dba7SAnatoly Burakov 		return -1;
655c7f5dba7SAnatoly Burakov 	}
656c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov 	return 0;
659c7f5dba7SAnatoly Burakov }
660c7f5dba7SAnatoly Burakov 
661c7f5dba7SAnatoly Burakov static int
662c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
663c7f5dba7SAnatoly Burakov {
664c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
665c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
666c7f5dba7SAnatoly Burakov 	 */
6679d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
668c7f5dba7SAnatoly Burakov 
669c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
670c7f5dba7SAnatoly Burakov }
671c7f5dba7SAnatoly Burakov 
672c7f5dba7SAnatoly Burakov static void *
673c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
674c7f5dba7SAnatoly Burakov {
675c7f5dba7SAnatoly Burakov 	void *addr;
676c7f5dba7SAnatoly Burakov 	int flags;
677c7f5dba7SAnatoly Burakov 
678c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
679c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
680c7f5dba7SAnatoly Burakov 	if (huge)
681c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
682c7f5dba7SAnatoly Burakov 
683c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
684c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
685c7f5dba7SAnatoly Burakov 		return NULL;
686c7f5dba7SAnatoly Burakov 
687c7f5dba7SAnatoly Burakov 	return addr;
688c7f5dba7SAnatoly Burakov }
689c7f5dba7SAnatoly Burakov 
690c7f5dba7SAnatoly Burakov struct extmem_param {
691c7f5dba7SAnatoly Burakov 	void *addr;
692c7f5dba7SAnatoly Burakov 	size_t len;
693c7f5dba7SAnatoly Burakov 	size_t pgsz;
694c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
695c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
696c7f5dba7SAnatoly Burakov };
697c7f5dba7SAnatoly Burakov 
698c7f5dba7SAnatoly Burakov static int
699c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
700c7f5dba7SAnatoly Burakov 		bool huge)
701c7f5dba7SAnatoly Burakov {
702c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
703c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
704c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
705c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
706c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
707c7f5dba7SAnatoly Burakov 	void *addr;
708c7f5dba7SAnatoly Burakov 	int ret;
709c7f5dba7SAnatoly Burakov 
710c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
711c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
712c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
713c7f5dba7SAnatoly Burakov 			continue;
714c7f5dba7SAnatoly Burakov 
715c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
716c7f5dba7SAnatoly Burakov 
717c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
718c7f5dba7SAnatoly Burakov 		if (!huge)
719c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
720c7f5dba7SAnatoly Burakov 
721c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
722c7f5dba7SAnatoly Burakov 		if (ret < 0) {
723c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
724c7f5dba7SAnatoly Burakov 			return -1;
725c7f5dba7SAnatoly Burakov 		}
726c7f5dba7SAnatoly Burakov 
727c7f5dba7SAnatoly Burakov 		/* allocate our memory */
728c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
729c7f5dba7SAnatoly Burakov 
730c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
731c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
732c7f5dba7SAnatoly Burakov 		 * try another one.
733c7f5dba7SAnatoly Burakov 		 */
734c7f5dba7SAnatoly Burakov 		if (addr == NULL)
735c7f5dba7SAnatoly Burakov 			continue;
736c7f5dba7SAnatoly Burakov 
737c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
738c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
739c7f5dba7SAnatoly Burakov 
740c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
741c7f5dba7SAnatoly Burakov 
742c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
743c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
744c7f5dba7SAnatoly Burakov 			goto fail;
745c7f5dba7SAnatoly Burakov 		}
746c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
747c7f5dba7SAnatoly Burakov 		if (!huge)
748c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
749c7f5dba7SAnatoly Burakov 
750c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
751c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
752c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
753c7f5dba7SAnatoly Burakov 			size_t offset;
754c7f5dba7SAnatoly Burakov 			void *cur;
755c7f5dba7SAnatoly Burakov 
756c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
757c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
758c7f5dba7SAnatoly Burakov 
759c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
760c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
761c7f5dba7SAnatoly Burakov 
762c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
763c7f5dba7SAnatoly Burakov 
764c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
765c7f5dba7SAnatoly Burakov 		}
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov 		break;
768c7f5dba7SAnatoly Burakov 	}
769c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
770c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
771c7f5dba7SAnatoly Burakov 		return -1;
772c7f5dba7SAnatoly Burakov 
773c7f5dba7SAnatoly Burakov 	param->addr = addr;
774c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
775c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
776c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
777c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
778c7f5dba7SAnatoly Burakov 
779c7f5dba7SAnatoly Burakov 	return 0;
780c7f5dba7SAnatoly Burakov fail:
781c7f5dba7SAnatoly Burakov 	if (iovas)
782c7f5dba7SAnatoly Burakov 		free(iovas);
783c7f5dba7SAnatoly Burakov 	if (addr)
784c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
785c7f5dba7SAnatoly Burakov 
786c7f5dba7SAnatoly Burakov 	return -1;
787c7f5dba7SAnatoly Burakov }
788c7f5dba7SAnatoly Burakov 
789c7f5dba7SAnatoly Burakov static int
790c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
791c7f5dba7SAnatoly Burakov {
792c7f5dba7SAnatoly Burakov 	struct extmem_param param;
793c7f5dba7SAnatoly Burakov 	int socket_id, ret;
794c7f5dba7SAnatoly Burakov 
795c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
796c7f5dba7SAnatoly Burakov 
797c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
798c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
799c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
800c7f5dba7SAnatoly Burakov 		/* create our heap */
801c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
802c7f5dba7SAnatoly Burakov 		if (ret < 0) {
803c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
804c7f5dba7SAnatoly Burakov 			return -1;
805c7f5dba7SAnatoly Burakov 		}
806c7f5dba7SAnatoly Burakov 	}
807c7f5dba7SAnatoly Burakov 
808c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
809c7f5dba7SAnatoly Burakov 	if (ret < 0) {
810c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
811c7f5dba7SAnatoly Burakov 		return -1;
812c7f5dba7SAnatoly Burakov 	}
813c7f5dba7SAnatoly Burakov 
814c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
815c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
816c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
817c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
818c7f5dba7SAnatoly Burakov 
819c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
820c7f5dba7SAnatoly Burakov 
821c7f5dba7SAnatoly Burakov 	/* not needed any more */
822c7f5dba7SAnatoly Burakov 	free(param.iova_table);
823c7f5dba7SAnatoly Burakov 
824c7f5dba7SAnatoly Burakov 	if (ret < 0) {
825c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
826c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
827c7f5dba7SAnatoly Burakov 		return -1;
828c7f5dba7SAnatoly Burakov 	}
829c7f5dba7SAnatoly Burakov 
830c7f5dba7SAnatoly Burakov 	/* success */
831c7f5dba7SAnatoly Burakov 
832c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
833c7f5dba7SAnatoly Burakov 			param.len >> 20);
834c7f5dba7SAnatoly Burakov 
835c7f5dba7SAnatoly Burakov 	return 0;
836c7f5dba7SAnatoly Burakov }
8373a0968c8SShahaf Shuler static void
8383a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8393a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8403a0968c8SShahaf Shuler {
8413a0968c8SShahaf Shuler 	uint16_t pid = 0;
8423a0968c8SShahaf Shuler 	int ret;
8433a0968c8SShahaf Shuler 
8443a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8453a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8463a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8473a0968c8SShahaf Shuler 
8483a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8493a0968c8SShahaf Shuler 					memhdr->len);
8503a0968c8SShahaf Shuler 		if (ret) {
8513a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8523a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8533a0968c8SShahaf Shuler 				    "for device %s\n",
8543a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8553a0968c8SShahaf Shuler 		}
8563a0968c8SShahaf Shuler 	}
8573a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8583a0968c8SShahaf Shuler 	if (ret) {
8593a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8603a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8613a0968c8SShahaf Shuler 	}
8623a0968c8SShahaf Shuler }
8633a0968c8SShahaf Shuler 
8643a0968c8SShahaf Shuler static void
8653a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8663a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8673a0968c8SShahaf Shuler {
8683a0968c8SShahaf Shuler 	uint16_t pid = 0;
8693a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8703a0968c8SShahaf Shuler 	int ret;
8713a0968c8SShahaf Shuler 
8723a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8733a0968c8SShahaf Shuler 				  page_size);
8743a0968c8SShahaf Shuler 	if (ret) {
8753a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8763a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8773a0968c8SShahaf Shuler 		return;
8783a0968c8SShahaf Shuler 	}
8793a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8803a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8813a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8823a0968c8SShahaf Shuler 
8833a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8843a0968c8SShahaf Shuler 				      memhdr->len);
8853a0968c8SShahaf Shuler 		if (ret) {
8863a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8873a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8883a0968c8SShahaf Shuler 				    "for device %s\n",
8893a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8903a0968c8SShahaf Shuler 		}
8913a0968c8SShahaf Shuler 	}
8923a0968c8SShahaf Shuler }
893c7f5dba7SAnatoly Burakov 
894af75078fSIntel /*
895af75078fSIntel  * Configuration initialisation done once at init time.
896af75078fSIntel  */
897401b744dSShahaf Shuler static struct rte_mempool *
898af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
899af75078fSIntel 		 unsigned int socket_id)
900af75078fSIntel {
901af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
902bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
903af75078fSIntel 	uint32_t mb_size;
904af75078fSIntel 
905dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
906af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
907148f963fSBruce Richardson 
908285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
909d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
910d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
911d1eb542eSOlivier Matz 
912c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
913c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
914c7f5dba7SAnatoly Burakov 		{
915c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
916c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
917c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
918c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
919c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
920c7f5dba7SAnatoly Burakov 			break;
921c7f5dba7SAnatoly Burakov 		}
922c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
923c7f5dba7SAnatoly Burakov 		{
924b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
925c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
926148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
92759fcf854SShahaf Shuler 				socket_id, mempool_flags);
92824427bb9SOlivier Matz 			if (rte_mp == NULL)
92924427bb9SOlivier Matz 				goto err;
930b19a0c75SOlivier Matz 
931b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
932b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
933b19a0c75SOlivier Matz 				rte_mp = NULL;
93424427bb9SOlivier Matz 				goto err;
935b19a0c75SOlivier Matz 			}
936b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
937b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
9383a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
939c7f5dba7SAnatoly Burakov 			break;
940c7f5dba7SAnatoly Burakov 		}
941c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
942c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
943c7f5dba7SAnatoly Burakov 		{
944c7f5dba7SAnatoly Burakov 			int heap_socket;
945c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
946c7f5dba7SAnatoly Burakov 
947c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
948c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
949c7f5dba7SAnatoly Burakov 
950c7f5dba7SAnatoly Burakov 			heap_socket =
951c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
952c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
953c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
954c7f5dba7SAnatoly Burakov 
9550e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
9560e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
957ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
958c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
959c7f5dba7SAnatoly Burakov 					heap_socket);
960c7f5dba7SAnatoly Burakov 			break;
961c7f5dba7SAnatoly Burakov 		}
962c7f5dba7SAnatoly Burakov 	default:
963c7f5dba7SAnatoly Burakov 		{
964c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
965c7f5dba7SAnatoly Burakov 		}
966bece7b6cSChristian Ehrhardt 	}
967148f963fSBruce Richardson 
96824427bb9SOlivier Matz err:
969af75078fSIntel 	if (rte_mp == NULL) {
970d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
971d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
972d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
973148f963fSBruce Richardson 	} else if (verbose_level > 0) {
974591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
975af75078fSIntel 	}
976401b744dSShahaf Shuler 	return rte_mp;
977af75078fSIntel }
978af75078fSIntel 
97920a0286fSLiu Xiaofeng /*
98020a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
98120a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
98220a0286fSLiu Xiaofeng  */
98320a0286fSLiu Xiaofeng static int
98420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
98520a0286fSLiu Xiaofeng {
98620a0286fSLiu Xiaofeng 	static int warning_once = 0;
98720a0286fSLiu Xiaofeng 
988c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
98920a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
99020a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
99120a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
99220a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
99320a0286fSLiu Xiaofeng 			       " --numa.\n");
99420a0286fSLiu Xiaofeng 		warning_once = 1;
99520a0286fSLiu Xiaofeng 		return -1;
99620a0286fSLiu Xiaofeng 	}
99720a0286fSLiu Xiaofeng 	return 0;
99820a0286fSLiu Xiaofeng }
99920a0286fSLiu Xiaofeng 
10003f7311baSWei Dai /*
10013f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10023f7311baSWei Dai  * *pid return the port id which has minimal value of
10033f7311baSWei Dai  * max_rx_queues in all ports.
10043f7311baSWei Dai  */
10053f7311baSWei Dai queueid_t
10063f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10073f7311baSWei Dai {
10083f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
10093f7311baSWei Dai 	portid_t pi;
10103f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10113f7311baSWei Dai 
10123f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10133f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
10143f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
10153f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
10163f7311baSWei Dai 			*pid = pi;
10173f7311baSWei Dai 		}
10183f7311baSWei Dai 	}
10193f7311baSWei Dai 	return allowed_max_rxq;
10203f7311baSWei Dai }
10213f7311baSWei Dai 
10223f7311baSWei Dai /*
10233f7311baSWei Dai  * Check input rxq is valid or not.
10243f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10253f7311baSWei Dai  * of RX queues of all ports, it is valid.
10263f7311baSWei Dai  * if valid, return 0, else return -1
10273f7311baSWei Dai  */
10283f7311baSWei Dai int
10293f7311baSWei Dai check_nb_rxq(queueid_t rxq)
10303f7311baSWei Dai {
10313f7311baSWei Dai 	queueid_t allowed_max_rxq;
10323f7311baSWei Dai 	portid_t pid = 0;
10333f7311baSWei Dai 
10343f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
10353f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
10363f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
10373f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
10383f7311baSWei Dai 		       rxq,
10393f7311baSWei Dai 		       allowed_max_rxq,
10403f7311baSWei Dai 		       pid);
10413f7311baSWei Dai 		return -1;
10423f7311baSWei Dai 	}
10433f7311baSWei Dai 	return 0;
10443f7311baSWei Dai }
10453f7311baSWei Dai 
104636db4f6cSWei Dai /*
104736db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
104836db4f6cSWei Dai  * *pid return the port id which has minimal value of
104936db4f6cSWei Dai  * max_tx_queues in all ports.
105036db4f6cSWei Dai  */
105136db4f6cSWei Dai queueid_t
105236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
105336db4f6cSWei Dai {
105436db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
105536db4f6cSWei Dai 	portid_t pi;
105636db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
105736db4f6cSWei Dai 
105836db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
105936db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
106036db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
106136db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
106236db4f6cSWei Dai 			*pid = pi;
106336db4f6cSWei Dai 		}
106436db4f6cSWei Dai 	}
106536db4f6cSWei Dai 	return allowed_max_txq;
106636db4f6cSWei Dai }
106736db4f6cSWei Dai 
106836db4f6cSWei Dai /*
106936db4f6cSWei Dai  * Check input txq is valid or not.
107036db4f6cSWei Dai  * If input txq is not greater than any of maximum number
107136db4f6cSWei Dai  * of TX queues of all ports, it is valid.
107236db4f6cSWei Dai  * if valid, return 0, else return -1
107336db4f6cSWei Dai  */
107436db4f6cSWei Dai int
107536db4f6cSWei Dai check_nb_txq(queueid_t txq)
107636db4f6cSWei Dai {
107736db4f6cSWei Dai 	queueid_t allowed_max_txq;
107836db4f6cSWei Dai 	portid_t pid = 0;
107936db4f6cSWei Dai 
108036db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
108136db4f6cSWei Dai 	if (txq > allowed_max_txq) {
108236db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
108336db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
108436db4f6cSWei Dai 		       txq,
108536db4f6cSWei Dai 		       allowed_max_txq,
108636db4f6cSWei Dai 		       pid);
108736db4f6cSWei Dai 		return -1;
108836db4f6cSWei Dai 	}
108936db4f6cSWei Dai 	return 0;
109036db4f6cSWei Dai }
109136db4f6cSWei Dai 
1092af75078fSIntel static void
1093af75078fSIntel init_config(void)
1094af75078fSIntel {
1095ce8d5614SIntel 	portid_t pid;
1096af75078fSIntel 	struct rte_port *port;
1097af75078fSIntel 	struct rte_mempool *mbp;
1098af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1099af75078fSIntel 	lcoreid_t  lc_id;
11007acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1101b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
110252f38a20SJiayu Hu 	uint32_t gso_types;
1103c73a9071SWei Dai 	int k;
1104af75078fSIntel 
11057acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1106487f9a59SYulong Pei 
1107af75078fSIntel 	/* Configuration of logical cores. */
1108af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1109af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1110fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1111af75078fSIntel 	if (fwd_lcores == NULL) {
1112ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1113ce8d5614SIntel 							"failed\n", nb_lcores);
1114af75078fSIntel 	}
1115af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1116af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1117af75078fSIntel 					       sizeof(struct fwd_lcore),
1118fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1119af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1120ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1121ce8d5614SIntel 								"failed\n");
1122af75078fSIntel 		}
1123af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1124af75078fSIntel 	}
1125af75078fSIntel 
11267d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1127ce8d5614SIntel 		port = &ports[pid];
11288b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1129fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1130384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
1131ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
11327c45f6c0SFerruh Yigit 
113307e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
113407e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
113507e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
113607e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1137c18feafaSDekel Peled 		if (!(port->dev_info.tx_offload_capa &
1138c18feafaSDekel Peled 			DEV_TX_OFFLOAD_MATCH_METADATA))
1139c18feafaSDekel Peled 			port->dev_conf.txmode.offloads &=
1140c18feafaSDekel Peled 				~DEV_TX_OFFLOAD_MATCH_METADATA;
1141b6ea6408SIntel 		if (numa_support) {
1142b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1143b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1144b6ea6408SIntel 			else {
1145b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
114620a0286fSLiu Xiaofeng 
114729841336SPhil Yang 				/*
114829841336SPhil Yang 				 * if socket_id is invalid,
114929841336SPhil Yang 				 * set to the first available socket.
115029841336SPhil Yang 				 */
115120a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
115229841336SPhil Yang 					socket_id = socket_ids[0];
1153b6ea6408SIntel 				port_per_socket[socket_id]++;
1154b6ea6408SIntel 			}
1155b6ea6408SIntel 		}
1156b6ea6408SIntel 
1157c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1158c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1159c73a9071SWei Dai 			port->rx_conf[k].offloads =
1160c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1161c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1162c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1163c73a9071SWei Dai 			port->tx_conf[k].offloads =
1164c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1165c73a9071SWei Dai 
1166ce8d5614SIntel 		/* set flag to initialize port/queue */
1167ce8d5614SIntel 		port->need_reconfig = 1;
1168ce8d5614SIntel 		port->need_reconfig_queues = 1;
1169c18feafaSDekel Peled 		port->tx_metadata = 0;
1170ce8d5614SIntel 	}
1171ce8d5614SIntel 
11723ab64341SOlivier Matz 	/*
11733ab64341SOlivier Matz 	 * Create pools of mbuf.
11743ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
11753ab64341SOlivier Matz 	 * socket 0 memory by default.
11763ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
11773ab64341SOlivier Matz 	 *
11783ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
11793ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
11803ab64341SOlivier Matz 	 */
11813ab64341SOlivier Matz 	if (param_total_num_mbufs)
11823ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
11833ab64341SOlivier Matz 	else {
11843ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
11853ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
11863ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
11873ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
11883ab64341SOlivier Matz 	}
11893ab64341SOlivier Matz 
1190b6ea6408SIntel 	if (numa_support) {
1191b6ea6408SIntel 		uint8_t i;
1192ce8d5614SIntel 
1193c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
1194401b744dSShahaf Shuler 			mempools[i] = mbuf_pool_create(mbuf_data_size,
1195401b744dSShahaf Shuler 						       nb_mbuf_per_pool,
1196c9cafcc8SShahaf Shuler 						       socket_ids[i]);
11973ab64341SOlivier Matz 	} else {
11983ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
1199401b744dSShahaf Shuler 			mempools[0] = mbuf_pool_create(mbuf_data_size,
1200401b744dSShahaf Shuler 						       nb_mbuf_per_pool, 0);
12013ab64341SOlivier Matz 		else
1202401b744dSShahaf Shuler 			mempools[socket_num] = mbuf_pool_create
1203401b744dSShahaf Shuler 							(mbuf_data_size,
1204401b744dSShahaf Shuler 							 nb_mbuf_per_pool,
12053ab64341SOlivier Matz 							 socket_num);
12063ab64341SOlivier Matz 	}
1207b6ea6408SIntel 
1208b6ea6408SIntel 	init_port_config();
12095886ae07SAdrien Mazarguil 
121052f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1211aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
12125886ae07SAdrien Mazarguil 	/*
12135886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
12145886ae07SAdrien Mazarguil 	 */
12155886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
12168fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
12178fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
12188fd8bebcSAdrien Mazarguil 
12195886ae07SAdrien Mazarguil 		if (mbp == NULL)
12205886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
12215886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
122252f38a20SJiayu Hu 		/* initialize GSO context */
122352f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
122452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
122552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
122652f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
122752f38a20SJiayu Hu 			ETHER_CRC_LEN;
122852f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
12295886ae07SAdrien Mazarguil 	}
12305886ae07SAdrien Mazarguil 
1231ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1232ce8d5614SIntel 	if (init_fwd_streams() < 0)
1233ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
12340c0db76fSBernard Iremonger 
12350c0db76fSBernard Iremonger 	fwd_config_setup();
1236b7091f1dSJiayu Hu 
1237b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1238b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1239b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1240b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1241b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1242b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1243b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1244b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1245b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1246b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1247b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1248b7091f1dSJiayu Hu 		}
1249b7091f1dSJiayu Hu 	}
12500ad778b3SJasvinder Singh 
12510ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
12520ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
12530ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
12540ad778b3SJasvinder Singh 			port = &ports[pid];
12550ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
12560ad778b3SJasvinder Singh 
12570ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
12580ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
12590ad778b3SJasvinder Singh 		}
12600ad778b3SJasvinder Singh 	}
12610ad778b3SJasvinder Singh #endif
12620ad778b3SJasvinder Singh 
1263ce8d5614SIntel }
1264ce8d5614SIntel 
12652950a769SDeclan Doherty 
12662950a769SDeclan Doherty void
1267a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
12682950a769SDeclan Doherty {
12692950a769SDeclan Doherty 	struct rte_port *port;
12702950a769SDeclan Doherty 
12712950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
12722950a769SDeclan Doherty 	port = &ports[new_port_id];
12732950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
12742950a769SDeclan Doherty 
12752950a769SDeclan Doherty 	/* set flag to initialize port/queue */
12762950a769SDeclan Doherty 	port->need_reconfig = 1;
12772950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1278a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
12792950a769SDeclan Doherty 
12802950a769SDeclan Doherty 	init_port_config();
12812950a769SDeclan Doherty }
12822950a769SDeclan Doherty 
12832950a769SDeclan Doherty 
1284ce8d5614SIntel int
1285ce8d5614SIntel init_fwd_streams(void)
1286ce8d5614SIntel {
1287ce8d5614SIntel 	portid_t pid;
1288ce8d5614SIntel 	struct rte_port *port;
1289ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
12905a8fb55cSReshma Pattan 	queueid_t q;
1291ce8d5614SIntel 
1292ce8d5614SIntel 	/* set socket id according to numa or not */
12937d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1294ce8d5614SIntel 		port = &ports[pid];
1295ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1296ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1297ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1298ce8d5614SIntel 				port->dev_info.max_rx_queues);
1299ce8d5614SIntel 			return -1;
1300ce8d5614SIntel 		}
1301ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1302ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1303ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1304ce8d5614SIntel 				port->dev_info.max_tx_queues);
1305ce8d5614SIntel 			return -1;
1306ce8d5614SIntel 		}
130720a0286fSLiu Xiaofeng 		if (numa_support) {
130820a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
130920a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
131020a0286fSLiu Xiaofeng 			else {
1311b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
131220a0286fSLiu Xiaofeng 
131329841336SPhil Yang 				/*
131429841336SPhil Yang 				 * if socket_id is invalid,
131529841336SPhil Yang 				 * set to the first available socket.
131629841336SPhil Yang 				 */
131720a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
131829841336SPhil Yang 					port->socket_id = socket_ids[0];
131920a0286fSLiu Xiaofeng 			}
132020a0286fSLiu Xiaofeng 		}
1321b6ea6408SIntel 		else {
1322b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1323af75078fSIntel 				port->socket_id = 0;
1324b6ea6408SIntel 			else
1325b6ea6408SIntel 				port->socket_id = socket_num;
1326b6ea6408SIntel 		}
1327af75078fSIntel 	}
1328af75078fSIntel 
13295a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
13305a8fb55cSReshma Pattan 	if (q == 0) {
13315a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
13325a8fb55cSReshma Pattan 		return -1;
13335a8fb55cSReshma Pattan 	}
13345a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1335ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1336ce8d5614SIntel 		return 0;
1337ce8d5614SIntel 	/* clear the old */
1338ce8d5614SIntel 	if (fwd_streams != NULL) {
1339ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1340ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1341ce8d5614SIntel 				continue;
1342ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1343ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1344af75078fSIntel 		}
1345ce8d5614SIntel 		rte_free(fwd_streams);
1346ce8d5614SIntel 		fwd_streams = NULL;
1347ce8d5614SIntel 	}
1348ce8d5614SIntel 
1349ce8d5614SIntel 	/* init new */
1350ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
13511f84c469SMatan Azrad 	if (nb_fwd_streams) {
1352ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
13531f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
13541f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1355ce8d5614SIntel 		if (fwd_streams == NULL)
13561f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
13571f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
13581f84c469SMatan Azrad 				 nb_fwd_streams);
1359ce8d5614SIntel 
1360af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
13611f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
13621f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
13631f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1364ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
13651f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
13661f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
13671f84c469SMatan Azrad 		}
1368af75078fSIntel 	}
1369ce8d5614SIntel 
1370ce8d5614SIntel 	return 0;
1371af75078fSIntel }
1372af75078fSIntel 
1373af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1374af75078fSIntel static void
1375af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1376af75078fSIntel {
1377af75078fSIntel 	unsigned int total_burst;
1378af75078fSIntel 	unsigned int nb_burst;
1379af75078fSIntel 	unsigned int burst_stats[3];
1380af75078fSIntel 	uint16_t pktnb_stats[3];
1381af75078fSIntel 	uint16_t nb_pkt;
1382af75078fSIntel 	int burst_percent[3];
1383af75078fSIntel 
1384af75078fSIntel 	/*
1385af75078fSIntel 	 * First compute the total number of packet bursts and the
1386af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1387af75078fSIntel 	 */
1388af75078fSIntel 	total_burst = 0;
1389af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1390af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1391af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1392af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1393af75078fSIntel 		if (nb_burst == 0)
1394af75078fSIntel 			continue;
1395af75078fSIntel 		total_burst += nb_burst;
1396af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1397af75078fSIntel 			burst_stats[1] = burst_stats[0];
1398af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1399af75078fSIntel 			burst_stats[0] = nb_burst;
1400af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1401fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1402fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1403fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1404af75078fSIntel 		}
1405af75078fSIntel 	}
1406af75078fSIntel 	if (total_burst == 0)
1407af75078fSIntel 		return;
1408af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1409af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1410af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1411af75078fSIntel 	if (burst_stats[0] == total_burst) {
1412af75078fSIntel 		printf("]\n");
1413af75078fSIntel 		return;
1414af75078fSIntel 	}
1415af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1416af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1417af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1418af75078fSIntel 		return;
1419af75078fSIntel 	}
1420af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1421af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1422af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1423af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1424af75078fSIntel 		return;
1425af75078fSIntel 	}
1426af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1427af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1428af75078fSIntel }
1429af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1430af75078fSIntel 
1431af75078fSIntel static void
1432af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1433af75078fSIntel {
1434af75078fSIntel 	struct fwd_stream *fs;
1435af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1436af75078fSIntel 
1437af75078fSIntel 	fs = fwd_streams[stream_id];
1438af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1439af75078fSIntel 	    (fs->fwd_dropped == 0))
1440af75078fSIntel 		return;
1441af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1442af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1443af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1444af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1445c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1446c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1447af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1448af75078fSIntel 
1449af75078fSIntel 	/* if checksum mode */
1450af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1451c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1452c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1453c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
145458d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
145558d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
145694d65546SDavid Marchand 	} else {
145794d65546SDavid Marchand 		printf("\n");
1458af75078fSIntel 	}
1459af75078fSIntel 
1460af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1461af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1462af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1463af75078fSIntel #endif
1464af75078fSIntel }
1465af75078fSIntel 
/*
 * Display the forwarding statistics accumulated since the last reset:
 * per-stream stats (only when there are more streams than ports), per-port
 * deltas against the snapshot taken by fwd_stats_reset(), and an
 * accumulated summary over all forwarding ports.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of the software (per-stream) counters. */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	/* Fold every stream's counters into its RX and TX ports. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			/* One stream per port: keep it for burst stats. */
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		/*
		 * Subtract the snapshot taken when forwarding started so
		 * only this run's traffic is reported.
		 */
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		/* Compact layout when no queue-stats mapping is enabled. */
		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			/* Wider layout when queue-stats mapping is on. */
			printf("  RX-packets:             %14"PRIu64
			       "    RX-dropped:%14"PRIu64
			       "    RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       "    Bad-l4csum:%14"PRIu64
				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs:             %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets:             %14"PRIu64
			       "    TX-dropped:%14"PRIu64
			       "    TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		/* Per-queue counters from the mapped stats registers. */
		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "                                 TX-bytes:%14"
				       PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	/* Grand totals over all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
}
165253324971SDavid Marchand 
165353324971SDavid Marchand void
165453324971SDavid Marchand fwd_stats_reset(void)
165553324971SDavid Marchand {
165653324971SDavid Marchand 	streamid_t sm_id;
165753324971SDavid Marchand 	portid_t pt_id;
165853324971SDavid Marchand 	int i;
165953324971SDavid Marchand 
166053324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
166153324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
166253324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
166353324971SDavid Marchand 	}
166453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
166553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
166653324971SDavid Marchand 
166753324971SDavid Marchand 		fs->rx_packets = 0;
166853324971SDavid Marchand 		fs->tx_packets = 0;
166953324971SDavid Marchand 		fs->fwd_dropped = 0;
167053324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
167153324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
167253324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
167353324971SDavid Marchand 
167453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
167553324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
167653324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
167753324971SDavid Marchand #endif
167853324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
167953324971SDavid Marchand 		fs->core_cycles = 0;
168053324971SDavid Marchand #endif
168153324971SDavid Marchand 	}
168253324971SDavid Marchand }
168353324971SDavid Marchand 
1684af75078fSIntel static void
16857741e4cfSIntel flush_fwd_rx_queues(void)
1686af75078fSIntel {
1687af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1688af75078fSIntel 	portid_t  rxp;
16897741e4cfSIntel 	portid_t port_id;
1690af75078fSIntel 	queueid_t rxq;
1691af75078fSIntel 	uint16_t  nb_rx;
1692af75078fSIntel 	uint16_t  i;
1693af75078fSIntel 	uint8_t   j;
1694f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1695594302c7SJames Poole 	uint64_t timer_period;
1696f487715fSReshma Pattan 
1697f487715fSReshma Pattan 	/* convert to number of cycles */
1698594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1699af75078fSIntel 
1700af75078fSIntel 	for (j = 0; j < 2; j++) {
17017741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1702af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
17037741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1704f487715fSReshma Pattan 				/**
1705f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1706f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1707f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1708f487715fSReshma Pattan 				* after 1sec timer expiry.
1709f487715fSReshma Pattan 				*/
1710f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1711af75078fSIntel 				do {
17127741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1713013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1714af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1715af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1716f487715fSReshma Pattan 
1717f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1718f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1719f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1720f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1721f487715fSReshma Pattan 					(timer_tsc < timer_period));
1722f487715fSReshma Pattan 				timer_tsc = 0;
1723af75078fSIntel 			}
1724af75078fSIntel 		}
1725af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1726af75078fSIntel 	}
1727af75078fSIntel }
1728af75078fSIntel 
/*
 * Forwarding loop executed on one lcore: repeatedly run the engine's
 * packet_fwd callback over every stream assigned to this core until the
 * core's "stopped" flag is raised.  When compiled in, also drives the
 * periodic bitrate calculation and latency-stats update on the lcores
 * designated for those features.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This core handles streams [stream_idx, stream_idx + stream_nb). */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Recompute bitrates once per second on the chosen lcore. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1771af75078fSIntel 
1772af75078fSIntel static int
1773af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1774af75078fSIntel {
1775af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1776af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1777af75078fSIntel 	return 0;
1778af75078fSIntel }
1779af75078fSIntel 
1780af75078fSIntel /*
1781af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1782af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1783af75078fSIntel  */
1784af75078fSIntel static int
1785af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1786af75078fSIntel {
1787af75078fSIntel 	struct fwd_lcore *fwd_lc;
1788af75078fSIntel 	struct fwd_lcore tmp_lcore;
1789af75078fSIntel 
1790af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1791af75078fSIntel 	tmp_lcore = *fwd_lc;
1792af75078fSIntel 	tmp_lcore.stopped = 1;
1793af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1794af75078fSIntel 	return 0;
1795af75078fSIntel }
1796af75078fSIntel 
1797af75078fSIntel /*
1798af75078fSIntel  * Launch packet forwarding:
1799af75078fSIntel  *     - Setup per-port forwarding context.
1800af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1801af75078fSIntel  */
1802af75078fSIntel static void
1803af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1804af75078fSIntel {
1805af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1806af75078fSIntel 	unsigned int i;
1807af75078fSIntel 	unsigned int lc_id;
1808af75078fSIntel 	int diag;
1809af75078fSIntel 
1810af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1811af75078fSIntel 	if (port_fwd_begin != NULL) {
1812af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1813af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1814af75078fSIntel 	}
1815af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1816af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1817af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1818af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1819af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1820af75078fSIntel 						     fwd_lcores[i], lc_id);
1821af75078fSIntel 			if (diag != 0)
1822af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1823af75078fSIntel 				       lc_id, diag);
1824af75078fSIntel 		}
1825af75078fSIntel 	}
1826af75078fSIntel }
1827af75078fSIntel 
1828af75078fSIntel /*
1829af75078fSIntel  * Launch packet forwarding configuration.
1830af75078fSIntel  */
/*
 * Launch packet forwarding with the current configuration.
 *
 * Validates that the forwarding mode is usable with the configured queue
 * counts, that all ports are started and (in DCB mode) properly set up,
 * then optionally runs TX-only warm-up bursts before launching the normal
 * forwarding loops on all worker lcores.
 *
 * @param with_tx_first
 *   Number of initial TX-only bursts to send before normal forwarding
 *   (0 to skip).
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* rxonly/txonly need their own queues; other modes need both. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* DCB mode needs every port in DCB mode and more than one core. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets left in the RX queues, unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Take a stats baseline so the final display shows run deltas. */
	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		/* Run the requested number of TX-only warm-up bursts. */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1914af75078fSIntel 
1915af75078fSIntel void
1916af75078fSIntel stop_packet_forwarding(void)
1917af75078fSIntel {
1918af75078fSIntel 	port_fwd_end_t port_fwd_end;
1919af75078fSIntel 	lcoreid_t lc_id;
192053324971SDavid Marchand 	portid_t pt_id;
192153324971SDavid Marchand 	int i;
1922af75078fSIntel 
1923af75078fSIntel 	if (test_done) {
1924af75078fSIntel 		printf("Packet forwarding not started\n");
1925af75078fSIntel 		return;
1926af75078fSIntel 	}
1927af75078fSIntel 	printf("Telling cores to stop...");
1928af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1929af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1930af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1931af75078fSIntel 	rte_eal_mp_wait_lcore();
1932af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1933af75078fSIntel 	if (port_fwd_end != NULL) {
1934af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1935af75078fSIntel 			pt_id = fwd_ports_ids[i];
1936af75078fSIntel 			(*port_fwd_end)(pt_id);
1937af75078fSIntel 		}
1938af75078fSIntel 	}
1939c185d42cSDavid Marchand 
194053324971SDavid Marchand 	fwd_stats_display();
194158d475b7SJerin Jacob 
1942af75078fSIntel 	printf("\nDone.\n");
1943af75078fSIntel 	test_done = 1;
1944af75078fSIntel }
1945af75078fSIntel 
1946cfae07fdSOuyang Changchun void
1947cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1948cfae07fdSOuyang Changchun {
1949492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1950cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1951cfae07fdSOuyang Changchun }
1952cfae07fdSOuyang Changchun 
1953cfae07fdSOuyang Changchun void
1954cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1955cfae07fdSOuyang Changchun {
1956492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1957cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1958cfae07fdSOuyang Changchun }
1959cfae07fdSOuyang Changchun 
1960ce8d5614SIntel static int
1961ce8d5614SIntel all_ports_started(void)
1962ce8d5614SIntel {
1963ce8d5614SIntel 	portid_t pi;
1964ce8d5614SIntel 	struct rte_port *port;
1965ce8d5614SIntel 
19667d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1967ce8d5614SIntel 		port = &ports[pi];
1968ce8d5614SIntel 		/* Check if there is a port which is not started */
196941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
197041b05095SBernard Iremonger 			(port->slave_flag == 0))
1971ce8d5614SIntel 			return 0;
1972ce8d5614SIntel 	}
1973ce8d5614SIntel 
1974ce8d5614SIntel 	/* No port is not started */
1975ce8d5614SIntel 	return 1;
1976ce8d5614SIntel }
1977ce8d5614SIntel 
1978148f963fSBruce Richardson int
19796018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
19806018eb8cSShahaf Shuler {
19816018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
19826018eb8cSShahaf Shuler 
19836018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
19846018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
19856018eb8cSShahaf Shuler 		return 0;
19866018eb8cSShahaf Shuler 	return 1;
19876018eb8cSShahaf Shuler }
19886018eb8cSShahaf Shuler 
19896018eb8cSShahaf Shuler int
1990edab33b1STetsuya Mukawa all_ports_stopped(void)
1991edab33b1STetsuya Mukawa {
1992edab33b1STetsuya Mukawa 	portid_t pi;
1993edab33b1STetsuya Mukawa 
19947d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
19956018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1996edab33b1STetsuya Mukawa 			return 0;
1997edab33b1STetsuya Mukawa 	}
1998edab33b1STetsuya Mukawa 
1999edab33b1STetsuya Mukawa 	return 1;
2000edab33b1STetsuya Mukawa }
2001edab33b1STetsuya Mukawa 
2002edab33b1STetsuya Mukawa int
2003edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2004edab33b1STetsuya Mukawa {
2005edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2006edab33b1STetsuya Mukawa 		return 0;
2007edab33b1STetsuya Mukawa 
2008edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2009edab33b1STetsuya Mukawa 		return 0;
2010edab33b1STetsuya Mukawa 
2011edab33b1STetsuya Mukawa 	return 1;
2012edab33b1STetsuya Mukawa }
2013edab33b1STetsuya Mukawa 
/*
 * Start one port, or all ports when @pid is RTE_PORT_ALL.
 *
 * For each selected port the state machine goes
 * STOPPED -> HANDLING -> STARTED via atomic compare-and-set, so a
 * concurrent state change is detected and the port is skipped or
 * rolled back to STOPPED. Ports flagged with need_reconfig /
 * need_reconfig_queues are (re)configured and their Rx/Tx queues
 * (re)created before rte_eth_dev_start() is called.
 *
 * Returns 0 on success (including "nothing to do"), -1 on a
 * configuration or queue-setup failure.
 */
int
start_port(portid_t pid)
{
	/* -1: no port matched; 0: matched but none left HANDLING state;
	 * 1: at least one port was actually started.
	 */
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	/* DCB configuration implies running the DCB test mode. */
	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			/* Flow isolation must be applied before configure. */
			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Roll the port back to STOPPED on failure. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the per-port Tx-ring NUMA override
				 * when configured; otherwise use the port's
				 * own socket.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* Rx ring pinned to an explicit NUMA
					 * node: the mbuf pool must exist there.
					 */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* Report the port's MAC address once it is running. */
		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
2185ce8d5614SIntel 
2186ce8d5614SIntel void
2187ce8d5614SIntel stop_port(portid_t pid)
2188ce8d5614SIntel {
2189ce8d5614SIntel 	portid_t pi;
2190ce8d5614SIntel 	struct rte_port *port;
2191ce8d5614SIntel 	int need_check_link_status = 0;
2192ce8d5614SIntel 
2193ce8d5614SIntel 	if (dcb_test) {
2194ce8d5614SIntel 		dcb_test = 0;
2195ce8d5614SIntel 		dcb_config = 0;
2196ce8d5614SIntel 	}
21974468635fSMichael Qiu 
21984468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21994468635fSMichael Qiu 		return;
22004468635fSMichael Qiu 
2201ce8d5614SIntel 	printf("Stopping ports...\n");
2202ce8d5614SIntel 
22037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22044468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2205ce8d5614SIntel 			continue;
2206ce8d5614SIntel 
2207a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2209a8ef3e3aSBernard Iremonger 			continue;
2210a8ef3e3aSBernard Iremonger 		}
2211a8ef3e3aSBernard Iremonger 
22120e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
22130e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
22140e545d30SBernard Iremonger 			continue;
22150e545d30SBernard Iremonger 		}
22160e545d30SBernard Iremonger 
2217ce8d5614SIntel 		port = &ports[pi];
2218ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2219ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2220ce8d5614SIntel 			continue;
2221ce8d5614SIntel 
2222ce8d5614SIntel 		rte_eth_dev_stop(pi);
2223ce8d5614SIntel 
2224ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2225ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2226ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2227ce8d5614SIntel 		need_check_link_status = 1;
2228ce8d5614SIntel 	}
2229bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2230edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2231ce8d5614SIntel 
2232ce8d5614SIntel 	printf("Done\n");
2233ce8d5614SIntel }
2234ce8d5614SIntel 
2235ce6959bfSWisam Jaddo static void
22364f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2237ce6959bfSWisam Jaddo {
22384f1de450SThomas Monjalon 	portid_t i;
22394f1de450SThomas Monjalon 	portid_t new_total = 0;
2240ce6959bfSWisam Jaddo 
22414f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
22424f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
22434f1de450SThomas Monjalon 			array[new_total] = array[i];
22444f1de450SThomas Monjalon 			new_total++;
2245ce6959bfSWisam Jaddo 		}
22464f1de450SThomas Monjalon 	*total = new_total;
22474f1de450SThomas Monjalon }
22484f1de450SThomas Monjalon 
/*
 * Purge detached/invalid port ids from both global port lists and
 * keep the configured-port count in sync with the forwarding list.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	/* Forwarding list is the configured list after a purge. */
	nb_cfg_ports = nb_fwd_ports;
}
2256ce6959bfSWisam Jaddo 
/*
 * Close one port, or all ports when @pid is RTE_PORT_ALL.
 *
 * Ports still used by the forwarding configuration or acting as
 * bonding slaves are skipped. The state machine goes
 * STOPPED -> HANDLING -> CLOSED via atomic compare-and-set; already
 * CLOSED ports are reported and skipped. Any flow rules are flushed
 * before rte_eth_dev_close(), and the global port lists are purged
 * of ids invalidated by the close.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) == 1 means already closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* Only a STOPPED port may transition to HANDLING here. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Flush flow rules before the device is closed. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2308ce8d5614SIntel 
2309edab33b1STetsuya Mukawa void
231097f1e196SWei Dai reset_port(portid_t pid)
231197f1e196SWei Dai {
231297f1e196SWei Dai 	int diag;
231397f1e196SWei Dai 	portid_t pi;
231497f1e196SWei Dai 	struct rte_port *port;
231597f1e196SWei Dai 
231697f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
231797f1e196SWei Dai 		return;
231897f1e196SWei Dai 
231997f1e196SWei Dai 	printf("Resetting ports...\n");
232097f1e196SWei Dai 
232197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
232297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
232397f1e196SWei Dai 			continue;
232497f1e196SWei Dai 
232597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
232697f1e196SWei Dai 			printf("Please remove port %d from forwarding "
232797f1e196SWei Dai 			       "configuration.\n", pi);
232897f1e196SWei Dai 			continue;
232997f1e196SWei Dai 		}
233097f1e196SWei Dai 
233197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
233297f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
233397f1e196SWei Dai 			       pi);
233497f1e196SWei Dai 			continue;
233597f1e196SWei Dai 		}
233697f1e196SWei Dai 
233797f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
233897f1e196SWei Dai 		if (diag == 0) {
233997f1e196SWei Dai 			port = &ports[pi];
234097f1e196SWei Dai 			port->need_reconfig = 1;
234197f1e196SWei Dai 			port->need_reconfig_queues = 1;
234297f1e196SWei Dai 		} else {
234397f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
234497f1e196SWei Dai 		}
234597f1e196SWei Dai 	}
234697f1e196SWei Dai 
234797f1e196SWei Dai 	printf("Done\n");
234897f1e196SWei Dai }
234997f1e196SWei Dai 
235097f1e196SWei Dai void
2351edab33b1STetsuya Mukawa attach_port(char *identifier)
2352ce8d5614SIntel {
23534f1ed78eSThomas Monjalon 	portid_t pi;
2354c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2355ce8d5614SIntel 
2356edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2357edab33b1STetsuya Mukawa 
2358edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2359edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2360edab33b1STetsuya Mukawa 		return;
2361ce8d5614SIntel 	}
2362ce8d5614SIntel 
2363c9cce428SThomas Monjalon 	if (rte_dev_probe(identifier) != 0) {
2364c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2365edab33b1STetsuya Mukawa 		return;
2366c9cce428SThomas Monjalon 	}
2367c9cce428SThomas Monjalon 
23684f1ed78eSThomas Monjalon 	/* first attach mode: event */
23694f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
23704f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
23714f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
23724f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
23734f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
23744f1ed78eSThomas Monjalon 				setup_attached_port(pi);
23754f1ed78eSThomas Monjalon 		return;
23764f1ed78eSThomas Monjalon 	}
23774f1ed78eSThomas Monjalon 
23784f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
237986fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
23804f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
238186fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
238286fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2383c9cce428SThomas Monjalon 		setup_attached_port(pi);
2384c9cce428SThomas Monjalon 	}
238586fa5de1SThomas Monjalon }
2386c9cce428SThomas Monjalon 
2387c9cce428SThomas Monjalon static void
2388c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2389c9cce428SThomas Monjalon {
2390c9cce428SThomas Monjalon 	unsigned int socket_id;
2391edab33b1STetsuya Mukawa 
2392931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
239329841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2394931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
239529841336SPhil Yang 		socket_id = socket_ids[0];
2396931126baSBernard Iremonger 	reconfig(pi, socket_id);
2397edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2398edab33b1STetsuya Mukawa 
23994f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
24004f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
24014f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
24024f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2403edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2404edab33b1STetsuya Mukawa 
2405edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2406edab33b1STetsuya Mukawa 	printf("Done\n");
2407edab33b1STetsuya Mukawa }
2408edab33b1STetsuya Mukawa 
/*
 * Detach the rte_device backing @port_id from the system.
 *
 * The port should normally be closed first; a merely stopped port is
 * accepted (its flow rules are flushed), but a running port aborts
 * the detach. After a successful rte_dev_remove() every sibling port
 * of the same device has its device pointer cleared and is forced to
 * CLOSED, and the global port lists are purged of invalid ids.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		/* A running port must be stopped before detaching. */
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		/* Flush flow rules the close path would have removed. */
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}

	RTE_ETH_FOREACH_DEV_SIBLING(sibling, port_id) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
24555f4ec54fSChen Jing D(Mark) 
/*
 * Orderly testpmd shutdown: stop any running forwarding test,
 * unmap anonymously-allocated mempool memory, stop and close every
 * port, tear down hotplug monitoring when enabled, and free the
 * per-socket mempools.
 */
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;
	int i;

	if (test_done == 0)
		stop_packet_forwarding();

	/* Undo DMA mappings for anonymous mempool memory before the
	 * ports (and their drivers) go away.
	 */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		/* Skip link-status polling during shutdown. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires to call clean-up routine to remove existing
			 * socket.
			 * This workaround valid only for testpmd, needs a fix
			 * valid for all applications.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port_device(pt_id);
		}
	}

	if (hot_plug) {
		/* Tear down hotplug support in reverse order of setup;
		 * bail out on the first failure.
		 */
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	/* Release the per-socket mbuf pools last. */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}
2530af75078fSIntel 
/* Handler type for an interactive PMD test menu command. */
typedef void (*cmd_func_t)(void);

/* One entry of the PMD test menu: a command name and its handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command string entered by the user */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Number of entries in the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2538af75078fSIntel 
2539ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2540af75078fSIntel static void
2541edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2542af75078fSIntel {
2543ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2544ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2545f8244c63SZhiyong Yang 	portid_t portid;
2546f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2547ce8d5614SIntel 	struct rte_eth_link link;
2548ce8d5614SIntel 
2549ce8d5614SIntel 	printf("Checking link statuses...\n");
2550ce8d5614SIntel 	fflush(stdout);
2551ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2552ce8d5614SIntel 		all_ports_up = 1;
25537d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2554ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2555ce8d5614SIntel 				continue;
2556ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2557ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2558ce8d5614SIntel 			/* print link status if flag set */
2559ce8d5614SIntel 			if (print_flag == 1) {
2560ce8d5614SIntel 				if (link.link_status)
2561f8244c63SZhiyong Yang 					printf(
2562f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2563f8244c63SZhiyong Yang 					portid, link.link_speed,
2564ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2565ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2566ce8d5614SIntel 				else
2567f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2568ce8d5614SIntel 				continue;
2569ce8d5614SIntel 			}
2570ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
257109419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2572ce8d5614SIntel 				all_ports_up = 0;
2573ce8d5614SIntel 				break;
2574ce8d5614SIntel 			}
2575ce8d5614SIntel 		}
2576ce8d5614SIntel 		/* after finally printing all link status, get out */
2577ce8d5614SIntel 		if (print_flag == 1)
2578ce8d5614SIntel 			break;
2579ce8d5614SIntel 
2580ce8d5614SIntel 		if (all_ports_up == 0) {
2581ce8d5614SIntel 			fflush(stdout);
2582ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2583ce8d5614SIntel 		}
2584ce8d5614SIntel 
2585ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2586ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2587ce8d5614SIntel 			print_flag = 1;
2588ce8d5614SIntel 		}
25898ea656f8SGaetan Rivet 
25908ea656f8SGaetan Rivet 		if (lsc_interrupt)
25918ea656f8SGaetan Rivet 			break;
2592ce8d5614SIntel 	}
2593af75078fSIntel }
2594af75078fSIntel 
/*
 * Alarm callback removing the single port carried in @arg after an
 * RTE_ETH_EVENT_INTR_RMV event. It has a limitation: it does not
 * handle multiple-port removal for one device.
 *
 * If the port is part of an active forwarding test, the test is
 * stopped first and restarted afterwards. Link checking is
 * suppressed around stop_port() because the device is going away.
 *
 * TODO: the device detach invoke is planned to be moved from the
 * user side into EAL, with all PMDs freeing port resources on ether
 * device close.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	/* Pause forwarding if this port is currently in use. */
	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	/* Resume forwarding on the remaining ports. */
	if (need_to_start)
		start_packet_forwarding(0);
}
2622284c908cSGaetan Rivet 
262376ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2624d6af1a13SBernard Iremonger static int
2625f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2626d6af1a13SBernard Iremonger 		  void *ret_param)
262776ad4a2dSGaetan Rivet {
262876ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2629d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
263076ad4a2dSGaetan Rivet 
263176ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
2632f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
263376ad4a2dSGaetan Rivet 			port_id, __func__, type);
263476ad4a2dSGaetan Rivet 		fflush(stderr);
26353af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2636f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
263797b5d8b5SThomas Monjalon 			eth_event_desc[type]);
263876ad4a2dSGaetan Rivet 		fflush(stdout);
263976ad4a2dSGaetan Rivet 	}
2640284c908cSGaetan Rivet 
2641284c908cSGaetan Rivet 	switch (type) {
26424f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
26434f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
26444f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
26454f1ed78eSThomas Monjalon 		break;
2646284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
26474f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
26484f1ed78eSThomas Monjalon 			break;
2649284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2650cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2651284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2652284c908cSGaetan Rivet 		break;
2653284c908cSGaetan Rivet 	default:
2654284c908cSGaetan Rivet 		break;
2655284c908cSGaetan Rivet 	}
2656d6af1a13SBernard Iremonger 	return 0;
265776ad4a2dSGaetan Rivet }
265876ad4a2dSGaetan Rivet 
265997b5d8b5SThomas Monjalon static int
266097b5d8b5SThomas Monjalon register_eth_event_callback(void)
266197b5d8b5SThomas Monjalon {
266297b5d8b5SThomas Monjalon 	int ret;
266397b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
266497b5d8b5SThomas Monjalon 
266597b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
266697b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
266797b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
266897b5d8b5SThomas Monjalon 				event,
266997b5d8b5SThomas Monjalon 				eth_event_callback,
267097b5d8b5SThomas Monjalon 				NULL);
267197b5d8b5SThomas Monjalon 		if (ret != 0) {
267297b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
267397b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
267497b5d8b5SThomas Monjalon 			return -1;
267597b5d8b5SThomas Monjalon 		}
267697b5d8b5SThomas Monjalon 	}
267797b5d8b5SThomas Monjalon 
267897b5d8b5SThomas Monjalon 	return 0;
267997b5d8b5SThomas Monjalon }
268097b5d8b5SThomas Monjalon 
2681fb73e096SJeff Guo /* This function is used by the interrupt thread */
2682fb73e096SJeff Guo static void
2683cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2684fb73e096SJeff Guo 			     __rte_unused void *arg)
2685fb73e096SJeff Guo {
26862049c511SJeff Guo 	uint16_t port_id;
26872049c511SJeff Guo 	int ret;
26882049c511SJeff Guo 
2689fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2690fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2691fb73e096SJeff Guo 			__func__, type);
2692fb73e096SJeff Guo 		fflush(stderr);
2693fb73e096SJeff Guo 	}
2694fb73e096SJeff Guo 
2695fb73e096SJeff Guo 	switch (type) {
2696fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2697cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2698fb73e096SJeff Guo 			device_name);
26992049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
27002049c511SJeff Guo 		if (ret) {
27012049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
27022049c511SJeff Guo 				device_name);
27032049c511SJeff Guo 			return;
27042049c511SJeff Guo 		}
2705cc1bf307SJeff Guo 		/*
2706cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
2707cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
2708cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
2709cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
2710cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
2711cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
2712cc1bf307SJeff Guo 		 * be deleted.
2713cc1bf307SJeff Guo 		 */
2714cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
2715cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2716cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
2717cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
2718fb73e096SJeff Guo 		break;
2719fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2720fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2721fb73e096SJeff Guo 			device_name);
2722fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2723fb73e096SJeff Guo 		 * begin to attach port.
2724fb73e096SJeff Guo 		 */
2725fb73e096SJeff Guo 		break;
2726fb73e096SJeff Guo 	default:
2727fb73e096SJeff Guo 		break;
2728fb73e096SJeff Guo 	}
2729fb73e096SJeff Guo }
2730fb73e096SJeff Guo 
2731013af9b6SIntel static int
273228caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2733af75078fSIntel {
2734013af9b6SIntel 	uint16_t i;
2735af75078fSIntel 	int diag;
2736013af9b6SIntel 	uint8_t mapping_found = 0;
2737af75078fSIntel 
2738013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2739013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2740013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2741013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2742013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2743013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2744013af9b6SIntel 			if (diag != 0)
2745013af9b6SIntel 				return diag;
2746013af9b6SIntel 			mapping_found = 1;
2747af75078fSIntel 		}
2748013af9b6SIntel 	}
2749013af9b6SIntel 	if (mapping_found)
2750013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2751013af9b6SIntel 	return 0;
2752013af9b6SIntel }
2753013af9b6SIntel 
2754013af9b6SIntel static int
275528caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2756013af9b6SIntel {
2757013af9b6SIntel 	uint16_t i;
2758013af9b6SIntel 	int diag;
2759013af9b6SIntel 	uint8_t mapping_found = 0;
2760013af9b6SIntel 
2761013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2762013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2763013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2764013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2765013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2766013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2767013af9b6SIntel 			if (diag != 0)
2768013af9b6SIntel 				return diag;
2769013af9b6SIntel 			mapping_found = 1;
2770013af9b6SIntel 		}
2771013af9b6SIntel 	}
2772013af9b6SIntel 	if (mapping_found)
2773013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2774013af9b6SIntel 	return 0;
2775013af9b6SIntel }
2776013af9b6SIntel 
2777013af9b6SIntel static void
277828caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2779013af9b6SIntel {
2780013af9b6SIntel 	int diag = 0;
2781013af9b6SIntel 
2782013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2783af75078fSIntel 	if (diag != 0) {
2784013af9b6SIntel 		if (diag == -ENOTSUP) {
2785013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2786013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2787013af9b6SIntel 		}
2788013af9b6SIntel 		else
2789013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2790013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2791013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2792af75078fSIntel 					pi, diag);
2793af75078fSIntel 	}
2794013af9b6SIntel 
2795013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2796af75078fSIntel 	if (diag != 0) {
2797013af9b6SIntel 		if (diag == -ENOTSUP) {
2798013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2799013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2800013af9b6SIntel 		}
2801013af9b6SIntel 		else
2802013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2803013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2804013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2805af75078fSIntel 					pi, diag);
2806af75078fSIntel 	}
2807af75078fSIntel }
2808af75078fSIntel 
2809f2c5125aSPablo de Lara static void
2810f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2811f2c5125aSPablo de Lara {
2812d44f8a48SQi Zhang 	uint16_t qid;
2813f2c5125aSPablo de Lara 
2814d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2815d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2816d44f8a48SQi Zhang 
2817d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2818f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2819d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2820f2c5125aSPablo de Lara 
2821f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2822d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2823f2c5125aSPablo de Lara 
2824f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2825d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2826f2c5125aSPablo de Lara 
2827f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2828d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2829f2c5125aSPablo de Lara 
2830f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2831d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2832f2c5125aSPablo de Lara 
2833d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2834d44f8a48SQi Zhang 	}
2835d44f8a48SQi Zhang 
2836d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2837d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2838d44f8a48SQi Zhang 
2839d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2840f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2841d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2842f2c5125aSPablo de Lara 
2843f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2844d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2845f2c5125aSPablo de Lara 
2846f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2847d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2848f2c5125aSPablo de Lara 
2849f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2850d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2851f2c5125aSPablo de Lara 
2852f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2853d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2854d44f8a48SQi Zhang 
2855d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2856d44f8a48SQi Zhang 	}
2857f2c5125aSPablo de Lara }
2858f2c5125aSPablo de Lara 
2859013af9b6SIntel void
2860013af9b6SIntel init_port_config(void)
2861013af9b6SIntel {
2862013af9b6SIntel 	portid_t pid;
2863013af9b6SIntel 	struct rte_port *port;
2864013af9b6SIntel 
28657d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2866013af9b6SIntel 		port = &ports[pid];
2867013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2868422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
28693ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2870013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
287190892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2872422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2873af75078fSIntel 		} else {
2874013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2875013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2876af75078fSIntel 		}
28773ce690d3SBruce Richardson 
28785f592039SJingjing Wu 		if (port->dcb_flag == 0) {
28793ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
28803ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
28813ce690d3SBruce Richardson 			else
28823ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
28833ce690d3SBruce Richardson 		}
28843ce690d3SBruce Richardson 
2885f2c5125aSPablo de Lara 		rxtx_port_config(port);
2886013af9b6SIntel 
2887013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2888013af9b6SIntel 
2889013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
289050c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2891e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
28927b7e5ba7SIntel #endif
28938ea656f8SGaetan Rivet 
28948ea656f8SGaetan Rivet 		if (lsc_interrupt &&
28958ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
28968ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
28978ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2898284c908cSGaetan Rivet 		if (rmv_interrupt &&
2899284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2900284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2901284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2902013af9b6SIntel 	}
2903013af9b6SIntel }
2904013af9b6SIntel 
290541b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
290641b05095SBernard Iremonger {
290741b05095SBernard Iremonger 	struct rte_port *port;
290841b05095SBernard Iremonger 
290941b05095SBernard Iremonger 	port = &ports[slave_pid];
291041b05095SBernard Iremonger 	port->slave_flag = 1;
291141b05095SBernard Iremonger }
291241b05095SBernard Iremonger 
291341b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
291441b05095SBernard Iremonger {
291541b05095SBernard Iremonger 	struct rte_port *port;
291641b05095SBernard Iremonger 
291741b05095SBernard Iremonger 	port = &ports[slave_pid];
291841b05095SBernard Iremonger 	port->slave_flag = 0;
291941b05095SBernard Iremonger }
292041b05095SBernard Iremonger 
29210e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
29220e545d30SBernard Iremonger {
29230e545d30SBernard Iremonger 	struct rte_port *port;
29240e545d30SBernard Iremonger 
29250e545d30SBernard Iremonger 	port = &ports[slave_pid];
2926b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2927b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2928b8b8b344SMatan Azrad 		return 1;
2929b8b8b344SMatan Azrad 	return 0;
29300e545d30SBernard Iremonger }
29310e545d30SBernard Iremonger 
/* VLAN tags used by get_eth_dcb_conf() to map one VMDq pool per tag, and
 * installed as VLAN filter entries in init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2938013af9b6SIntel 
/*
 * Fill *eth_conf with a DCB configuration for port 'pid'.
 *
 * In DCB_VT_ENABLED mode the port is configured for VMDq+DCB: 32 pools
 * for 4 TCs or 16 pools for 8 TCs, with one pool mapped per entry of
 * vlan_tags[]. In plain DCB mode the current RSS hash configuration of
 * the port is read back and preserved alongside DCB.
 *
 * Returns 0 on success, or the error from rte_eth_dev_rss_hash_conf_get()
 * in non-VT mode.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One pool per VLAN tag; pools are assigned round-robin. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the user priorities evenly over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Keep the RSS hash configuration already on the port. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the user priorities evenly over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
3012013af9b6SIntel 
3013013af9b6SIntel int
30141a572499SJingjing Wu init_port_dcb_config(portid_t pid,
30151a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
30161a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
30171a572499SJingjing Wu 		     uint8_t pfc_en)
3018013af9b6SIntel {
3019013af9b6SIntel 	struct rte_eth_conf port_conf;
3020013af9b6SIntel 	struct rte_port *rte_port;
3021013af9b6SIntel 	int retval;
3022013af9b6SIntel 	uint16_t i;
3023013af9b6SIntel 
30242a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3025013af9b6SIntel 
3026013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3027013af9b6SIntel 	/* Enter DCB configuration status */
3028013af9b6SIntel 	dcb_config = 1;
3029013af9b6SIntel 
3030d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3031d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3032d5354e89SYanglong Wu 
3033013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3034ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3035013af9b6SIntel 	if (retval < 0)
3036013af9b6SIntel 		return retval;
30370074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3038013af9b6SIntel 
30392f203d44SQi Zhang 	/* re-configure the device . */
3040*2b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3041*2b0e0ebaSChenbo Xia 	if (retval < 0)
3042*2b0e0ebaSChenbo Xia 		return retval;
30432a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
30442a977b89SWenzhuo Lu 
30452a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
30462a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
30472a977b89SWenzhuo Lu 	 */
30482a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
30492a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
30502a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
30512a977b89SWenzhuo Lu 			" for port %d.", pid);
30522a977b89SWenzhuo Lu 		return -1;
30532a977b89SWenzhuo Lu 	}
30542a977b89SWenzhuo Lu 
30552a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
30562a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
30572a977b89SWenzhuo Lu 	 */
30582a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
305986ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
306086ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
306186ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
306286ef65eeSBernard Iremonger 		} else {
30632a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
30642a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
306586ef65eeSBernard Iremonger 		}
30662a977b89SWenzhuo Lu 	} else {
30672a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
30682a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
30692a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
30702a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
30712a977b89SWenzhuo Lu 		} else {
30722a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
30732a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
30742a977b89SWenzhuo Lu 
30752a977b89SWenzhuo Lu 		}
30762a977b89SWenzhuo Lu 	}
30772a977b89SWenzhuo Lu 	rx_free_thresh = 64;
30782a977b89SWenzhuo Lu 
3079013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3080013af9b6SIntel 
3081f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3082013af9b6SIntel 	/* VLAN filter */
30830074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
30841a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3085013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3086013af9b6SIntel 
3087013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3088013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3089013af9b6SIntel 
30907741e4cfSIntel 	rte_port->dcb_flag = 1;
30917741e4cfSIntel 
3092013af9b6SIntel 	return 0;
3093af75078fSIntel }
3094af75078fSIntel 
3095ffc468ffSTetsuya Mukawa static void
3096ffc468ffSTetsuya Mukawa init_port(void)
3097ffc468ffSTetsuya Mukawa {
3098ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3099ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3100ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3101ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3102ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3103ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3104ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3105ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3106ffc468ffSTetsuya Mukawa 	}
310729841336SPhil Yang 
310829841336SPhil Yang 	/* Initialize ports NUMA structures */
310929841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
311029841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
311129841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3112ffc468ffSTetsuya Mukawa }
3113ffc468ffSTetsuya Mukawa 
/* Teardown helper invoked from the signal handler: run pmd_test_exit()
 * then prompt_exit() before the process terminates.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3120d3a274ceSZhihong Wang 
3121d3a274ceSZhihong Wang static void
3122cfea1f30SPablo de Lara print_stats(void)
3123cfea1f30SPablo de Lara {
3124cfea1f30SPablo de Lara 	uint8_t i;
3125cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3126cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3127cfea1f30SPablo de Lara 
3128cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3129cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3130cfea1f30SPablo de Lara 
3131cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3132cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3133cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3134683d1e82SIgor Romanov 
3135683d1e82SIgor Romanov 	fflush(stdout);
3136cfea1f30SPablo de Lara }
3137cfea1f30SPablo de Lara 
3138cfea1f30SPablo de Lara static void
3139d3a274ceSZhihong Wang signal_handler(int signum)
3140d3a274ceSZhihong Wang {
3141d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3142d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3143d3a274ceSZhihong Wang 				signum);
3144102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3145102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3146102b7329SReshma Pattan 		rte_pdump_uninit();
3147102b7329SReshma Pattan #endif
314862d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
314962d3216dSReshma Pattan 		rte_latencystats_uninit();
315062d3216dSReshma Pattan #endif
3151d3a274ceSZhihong Wang 		force_quit();
3152d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3153d9a191a0SPhil Yang 		f_quit = 1;
3154d3a274ceSZhihong Wang 		/* exit with the expected status */
3155d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3156d3a274ceSZhihong Wang 		kill(getpid(), signum);
3157d3a274ceSZhihong Wang 	}
3158d3a274ceSZhihong Wang }
3159d3a274ceSZhihong Wang 
3160af75078fSIntel int
3161af75078fSIntel main(int argc, char** argv)
3162af75078fSIntel {
3163af75078fSIntel 	int diag;
3164f8244c63SZhiyong Yang 	portid_t port_id;
31654918a357SXiaoyun Li 	uint16_t count;
3166fb73e096SJeff Guo 	int ret;
3167af75078fSIntel 
3168d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3169d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3170d3a274ceSZhihong Wang 
3171af75078fSIntel 	diag = rte_eal_init(argc, argv);
3172af75078fSIntel 	if (diag < 0)
3173af75078fSIntel 		rte_panic("Cannot init EAL\n");
3174af75078fSIntel 
3175285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3176285fd101SOlivier Matz 	if (testpmd_logtype < 0)
3177285fd101SOlivier Matz 		rte_panic("Cannot register log type");
3178285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3179285fd101SOlivier Matz 
318097b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
318197b5d8b5SThomas Monjalon 	if (ret != 0)
318297b5d8b5SThomas Monjalon 		rte_panic("Cannot register for ethdev events");
318397b5d8b5SThomas Monjalon 
31844aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
31854aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3186e9436f54STiwei Bie 	rte_pdump_init();
31874aa0d012SAnatoly Burakov #endif
31884aa0d012SAnatoly Burakov 
31894918a357SXiaoyun Li 	count = 0;
31904918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
31914918a357SXiaoyun Li 		ports_ids[count] = port_id;
31924918a357SXiaoyun Li 		count++;
31934918a357SXiaoyun Li 	}
31944918a357SXiaoyun Li 	nb_ports = (portid_t) count;
31954aa0d012SAnatoly Burakov 	if (nb_ports == 0)
31964aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
31974aa0d012SAnatoly Burakov 
31984aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
31994aa0d012SAnatoly Burakov 	init_port();
32004aa0d012SAnatoly Burakov 
32014aa0d012SAnatoly Burakov 	set_def_fwd_config();
32024aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
32034aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
32044aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
32054aa0d012SAnatoly Burakov 
3206e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3207e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3208e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3209e505d84cSAnatoly Burakov #endif
3210e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3211e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3212e505d84cSAnatoly Burakov #endif
3213e505d84cSAnatoly Burakov 
3214fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
32155fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3216fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3217fb7b8b32SAnatoly Burakov #else
3218fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3219fb7b8b32SAnatoly Burakov #endif
3220fb7b8b32SAnatoly Burakov 
3221e505d84cSAnatoly Burakov 	argc -= diag;
3222e505d84cSAnatoly Burakov 	argv += diag;
3223e505d84cSAnatoly Burakov 	if (argc > 1)
3224e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3225e505d84cSAnatoly Burakov 
3226e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3227285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
32281c036b16SEelco Chaudron 			strerror(errno));
32291c036b16SEelco Chaudron 	}
32301c036b16SEelco Chaudron 
323199cabef0SPablo de Lara 	if (tx_first && interactive)
323299cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
323399cabef0SPablo de Lara 				"interactive mode.\n");
32348820cba4SDavid Hunt 
32358820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
32368820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
32378820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
32388820cba4SDavid Hunt 		lsc_interrupt = 0;
32398820cba4SDavid Hunt 	}
32408820cba4SDavid Hunt 
32415a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
32425a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
32435a8fb55cSReshma Pattan 
32445a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3245af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3246af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3247af75078fSIntel 		       nb_rxq, nb_txq);
3248af75078fSIntel 
3249af75078fSIntel 	init_config();
3250fb73e096SJeff Guo 
3251fb73e096SJeff Guo 	if (hot_plug) {
32522049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3253fb73e096SJeff Guo 		if (ret) {
32542049c511SJeff Guo 			RTE_LOG(ERR, EAL,
32552049c511SJeff Guo 				"fail to enable hotplug handling.");
3256fb73e096SJeff Guo 			return -1;
3257fb73e096SJeff Guo 		}
3258fb73e096SJeff Guo 
32592049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
32602049c511SJeff Guo 		if (ret) {
32612049c511SJeff Guo 			RTE_LOG(ERR, EAL,
32622049c511SJeff Guo 				"fail to start device event monitoring.");
32632049c511SJeff Guo 			return -1;
32642049c511SJeff Guo 		}
32652049c511SJeff Guo 
32662049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3267cc1bf307SJeff Guo 			dev_event_callback, NULL);
32682049c511SJeff Guo 		if (ret) {
32692049c511SJeff Guo 			RTE_LOG(ERR, EAL,
32702049c511SJeff Guo 				"fail  to register device event callback\n");
32712049c511SJeff Guo 			return -1;
32722049c511SJeff Guo 		}
3273fb73e096SJeff Guo 	}
3274fb73e096SJeff Guo 
3275148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
3276148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3277af75078fSIntel 
3278ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
32797d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
3280ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
3281af75078fSIntel 
32827e4441c8SRemy Horton 	/* Init metrics library */
32837e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
32847e4441c8SRemy Horton 
328562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
328662d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
328762d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
328862d3216dSReshma Pattan 		if (ret)
328962d3216dSReshma Pattan 			printf("Warning: latencystats init()"
329062d3216dSReshma Pattan 				" returned error %d\n",	ret);
329162d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
329262d3216dSReshma Pattan 			latencystats_lcore_id);
329362d3216dSReshma Pattan 	}
329462d3216dSReshma Pattan #endif
329562d3216dSReshma Pattan 
32967e4441c8SRemy Horton 	/* Setup bitrate stats */
32977e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3298e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
32997e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
33007e4441c8SRemy Horton 		if (bitrate_data == NULL)
3301e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3302e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
33037e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3304e25e6c70SRemy Horton 	}
33057e4441c8SRemy Horton #endif
33067e4441c8SRemy Horton 
33070d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
330881ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
330981ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
331081ef862bSAllain Legacy 
3311ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3312ca7feb22SCyril Chemparathy 		if (auto_start) {
3313ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3314ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3315ca7feb22SCyril Chemparathy 		}
3316af75078fSIntel 		prompt();
33170de738cfSJiayu Hu 		pmd_test_exit();
3318ca7feb22SCyril Chemparathy 	} else
33190d56cb81SThomas Monjalon #endif
33200d56cb81SThomas Monjalon 	{
3321af75078fSIntel 		char c;
3322af75078fSIntel 		int rc;
3323af75078fSIntel 
3324d9a191a0SPhil Yang 		f_quit = 0;
3325d9a191a0SPhil Yang 
3326af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
332799cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3328cfea1f30SPablo de Lara 		if (stats_period != 0) {
3329cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3330cfea1f30SPablo de Lara 			uint64_t timer_period;
3331cfea1f30SPablo de Lara 
3332cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3333cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3334cfea1f30SPablo de Lara 
3335d9a191a0SPhil Yang 			while (f_quit == 0) {
3336cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3337cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3338cfea1f30SPablo de Lara 
3339cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3340cfea1f30SPablo de Lara 					print_stats();
3341cfea1f30SPablo de Lara 					/* Reset the timer */
3342cfea1f30SPablo de Lara 					diff_time = 0;
3343cfea1f30SPablo de Lara 				}
3344cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3345cfea1f30SPablo de Lara 				prev_time = cur_time;
3346cfea1f30SPablo de Lara 				sleep(1);
3347cfea1f30SPablo de Lara 			}
3348cfea1f30SPablo de Lara 		}
3349cfea1f30SPablo de Lara 
3350af75078fSIntel 		printf("Press enter to exit\n");
3351af75078fSIntel 		rc = read(0, &c, 1);
3352d3a274ceSZhihong Wang 		pmd_test_exit();
3353af75078fSIntel 		if (rc < 0)
3354af75078fSIntel 			return 1;
3355af75078fSIntel 	}
3356af75078fSIntel 
3357af75078fSIntel 	return 0;
3358af75078fSIntel }
3359