xref: /dpdk/app/test-pmd/testpmd.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 * NULL-terminated table of the packet-forwarding modes built into
 * testpmd. Entries guarded by #if/#ifdef are only present when the
 * corresponding library support is compiled in.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
190af75078fSIntel 
191401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
19259fcf854SShahaf Shuler uint16_t mempool_flags;
193401b744dSShahaf Shuler 
194af75078fSIntel struct fwd_config cur_fwd_config;
195af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196bf56fce1SZhihong Wang uint32_t retry_enabled;
197bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
199af75078fSIntel 
200af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
202c8798818SIntel                                       * specified on command-line. */
203cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
204d9a191a0SPhil Yang 
205d9a191a0SPhil Yang /*
206d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
207d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
208d9a191a0SPhil Yang  */
209d9a191a0SPhil Yang uint8_t f_quit;
210d9a191a0SPhil Yang 
211af75078fSIntel /*
212af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
213af75078fSIntel  */
214af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
217af75078fSIntel };
218af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
219af75078fSIntel 
22079bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
22179bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
22279bec05bSKonstantin Ananyev 
22382010ef5SYongseok Koh uint8_t txonly_multi_flow;
22482010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22582010ef5SYongseok Koh 
226af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
228af75078fSIntel 
229900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
230900550deSIntel uint8_t dcb_config = 0;
231900550deSIntel 
232900550deSIntel /* Whether the dcb is in testing status */
233900550deSIntel uint8_t dcb_test = 0;
234900550deSIntel 
235af75078fSIntel /*
236af75078fSIntel  * Configurable number of RX/TX queues.
237af75078fSIntel  */
238af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
240af75078fSIntel 
241af75078fSIntel /*
242af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2438599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
244af75078fSIntel  */
2458599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2468599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
247af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
249af75078fSIntel 
250f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
251af75078fSIntel /*
252af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
253af75078fSIntel  */
254af75078fSIntel 
255f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
258af75078fSIntel 
259f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
262af75078fSIntel 
263af75078fSIntel /*
264af75078fSIntel  * Configurable value of RX free threshold.
265af75078fSIntel  */
266f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
267af75078fSIntel 
268af75078fSIntel /*
269ce8d5614SIntel  * Configurable value of RX drop enable.
270ce8d5614SIntel  */
271f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
272ce8d5614SIntel 
273ce8d5614SIntel /*
274af75078fSIntel  * Configurable value of TX free threshold.
275af75078fSIntel  */
276f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
277af75078fSIntel 
278af75078fSIntel /*
279af75078fSIntel  * Configurable value of TX RS bit threshold.
280af75078fSIntel  */
281f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
282af75078fSIntel 
283af75078fSIntel /*
2843c156061SJens Freimann  * Configurable value of buffered packets before sending.
2853c156061SJens Freimann  */
2863c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2873c156061SJens Freimann 
2883c156061SJens Freimann /*
2893c156061SJens Freimann  * Configurable value of packet buffer timeout.
2903c156061SJens Freimann  */
2913c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2923c156061SJens Freimann 
2933c156061SJens Freimann /*
2943c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2953c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2963c156061SJens Freimann  */
2973c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2983c156061SJens Freimann 
2993c156061SJens Freimann /*
3003c156061SJens Freimann  * Configurable value of number of random writes done in
3013c156061SJens Freimann  * VNF simulation memory area.
3023c156061SJens Freimann  */
3033c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3043c156061SJens Freimann 
3053c156061SJens Freimann /*
3063c156061SJens Freimann  * Configurable value of number of random reads done in
3073c156061SJens Freimann  * VNF simulation memory area.
3083c156061SJens Freimann  */
3093c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3103c156061SJens Freimann 
3113c156061SJens Freimann /*
3123c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3133c156061SJens Freimann  * VNF simulation memory area.
3143c156061SJens Freimann  */
3153c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3163c156061SJens Freimann 
3173c156061SJens Freimann /*
318af75078fSIntel  * Receive Side Scaling (RSS) configuration.
319af75078fSIntel  */
3208a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
321af75078fSIntel 
322af75078fSIntel /*
323af75078fSIntel  * Port topology configuration
324af75078fSIntel  */
325af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
326af75078fSIntel 
3277741e4cfSIntel /*
3287741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3297741e4cfSIntel  */
3307741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3317741e4cfSIntel 
332af75078fSIntel /*
3337ee3e944SVasily Philipov  * Flow API isolated mode.
3347ee3e944SVasily Philipov  */
3357ee3e944SVasily Philipov uint8_t flow_isolate_all;
3367ee3e944SVasily Philipov 
3377ee3e944SVasily Philipov /*
338bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
339bc202406SDavid Marchand  */
340bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
341bc202406SDavid Marchand 
342bc202406SDavid Marchand /*
3436937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3446937d210SStephen Hemminger  */
3456937d210SStephen Hemminger uint8_t no_device_start = 0;
3466937d210SStephen Hemminger 
3476937d210SStephen Hemminger /*
3488ea656f8SGaetan Rivet  * Enable link status change notification
3498ea656f8SGaetan Rivet  */
3508ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3518ea656f8SGaetan Rivet 
3528ea656f8SGaetan Rivet /*
353284c908cSGaetan Rivet  * Enable device removal notification.
354284c908cSGaetan Rivet  */
355284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
356284c908cSGaetan Rivet 
357fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
358fb73e096SJeff Guo 
3594f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3604f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3614f1ed78eSThomas Monjalon 
36297b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
36397b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
36497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
36597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
36697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
36797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
36897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
36997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
37097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
37197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
37297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
37397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
37497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
37597b5d8b5SThomas Monjalon };
37697b5d8b5SThomas Monjalon 
377284c908cSGaetan Rivet /*
3783af72783SGaetan Rivet  * Display or mask ether events
3793af72783SGaetan Rivet  * Default to all events except VF_MBOX
3803af72783SGaetan Rivet  */
3813af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3823af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3833af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3843af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
385badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3863af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3873af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
388e505d84cSAnatoly Burakov /*
389e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
390e505d84cSAnatoly Burakov  */
391e505d84cSAnatoly Burakov int do_mlockall = 0;
3923af72783SGaetan Rivet 
3933af72783SGaetan Rivet /*
3947b7e5ba7SIntel  * NIC bypass mode configuration options.
3957b7e5ba7SIntel  */
3967b7e5ba7SIntel 
39750c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3987b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
399e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4007b7e5ba7SIntel #endif
4017b7e5ba7SIntel 
402e261265eSRadu Nicolau 
#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
41662d3216dSReshma Pattan 
4177b7e5ba7SIntel /*
418af75078fSIntel  * Ethernet device configuration.
419af75078fSIntel  */
420af75078fSIntel struct rte_eth_rxmode rx_mode = {
42135b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
42235b2d13fSOlivier Matz 		/**< Default maximum frame length. */
423af75078fSIntel };
424af75078fSIntel 
42507e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
42607e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
42707e5f7bdSShahaf Shuler };
428fd8c20aaSShahaf Shuler 
429af75078fSIntel struct rte_fdir_conf fdir_conf = {
430af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
431af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
432af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
433d9d5e6f2SJingjing Wu 	.mask = {
43426f579aaSWei Zhao 		.vlan_tci_mask = 0xFFEF,
435d9d5e6f2SJingjing Wu 		.ipv4_mask     = {
436d9d5e6f2SJingjing Wu 			.src_ip = 0xFFFFFFFF,
437d9d5e6f2SJingjing Wu 			.dst_ip = 0xFFFFFFFF,
438d9d5e6f2SJingjing Wu 		},
439d9d5e6f2SJingjing Wu 		.ipv6_mask     = {
440d9d5e6f2SJingjing Wu 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
441d9d5e6f2SJingjing Wu 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
442d9d5e6f2SJingjing Wu 		},
443d9d5e6f2SJingjing Wu 		.src_port_mask = 0xFFFF,
444d9d5e6f2SJingjing Wu 		.dst_port_mask = 0xFFFF,
44547b3ac6bSWenzhuo Lu 		.mac_addr_byte_mask = 0xFF,
44647b3ac6bSWenzhuo Lu 		.tunnel_type_mask = 1,
44747b3ac6bSWenzhuo Lu 		.tunnel_id_mask = 0xFFFFFFFF,
448d9d5e6f2SJingjing Wu 	},
449af75078fSIntel 	.drop_queue = 127,
450af75078fSIntel };
451af75078fSIntel 
4522950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
453af75078fSIntel 
454ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
455ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
456ed30d9b6SIntel 
457ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
458ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
459ed30d9b6SIntel 
460ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
461ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
462ed30d9b6SIntel 
463a4fd5eeeSElza Mathew /*
464a4fd5eeeSElza Mathew  * Display zero values by default for xstats
465a4fd5eeeSElza Mathew  */
466a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
467a4fd5eeeSElza Mathew 
468c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
469c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4707acf894dSStephen Hurd 
471e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4727e4441c8SRemy Horton /* Bitrate statistics */
4737e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
474e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
475e25e6c70SRemy Horton uint8_t bitrate_enabled;
476e25e6c70SRemy Horton #endif
4777e4441c8SRemy Horton 
478b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
479b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
480b40f8d78SJiayu Hu 
4811960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
4821960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
4831960be7dSNelio Laranjeiro 	.select_vlan = 0,
48462e8a5a8SViacheslav Ovsiienko 	.select_tos_ttl = 0,
4851960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
4861960be7dSNelio Laranjeiro 	.udp_src = 0,
4871960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4880c9da755SDavid Marchand 	.ipv4_src = RTE_IPV4(127, 0, 0, 1),
4890c9da755SDavid Marchand 	.ipv4_dst = RTE_IPV4(255, 255, 255, 255),
4901960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4911960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4921960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4931960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4941960be7dSNelio Laranjeiro 	.vlan_tci = 0,
49562e8a5a8SViacheslav Ovsiienko 	.ip_tos = 0,
49662e8a5a8SViacheslav Ovsiienko 	.ip_ttl = 255,
4971960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4981960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4991960be7dSNelio Laranjeiro };
5001960be7dSNelio Laranjeiro 
501dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
502dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
503dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
504dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
5050c9da755SDavid Marchand 	.ipv4_src = RTE_IPV4(127, 0, 0, 1),
5060c9da755SDavid Marchand 	.ipv4_dst = RTE_IPV4(255, 255, 255, 255),
507dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
508dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
509dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
510dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
511dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
512dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
513dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
514dcd962fcSNelio Laranjeiro };
515dcd962fcSNelio Laranjeiro 
516ed30d9b6SIntel /* Forward function declarations */
517c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
51828caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
51928caa76aSZhiyong Yang 						   struct rte_port *port);
520edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
521f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
52276ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
523d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
524cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
525fb73e096SJeff Guo 				enum rte_dev_event_type type,
526fb73e096SJeff Guo 				void *param);
527ce8d5614SIntel 
528ce8d5614SIntel /*
529ce8d5614SIntel  * Check if all the ports are started.
530ce8d5614SIntel  * If yes, return positive value. If not, return zero.
531ce8d5614SIntel  */
532ce8d5614SIntel static int all_ports_started(void);
533ed30d9b6SIntel 
53452f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
53535b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
53652f38a20SJiayu Hu 
537af75078fSIntel /*
53898a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
539c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
540c9cafcc8SShahaf Shuler  */
541c9cafcc8SShahaf Shuler int
542c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
543c9cafcc8SShahaf Shuler {
544c9cafcc8SShahaf Shuler 	unsigned int i;
545c9cafcc8SShahaf Shuler 
546c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
547c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
548c9cafcc8SShahaf Shuler 			return 0;
549c9cafcc8SShahaf Shuler 	}
550c9cafcc8SShahaf Shuler 	return 1;
551c9cafcc8SShahaf Shuler }
552c9cafcc8SShahaf Shuler 
553c9cafcc8SShahaf Shuler /*
554af75078fSIntel  * Setup default configuration.
555af75078fSIntel  */
556af75078fSIntel static void
557af75078fSIntel set_default_fwd_lcores_config(void)
558af75078fSIntel {
559af75078fSIntel 	unsigned int i;
560af75078fSIntel 	unsigned int nb_lc;
5617acf894dSStephen Hurd 	unsigned int sock_num;
562af75078fSIntel 
563af75078fSIntel 	nb_lc = 0;
564af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
565dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
566dbfb8ec7SPhil Yang 			continue;
567c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
568c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
569c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
570c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
571c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
572c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
573c9cafcc8SShahaf Shuler 			}
574c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5757acf894dSStephen Hurd 		}
576f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
577f54fe5eeSStephen Hurd 			continue;
578f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
579af75078fSIntel 	}
580af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
581af75078fSIntel 	nb_cfg_lcores = nb_lcores;
582af75078fSIntel 	nb_fwd_lcores = 1;
583af75078fSIntel }
584af75078fSIntel 
585af75078fSIntel static void
586af75078fSIntel set_def_peer_eth_addrs(void)
587af75078fSIntel {
588af75078fSIntel 	portid_t i;
589af75078fSIntel 
590af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
59135b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
592af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
593af75078fSIntel 	}
594af75078fSIntel }
595af75078fSIntel 
/*
 * Build the default forwarding-port configuration: record every
 * currently attached ethdev port id and register its NUMA socket
 * (if known and not yet seen) in socket_ids[].
 */
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		/* socket_id < 0 means the device's NUMA node is unknown */
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	/* by default, configure and forward on all detected ports */
	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
620af75078fSIntel 
/*
 * Install the default forwarding configuration (lcores, peer MAC
 * addresses, ports). Note the lcore setup runs first: it seeds
 * socket_ids[], which the port setup then extends via new_socket_id().
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
628af75078fSIntel 
629c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
630c7f5dba7SAnatoly Burakov static int
631c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
632c7f5dba7SAnatoly Burakov {
633c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
634c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
635c7f5dba7SAnatoly Burakov 
636c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
637c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
638c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
639c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
640c7f5dba7SAnatoly Burakov 	 */
641c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
642c7f5dba7SAnatoly Burakov 
643c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
644c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
645c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
646c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
647c7f5dba7SAnatoly Burakov 		return -1;
648c7f5dba7SAnatoly Burakov 	}
649c7f5dba7SAnatoly Burakov 
650c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
651c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
652c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
653c7f5dba7SAnatoly Burakov 
654c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
655c7f5dba7SAnatoly Burakov 
656c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
659c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
660c7f5dba7SAnatoly Burakov 		return -1;
661c7f5dba7SAnatoly Burakov 	}
662c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
663c7f5dba7SAnatoly Burakov 
664c7f5dba7SAnatoly Burakov 	return 0;
665c7f5dba7SAnatoly Burakov }
666c7f5dba7SAnatoly Burakov 
667c7f5dba7SAnatoly Burakov static int
668c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
669c7f5dba7SAnatoly Burakov {
670c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
671c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
672c7f5dba7SAnatoly Burakov 	 */
6739d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
676c7f5dba7SAnatoly Burakov }
677c7f5dba7SAnatoly Burakov 
678c7f5dba7SAnatoly Burakov static void *
679c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
680c7f5dba7SAnatoly Burakov {
681c7f5dba7SAnatoly Burakov 	void *addr;
682c7f5dba7SAnatoly Burakov 	int flags;
683c7f5dba7SAnatoly Burakov 
684c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
685c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
686c7f5dba7SAnatoly Burakov 	if (huge)
687c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
688c7f5dba7SAnatoly Burakov 
689c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
690c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
691c7f5dba7SAnatoly Burakov 		return NULL;
692c7f5dba7SAnatoly Burakov 
693c7f5dba7SAnatoly Burakov 	return addr;
694c7f5dba7SAnatoly Burakov }
695c7f5dba7SAnatoly Burakov 
696c7f5dba7SAnatoly Burakov struct extmem_param {
697c7f5dba7SAnatoly Burakov 	void *addr;
698c7f5dba7SAnatoly Burakov 	size_t len;
699c7f5dba7SAnatoly Burakov 	size_t pgsz;
700c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
701c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
702c7f5dba7SAnatoly Burakov };
703c7f5dba7SAnatoly Burakov 
704c7f5dba7SAnatoly Burakov static int
705c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
706c7f5dba7SAnatoly Burakov 		bool huge)
707c7f5dba7SAnatoly Burakov {
708c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
709c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
710c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
711c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
712c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
713c7f5dba7SAnatoly Burakov 	void *addr;
714c7f5dba7SAnatoly Burakov 	int ret;
715c7f5dba7SAnatoly Burakov 
716c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
717c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
718c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
719c7f5dba7SAnatoly Burakov 			continue;
720c7f5dba7SAnatoly Burakov 
721c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
722c7f5dba7SAnatoly Burakov 
723c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
724c7f5dba7SAnatoly Burakov 		if (!huge)
725c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
726c7f5dba7SAnatoly Burakov 
727c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
728c7f5dba7SAnatoly Burakov 		if (ret < 0) {
729c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
730c7f5dba7SAnatoly Burakov 			return -1;
731c7f5dba7SAnatoly Burakov 		}
732c7f5dba7SAnatoly Burakov 
733c7f5dba7SAnatoly Burakov 		/* allocate our memory */
734c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
735c7f5dba7SAnatoly Burakov 
736c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
737c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
738c7f5dba7SAnatoly Burakov 		 * try another one.
739c7f5dba7SAnatoly Burakov 		 */
740c7f5dba7SAnatoly Burakov 		if (addr == NULL)
741c7f5dba7SAnatoly Burakov 			continue;
742c7f5dba7SAnatoly Burakov 
743c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
744c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
745c7f5dba7SAnatoly Burakov 
746c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
747c7f5dba7SAnatoly Burakov 
748c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
749c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
750c7f5dba7SAnatoly Burakov 			goto fail;
751c7f5dba7SAnatoly Burakov 		}
752c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
753c7f5dba7SAnatoly Burakov 		if (!huge)
754c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
755c7f5dba7SAnatoly Burakov 
756c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
757c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
758c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
759c7f5dba7SAnatoly Burakov 			size_t offset;
760c7f5dba7SAnatoly Burakov 			void *cur;
761c7f5dba7SAnatoly Burakov 
762c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
763c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
764c7f5dba7SAnatoly Burakov 
765c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
766c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
767c7f5dba7SAnatoly Burakov 
768c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
769c7f5dba7SAnatoly Burakov 
770c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
771c7f5dba7SAnatoly Burakov 		}
772c7f5dba7SAnatoly Burakov 
773c7f5dba7SAnatoly Burakov 		break;
774c7f5dba7SAnatoly Burakov 	}
775c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
776c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
777c7f5dba7SAnatoly Burakov 		return -1;
778c7f5dba7SAnatoly Burakov 
779c7f5dba7SAnatoly Burakov 	param->addr = addr;
780c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
781c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
782c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
783c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
784c7f5dba7SAnatoly Burakov 
785c7f5dba7SAnatoly Burakov 	return 0;
786c7f5dba7SAnatoly Burakov fail:
787c7f5dba7SAnatoly Burakov 	if (iovas)
788c7f5dba7SAnatoly Burakov 		free(iovas);
789c7f5dba7SAnatoly Burakov 	if (addr)
790c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
791c7f5dba7SAnatoly Burakov 
792c7f5dba7SAnatoly Burakov 	return -1;
793c7f5dba7SAnatoly Burakov }
794c7f5dba7SAnatoly Burakov 
795c7f5dba7SAnatoly Burakov static int
796c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
797c7f5dba7SAnatoly Burakov {
798c7f5dba7SAnatoly Burakov 	struct extmem_param param;
799c7f5dba7SAnatoly Burakov 	int socket_id, ret;
800c7f5dba7SAnatoly Burakov 
801c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
802c7f5dba7SAnatoly Burakov 
803c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
804c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
805c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
806c7f5dba7SAnatoly Burakov 		/* create our heap */
807c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
808c7f5dba7SAnatoly Burakov 		if (ret < 0) {
809c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
810c7f5dba7SAnatoly Burakov 			return -1;
811c7f5dba7SAnatoly Burakov 		}
812c7f5dba7SAnatoly Burakov 	}
813c7f5dba7SAnatoly Burakov 
814c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
815c7f5dba7SAnatoly Burakov 	if (ret < 0) {
816c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
817c7f5dba7SAnatoly Burakov 		return -1;
818c7f5dba7SAnatoly Burakov 	}
819c7f5dba7SAnatoly Burakov 
820c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
821c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
822c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
823c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
824c7f5dba7SAnatoly Burakov 
825c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
826c7f5dba7SAnatoly Burakov 
827c7f5dba7SAnatoly Burakov 	/* not needed any more */
828c7f5dba7SAnatoly Burakov 	free(param.iova_table);
829c7f5dba7SAnatoly Burakov 
830c7f5dba7SAnatoly Burakov 	if (ret < 0) {
831c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
832c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
833c7f5dba7SAnatoly Burakov 		return -1;
834c7f5dba7SAnatoly Burakov 	}
835c7f5dba7SAnatoly Burakov 
836c7f5dba7SAnatoly Burakov 	/* success */
837c7f5dba7SAnatoly Burakov 
838c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
839c7f5dba7SAnatoly Burakov 			param.len >> 20);
840c7f5dba7SAnatoly Burakov 
841c7f5dba7SAnatoly Burakov 	return 0;
842c7f5dba7SAnatoly Burakov }
8433a0968c8SShahaf Shuler static void
8443a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8453a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8463a0968c8SShahaf Shuler {
8473a0968c8SShahaf Shuler 	uint16_t pid = 0;
8483a0968c8SShahaf Shuler 	int ret;
8493a0968c8SShahaf Shuler 
8503a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8513a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8523a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8533a0968c8SShahaf Shuler 
8543a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8553a0968c8SShahaf Shuler 					memhdr->len);
8563a0968c8SShahaf Shuler 		if (ret) {
8573a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8583a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8593a0968c8SShahaf Shuler 				    "for device %s\n",
8603a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8613a0968c8SShahaf Shuler 		}
8623a0968c8SShahaf Shuler 	}
8633a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8643a0968c8SShahaf Shuler 	if (ret) {
8653a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8663a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8673a0968c8SShahaf Shuler 	}
8683a0968c8SShahaf Shuler }
8693a0968c8SShahaf Shuler 
8703a0968c8SShahaf Shuler static void
8713a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8723a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8733a0968c8SShahaf Shuler {
8743a0968c8SShahaf Shuler 	uint16_t pid = 0;
8753a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8763a0968c8SShahaf Shuler 	int ret;
8773a0968c8SShahaf Shuler 
8783a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8793a0968c8SShahaf Shuler 				  page_size);
8803a0968c8SShahaf Shuler 	if (ret) {
8813a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8823a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8833a0968c8SShahaf Shuler 		return;
8843a0968c8SShahaf Shuler 	}
8853a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8863a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8873a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8883a0968c8SShahaf Shuler 
8893a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8903a0968c8SShahaf Shuler 				      memhdr->len);
8913a0968c8SShahaf Shuler 		if (ret) {
8923a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8933a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8943a0968c8SShahaf Shuler 				    "for device %s\n",
8953a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8963a0968c8SShahaf Shuler 		}
8973a0968c8SShahaf Shuler 	}
8983a0968c8SShahaf Shuler }
899c7f5dba7SAnatoly Burakov 
900af75078fSIntel /*
901af75078fSIntel  * Configuration initialisation done once at init time.
902af75078fSIntel  */
903401b744dSShahaf Shuler static struct rte_mempool *
904af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
905af75078fSIntel 		 unsigned int socket_id)
906af75078fSIntel {
907af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
908bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
909af75078fSIntel 	uint32_t mb_size;
910af75078fSIntel 
911dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
912af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
913148f963fSBruce Richardson 
914285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
915d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
916d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
917d1eb542eSOlivier Matz 
918c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
919c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
920c7f5dba7SAnatoly Burakov 		{
921c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
922c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
923c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
924c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
925c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
926c7f5dba7SAnatoly Burakov 			break;
927c7f5dba7SAnatoly Burakov 		}
928c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
929c7f5dba7SAnatoly Burakov 		{
930b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
931c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
932148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
93359fcf854SShahaf Shuler 				socket_id, mempool_flags);
93424427bb9SOlivier Matz 			if (rte_mp == NULL)
93524427bb9SOlivier Matz 				goto err;
936b19a0c75SOlivier Matz 
937b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
938b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
939b19a0c75SOlivier Matz 				rte_mp = NULL;
94024427bb9SOlivier Matz 				goto err;
941b19a0c75SOlivier Matz 			}
942b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
943b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
9443a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
945c7f5dba7SAnatoly Burakov 			break;
946c7f5dba7SAnatoly Burakov 		}
947c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
948c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
949c7f5dba7SAnatoly Burakov 		{
950c7f5dba7SAnatoly Burakov 			int heap_socket;
951c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
952c7f5dba7SAnatoly Burakov 
953c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
954c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
955c7f5dba7SAnatoly Burakov 
956c7f5dba7SAnatoly Burakov 			heap_socket =
957c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
958c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
959c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
960c7f5dba7SAnatoly Burakov 
9610e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
9620e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
963ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
964c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
965c7f5dba7SAnatoly Burakov 					heap_socket);
966c7f5dba7SAnatoly Burakov 			break;
967c7f5dba7SAnatoly Burakov 		}
968c7f5dba7SAnatoly Burakov 	default:
969c7f5dba7SAnatoly Burakov 		{
970c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
971c7f5dba7SAnatoly Burakov 		}
972bece7b6cSChristian Ehrhardt 	}
973148f963fSBruce Richardson 
97424427bb9SOlivier Matz err:
975af75078fSIntel 	if (rte_mp == NULL) {
976d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
977d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
978d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
979148f963fSBruce Richardson 	} else if (verbose_level > 0) {
980591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
981af75078fSIntel 	}
982401b744dSShahaf Shuler 	return rte_mp;
983af75078fSIntel }
984af75078fSIntel 
98520a0286fSLiu Xiaofeng /*
98620a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
98720a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
98820a0286fSLiu Xiaofeng  */
98920a0286fSLiu Xiaofeng static int
99020a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
99120a0286fSLiu Xiaofeng {
99220a0286fSLiu Xiaofeng 	static int warning_once = 0;
99320a0286fSLiu Xiaofeng 
994c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
99520a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
99620a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
99720a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
99820a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
99920a0286fSLiu Xiaofeng 			       " --numa.\n");
100020a0286fSLiu Xiaofeng 		warning_once = 1;
100120a0286fSLiu Xiaofeng 		return -1;
100220a0286fSLiu Xiaofeng 	}
100320a0286fSLiu Xiaofeng 	return 0;
100420a0286fSLiu Xiaofeng }
100520a0286fSLiu Xiaofeng 
10063f7311baSWei Dai /*
10073f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10083f7311baSWei Dai  * *pid return the port id which has minimal value of
10093f7311baSWei Dai  * max_rx_queues in all ports.
10103f7311baSWei Dai  */
10113f7311baSWei Dai queueid_t
10123f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10133f7311baSWei Dai {
10143f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
10156f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
10163f7311baSWei Dai 	portid_t pi;
10173f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10183f7311baSWei Dai 
10193f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10206f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10216f51deb9SIvan Ilchenko 			continue;
10226f51deb9SIvan Ilchenko 
10236f51deb9SIvan Ilchenko 		max_rxq_valid = true;
10243f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
10253f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
10263f7311baSWei Dai 			*pid = pi;
10273f7311baSWei Dai 		}
10283f7311baSWei Dai 	}
10296f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
10303f7311baSWei Dai }
10313f7311baSWei Dai 
10323f7311baSWei Dai /*
10333f7311baSWei Dai  * Check input rxq is valid or not.
10343f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10353f7311baSWei Dai  * of RX queues of all ports, it is valid.
10363f7311baSWei Dai  * if valid, return 0, else return -1
10373f7311baSWei Dai  */
10383f7311baSWei Dai int
10393f7311baSWei Dai check_nb_rxq(queueid_t rxq)
10403f7311baSWei Dai {
10413f7311baSWei Dai 	queueid_t allowed_max_rxq;
10423f7311baSWei Dai 	portid_t pid = 0;
10433f7311baSWei Dai 
10443f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
10453f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
10463f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
10473f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
10483f7311baSWei Dai 		       rxq,
10493f7311baSWei Dai 		       allowed_max_rxq,
10503f7311baSWei Dai 		       pid);
10513f7311baSWei Dai 		return -1;
10523f7311baSWei Dai 	}
10533f7311baSWei Dai 	return 0;
10543f7311baSWei Dai }
10553f7311baSWei Dai 
105636db4f6cSWei Dai /*
105736db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
105836db4f6cSWei Dai  * *pid return the port id which has minimal value of
105936db4f6cSWei Dai  * max_tx_queues in all ports.
106036db4f6cSWei Dai  */
106136db4f6cSWei Dai queueid_t
106236db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
106336db4f6cSWei Dai {
106436db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
10656f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
106636db4f6cSWei Dai 	portid_t pi;
106736db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
106836db4f6cSWei Dai 
106936db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10706f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10716f51deb9SIvan Ilchenko 			continue;
10726f51deb9SIvan Ilchenko 
10736f51deb9SIvan Ilchenko 		max_txq_valid = true;
107436db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
107536db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
107636db4f6cSWei Dai 			*pid = pi;
107736db4f6cSWei Dai 		}
107836db4f6cSWei Dai 	}
10796f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
108036db4f6cSWei Dai }
108136db4f6cSWei Dai 
108236db4f6cSWei Dai /*
108336db4f6cSWei Dai  * Check input txq is valid or not.
108436db4f6cSWei Dai  * If input txq is not greater than any of maximum number
108536db4f6cSWei Dai  * of TX queues of all ports, it is valid.
108636db4f6cSWei Dai  * if valid, return 0, else return -1
108736db4f6cSWei Dai  */
108836db4f6cSWei Dai int
108936db4f6cSWei Dai check_nb_txq(queueid_t txq)
109036db4f6cSWei Dai {
109136db4f6cSWei Dai 	queueid_t allowed_max_txq;
109236db4f6cSWei Dai 	portid_t pid = 0;
109336db4f6cSWei Dai 
109436db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
109536db4f6cSWei Dai 	if (txq > allowed_max_txq) {
109636db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
109736db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
109836db4f6cSWei Dai 		       txq,
109936db4f6cSWei Dai 		       allowed_max_txq,
110036db4f6cSWei Dai 		       pid);
110136db4f6cSWei Dai 		return -1;
110236db4f6cSWei Dai 	}
110336db4f6cSWei Dai 	return 0;
110436db4f6cSWei Dai }
110536db4f6cSWei Dai 
1106af75078fSIntel static void
1107af75078fSIntel init_config(void)
1108af75078fSIntel {
1109ce8d5614SIntel 	portid_t pid;
1110af75078fSIntel 	struct rte_port *port;
1111af75078fSIntel 	struct rte_mempool *mbp;
1112af75078fSIntel 	unsigned int nb_mbuf_per_pool;
1113af75078fSIntel 	lcoreid_t  lc_id;
11147acf894dSStephen Hurd 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1115b7091f1dSJiayu Hu 	struct rte_gro_param gro_param;
111652f38a20SJiayu Hu 	uint32_t gso_types;
111733f9630fSSunil Kumar Kori 	uint16_t data_size;
111833f9630fSSunil Kumar Kori 	bool warning = 0;
1119c73a9071SWei Dai 	int k;
11206f51deb9SIvan Ilchenko 	int ret;
1121af75078fSIntel 
11227acf894dSStephen Hurd 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1123487f9a59SYulong Pei 
1124af75078fSIntel 	/* Configuration of logical cores. */
1125af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1126af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
1127fdf20fa7SSergio Gonzalez Monroy 				RTE_CACHE_LINE_SIZE);
1128af75078fSIntel 	if (fwd_lcores == NULL) {
1129ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1130ce8d5614SIntel 							"failed\n", nb_lcores);
1131af75078fSIntel 	}
1132af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1133af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1134af75078fSIntel 					       sizeof(struct fwd_lcore),
1135fdf20fa7SSergio Gonzalez Monroy 					       RTE_CACHE_LINE_SIZE);
1136af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
1137ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1138ce8d5614SIntel 								"failed\n");
1139af75078fSIntel 		}
1140af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1141af75078fSIntel 	}
1142af75078fSIntel 
11437d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1144ce8d5614SIntel 		port = &ports[pid];
11458b9bd0efSMoti Haimovsky 		/* Apply default TxRx configuration for all ports */
1146fd8c20aaSShahaf Shuler 		port->dev_conf.txmode = tx_mode;
1147384161e0SShahaf Shuler 		port->dev_conf.rxmode = rx_mode;
11486f51deb9SIvan Ilchenko 
11496f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
11506f51deb9SIvan Ilchenko 		if (ret != 0)
11516f51deb9SIvan Ilchenko 			rte_exit(EXIT_FAILURE,
11526f51deb9SIvan Ilchenko 				 "rte_eth_dev_info_get() failed\n");
11537c45f6c0SFerruh Yigit 
115407e5f7bdSShahaf Shuler 		if (!(port->dev_info.tx_offload_capa &
115507e5f7bdSShahaf Shuler 		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
115607e5f7bdSShahaf Shuler 			port->dev_conf.txmode.offloads &=
115707e5f7bdSShahaf Shuler 				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1158c18feafaSDekel Peled 		if (!(port->dev_info.tx_offload_capa &
1159c18feafaSDekel Peled 			DEV_TX_OFFLOAD_MATCH_METADATA))
1160c18feafaSDekel Peled 			port->dev_conf.txmode.offloads &=
1161c18feafaSDekel Peled 				~DEV_TX_OFFLOAD_MATCH_METADATA;
1162b6ea6408SIntel 		if (numa_support) {
1163b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
1164b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
1165b6ea6408SIntel 			else {
1166b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
116720a0286fSLiu Xiaofeng 
116829841336SPhil Yang 				/*
116929841336SPhil Yang 				 * if socket_id is invalid,
117029841336SPhil Yang 				 * set to the first available socket.
117129841336SPhil Yang 				 */
117220a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
117329841336SPhil Yang 					socket_id = socket_ids[0];
1174b6ea6408SIntel 				port_per_socket[socket_id]++;
1175b6ea6408SIntel 			}
1176b6ea6408SIntel 		}
1177b6ea6408SIntel 
1178c73a9071SWei Dai 		/* Apply Rx offloads configuration */
1179c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_rx_queues; k++)
1180c73a9071SWei Dai 			port->rx_conf[k].offloads =
1181c73a9071SWei Dai 				port->dev_conf.rxmode.offloads;
1182c73a9071SWei Dai 		/* Apply Tx offloads configuration */
1183c73a9071SWei Dai 		for (k = 0; k < port->dev_info.max_tx_queues; k++)
1184c73a9071SWei Dai 			port->tx_conf[k].offloads =
1185c73a9071SWei Dai 				port->dev_conf.txmode.offloads;
1186c73a9071SWei Dai 
1187ce8d5614SIntel 		/* set flag to initialize port/queue */
1188ce8d5614SIntel 		port->need_reconfig = 1;
1189ce8d5614SIntel 		port->need_reconfig_queues = 1;
1190c18feafaSDekel Peled 		port->tx_metadata = 0;
119133f9630fSSunil Kumar Kori 
119233f9630fSSunil Kumar Kori 		/* Check for maximum number of segments per MTU. Accordingly
119333f9630fSSunil Kumar Kori 		 * update the mbuf data size.
119433f9630fSSunil Kumar Kori 		 */
1195163fbaafSFerruh Yigit 		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1196163fbaafSFerruh Yigit 				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
119733f9630fSSunil Kumar Kori 			data_size = rx_mode.max_rx_pkt_len /
119833f9630fSSunil Kumar Kori 				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
119933f9630fSSunil Kumar Kori 
120033f9630fSSunil Kumar Kori 			if ((data_size + RTE_PKTMBUF_HEADROOM) >
120133f9630fSSunil Kumar Kori 							mbuf_data_size) {
120233f9630fSSunil Kumar Kori 				mbuf_data_size = data_size +
120333f9630fSSunil Kumar Kori 						 RTE_PKTMBUF_HEADROOM;
120433f9630fSSunil Kumar Kori 				warning = 1;
1205ce8d5614SIntel 			}
120633f9630fSSunil Kumar Kori 		}
120733f9630fSSunil Kumar Kori 	}
120833f9630fSSunil Kumar Kori 
120933f9630fSSunil Kumar Kori 	if (warning)
121033f9630fSSunil Kumar Kori 		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
121133f9630fSSunil Kumar Kori 			    mbuf_data_size);
1212ce8d5614SIntel 
12133ab64341SOlivier Matz 	/*
12143ab64341SOlivier Matz 	 * Create pools of mbuf.
12153ab64341SOlivier Matz 	 * If NUMA support is disabled, create a single pool of mbuf in
12163ab64341SOlivier Matz 	 * socket 0 memory by default.
12173ab64341SOlivier Matz 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
12183ab64341SOlivier Matz 	 *
12193ab64341SOlivier Matz 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
12203ab64341SOlivier Matz 	 * nb_txd can be configured at run time.
12213ab64341SOlivier Matz 	 */
12223ab64341SOlivier Matz 	if (param_total_num_mbufs)
12233ab64341SOlivier Matz 		nb_mbuf_per_pool = param_total_num_mbufs;
12243ab64341SOlivier Matz 	else {
12253ab64341SOlivier Matz 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
12263ab64341SOlivier Matz 			(nb_lcores * mb_mempool_cache) +
12273ab64341SOlivier Matz 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
12283ab64341SOlivier Matz 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
12293ab64341SOlivier Matz 	}
12303ab64341SOlivier Matz 
1231b6ea6408SIntel 	if (numa_support) {
1232b6ea6408SIntel 		uint8_t i;
1233ce8d5614SIntel 
1234c9cafcc8SShahaf Shuler 		for (i = 0; i < num_sockets; i++)
1235401b744dSShahaf Shuler 			mempools[i] = mbuf_pool_create(mbuf_data_size,
1236401b744dSShahaf Shuler 						       nb_mbuf_per_pool,
1237c9cafcc8SShahaf Shuler 						       socket_ids[i]);
12383ab64341SOlivier Matz 	} else {
12393ab64341SOlivier Matz 		if (socket_num == UMA_NO_CONFIG)
1240401b744dSShahaf Shuler 			mempools[0] = mbuf_pool_create(mbuf_data_size,
1241401b744dSShahaf Shuler 						       nb_mbuf_per_pool, 0);
12423ab64341SOlivier Matz 		else
1243401b744dSShahaf Shuler 			mempools[socket_num] = mbuf_pool_create
1244401b744dSShahaf Shuler 							(mbuf_data_size,
1245401b744dSShahaf Shuler 							 nb_mbuf_per_pool,
12463ab64341SOlivier Matz 							 socket_num);
12473ab64341SOlivier Matz 	}
1248b6ea6408SIntel 
1249b6ea6408SIntel 	init_port_config();
12505886ae07SAdrien Mazarguil 
125152f38a20SJiayu Hu 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1252aaacd052SJiayu Hu 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
12535886ae07SAdrien Mazarguil 	/*
12545886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
12555886ae07SAdrien Mazarguil 	 */
12565886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
12578fd8bebcSAdrien Mazarguil 		mbp = mbuf_pool_find(
12588fd8bebcSAdrien Mazarguil 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
12598fd8bebcSAdrien Mazarguil 
12605886ae07SAdrien Mazarguil 		if (mbp == NULL)
12615886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
12625886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
126352f38a20SJiayu Hu 		/* initialize GSO context */
126452f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
126552f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
126652f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
126735b2d13fSOlivier Matz 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
126835b2d13fSOlivier Matz 			RTE_ETHER_CRC_LEN;
126952f38a20SJiayu Hu 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
12705886ae07SAdrien Mazarguil 	}
12715886ae07SAdrien Mazarguil 
1272ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
1273ce8d5614SIntel 	if (init_fwd_streams() < 0)
1274ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
12750c0db76fSBernard Iremonger 
12760c0db76fSBernard Iremonger 	fwd_config_setup();
1277b7091f1dSJiayu Hu 
1278b7091f1dSJiayu Hu 	/* create a gro context for each lcore */
1279b7091f1dSJiayu Hu 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1280b7091f1dSJiayu Hu 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1281b7091f1dSJiayu Hu 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1282b7091f1dSJiayu Hu 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1283b7091f1dSJiayu Hu 		gro_param.socket_id = rte_lcore_to_socket_id(
1284b7091f1dSJiayu Hu 				fwd_lcores_cpuids[lc_id]);
1285b7091f1dSJiayu Hu 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1286b7091f1dSJiayu Hu 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1287b7091f1dSJiayu Hu 			rte_exit(EXIT_FAILURE,
1288b7091f1dSJiayu Hu 					"rte_gro_ctx_create() failed\n");
1289b7091f1dSJiayu Hu 		}
1290b7091f1dSJiayu Hu 	}
12910ad778b3SJasvinder Singh 
12920ad778b3SJasvinder Singh #if defined RTE_LIBRTE_PMD_SOFTNIC
12930ad778b3SJasvinder Singh 	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
12940ad778b3SJasvinder Singh 		RTE_ETH_FOREACH_DEV(pid) {
12950ad778b3SJasvinder Singh 			port = &ports[pid];
12960ad778b3SJasvinder Singh 			const char *driver = port->dev_info.driver_name;
12970ad778b3SJasvinder Singh 
12980ad778b3SJasvinder Singh 			if (strcmp(driver, "net_softnic") == 0)
12990ad778b3SJasvinder Singh 				port->softport.fwd_lcore_arg = fwd_lcores;
13000ad778b3SJasvinder Singh 		}
13010ad778b3SJasvinder Singh 	}
13020ad778b3SJasvinder Singh #endif
13030ad778b3SJasvinder Singh 
1304ce8d5614SIntel }
1305ce8d5614SIntel 
13062950a769SDeclan Doherty 
13072950a769SDeclan Doherty void
1308a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
13092950a769SDeclan Doherty {
13102950a769SDeclan Doherty 	struct rte_port *port;
13116f51deb9SIvan Ilchenko 	int ret;
13122950a769SDeclan Doherty 
13132950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
13142950a769SDeclan Doherty 	port = &ports[new_port_id];
13156f51deb9SIvan Ilchenko 
13166f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
13176f51deb9SIvan Ilchenko 	if (ret != 0)
13186f51deb9SIvan Ilchenko 		return;
13192950a769SDeclan Doherty 
13202950a769SDeclan Doherty 	/* set flag to initialize port/queue */
13212950a769SDeclan Doherty 	port->need_reconfig = 1;
13222950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1323a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
13242950a769SDeclan Doherty 
13252950a769SDeclan Doherty 	init_port_config();
13262950a769SDeclan Doherty }
13272950a769SDeclan Doherty 
13282950a769SDeclan Doherty 
1329ce8d5614SIntel int
1330ce8d5614SIntel init_fwd_streams(void)
1331ce8d5614SIntel {
1332ce8d5614SIntel 	portid_t pid;
1333ce8d5614SIntel 	struct rte_port *port;
1334ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
13355a8fb55cSReshma Pattan 	queueid_t q;
1336ce8d5614SIntel 
1337ce8d5614SIntel 	/* set socket id according to numa or not */
13387d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1339ce8d5614SIntel 		port = &ports[pid];
1340ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1341ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1342ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1343ce8d5614SIntel 				port->dev_info.max_rx_queues);
1344ce8d5614SIntel 			return -1;
1345ce8d5614SIntel 		}
1346ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1347ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1348ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1349ce8d5614SIntel 				port->dev_info.max_tx_queues);
1350ce8d5614SIntel 			return -1;
1351ce8d5614SIntel 		}
135220a0286fSLiu Xiaofeng 		if (numa_support) {
135320a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
135420a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
135520a0286fSLiu Xiaofeng 			else {
1356b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
135720a0286fSLiu Xiaofeng 
135829841336SPhil Yang 				/*
135929841336SPhil Yang 				 * if socket_id is invalid,
136029841336SPhil Yang 				 * set to the first available socket.
136129841336SPhil Yang 				 */
136220a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
136329841336SPhil Yang 					port->socket_id = socket_ids[0];
136420a0286fSLiu Xiaofeng 			}
136520a0286fSLiu Xiaofeng 		}
1366b6ea6408SIntel 		else {
1367b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1368af75078fSIntel 				port->socket_id = 0;
1369b6ea6408SIntel 			else
1370b6ea6408SIntel 				port->socket_id = socket_num;
1371b6ea6408SIntel 		}
1372af75078fSIntel 	}
1373af75078fSIntel 
13745a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
13755a8fb55cSReshma Pattan 	if (q == 0) {
13765a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
13775a8fb55cSReshma Pattan 		return -1;
13785a8fb55cSReshma Pattan 	}
13795a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1380ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1381ce8d5614SIntel 		return 0;
1382ce8d5614SIntel 	/* clear the old */
1383ce8d5614SIntel 	if (fwd_streams != NULL) {
1384ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1385ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1386ce8d5614SIntel 				continue;
1387ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1388ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1389af75078fSIntel 		}
1390ce8d5614SIntel 		rte_free(fwd_streams);
1391ce8d5614SIntel 		fwd_streams = NULL;
1392ce8d5614SIntel 	}
1393ce8d5614SIntel 
1394ce8d5614SIntel 	/* init new */
1395ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
13961f84c469SMatan Azrad 	if (nb_fwd_streams) {
1397ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
13981f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
13991f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1400ce8d5614SIntel 		if (fwd_streams == NULL)
14011f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
14021f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
14031f84c469SMatan Azrad 				 nb_fwd_streams);
1404ce8d5614SIntel 
1405af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
14061f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
14071f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
14081f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1409ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
14101f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
14111f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
14121f84c469SMatan Azrad 		}
1413af75078fSIntel 	}
1414ce8d5614SIntel 
1415ce8d5614SIntel 	return 0;
1416af75078fSIntel }
1417af75078fSIntel 
1418af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1419af75078fSIntel static void
1420af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1421af75078fSIntel {
1422af75078fSIntel 	unsigned int total_burst;
1423af75078fSIntel 	unsigned int nb_burst;
1424af75078fSIntel 	unsigned int burst_stats[3];
1425af75078fSIntel 	uint16_t pktnb_stats[3];
1426af75078fSIntel 	uint16_t nb_pkt;
1427af75078fSIntel 	int burst_percent[3];
1428af75078fSIntel 
1429af75078fSIntel 	/*
1430af75078fSIntel 	 * First compute the total number of packet bursts and the
1431af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1432af75078fSIntel 	 */
1433af75078fSIntel 	total_burst = 0;
1434af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1435af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1436af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1437af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1438af75078fSIntel 		if (nb_burst == 0)
1439af75078fSIntel 			continue;
1440af75078fSIntel 		total_burst += nb_burst;
1441af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1442af75078fSIntel 			burst_stats[1] = burst_stats[0];
1443af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1444af75078fSIntel 			burst_stats[0] = nb_burst;
1445af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1446fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1447fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1448fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1449af75078fSIntel 		}
1450af75078fSIntel 	}
1451af75078fSIntel 	if (total_burst == 0)
1452af75078fSIntel 		return;
1453af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1454af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1455af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1456af75078fSIntel 	if (burst_stats[0] == total_burst) {
1457af75078fSIntel 		printf("]\n");
1458af75078fSIntel 		return;
1459af75078fSIntel 	}
1460af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1461af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1462af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1463af75078fSIntel 		return;
1464af75078fSIntel 	}
1465af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1466af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1467af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1468af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1469af75078fSIntel 		return;
1470af75078fSIntel 	}
1471af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1472af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1473af75078fSIntel }
1474af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1475af75078fSIntel 
1476af75078fSIntel static void
1477af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1478af75078fSIntel {
1479af75078fSIntel 	struct fwd_stream *fs;
1480af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1481af75078fSIntel 
1482af75078fSIntel 	fs = fwd_streams[stream_id];
1483af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1484af75078fSIntel 	    (fs->fwd_dropped == 0))
1485af75078fSIntel 		return;
1486af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1487af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1488af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1489af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1490c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1491c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1492af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1493af75078fSIntel 
1494af75078fSIntel 	/* if checksum mode */
1495af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1496c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1497c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1498c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
149958d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
150058d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
150194d65546SDavid Marchand 	} else {
150294d65546SDavid Marchand 		printf("\n");
1503af75078fSIntel 	}
1504af75078fSIntel 
1505af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1506af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1507af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1508af75078fSIntel #endif
1509af75078fSIntel }
1510af75078fSIntel 
/*
 * Display accumulated forwarding statistics: per-stream (when there are
 * more streams than ports), per-port, and grand totals over all
 * forwarding ports. Hardware counters are shown as deltas against the
 * snapshot taken by fwd_stats_reset().
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of the per-stream software counters. */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	/*
	 * Fold every stream's counters into its RX/TX ports. When several
	 * streams share a port (more streams than ports), also print each
	 * stream's individual statistics.
	 */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			/* One stream per port: remember it for burst stats. */
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		/*
		 * Subtract the snapshot stored by fwd_stats_reset() so only
		 * traffic since the last reset is reported.
		 */
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		/*
		 * Two layouts: a compact one when per-queue stats mapping is
		 * disabled, a wider aligned one (plus per-queue registers
		 * below) when it is enabled.
		 */
		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets:             %14"PRIu64
			       "    RX-dropped:%14"PRIu64
			       "    RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       "    Bad-l4csum:%14"PRIu64
				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs:             %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets:             %14"PRIu64
			       "    TX-dropped:%14"PRIu64
			       "    TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		/* Burst-size spread of the single stream mapped to this port
		 * (only recorded in the one-stream-per-port case above). */
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		/* Per-queue statistics registers, when mapping is enabled. */
		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "                                 TX-bytes:%14"
				       PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	/* Grand totals over all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
}
169753324971SDavid Marchand 
169853324971SDavid Marchand void
169953324971SDavid Marchand fwd_stats_reset(void)
170053324971SDavid Marchand {
170153324971SDavid Marchand 	streamid_t sm_id;
170253324971SDavid Marchand 	portid_t pt_id;
170353324971SDavid Marchand 	int i;
170453324971SDavid Marchand 
170553324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
170653324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
170753324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
170853324971SDavid Marchand 	}
170953324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
171053324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
171153324971SDavid Marchand 
171253324971SDavid Marchand 		fs->rx_packets = 0;
171353324971SDavid Marchand 		fs->tx_packets = 0;
171453324971SDavid Marchand 		fs->fwd_dropped = 0;
171553324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
171653324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
171753324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
171853324971SDavid Marchand 
171953324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
172053324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
172153324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
172253324971SDavid Marchand #endif
172353324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
172453324971SDavid Marchand 		fs->core_cycles = 0;
172553324971SDavid Marchand #endif
172653324971SDavid Marchand 	}
172753324971SDavid Marchand }
172853324971SDavid Marchand 
1729af75078fSIntel static void
17307741e4cfSIntel flush_fwd_rx_queues(void)
1731af75078fSIntel {
1732af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1733af75078fSIntel 	portid_t  rxp;
17347741e4cfSIntel 	portid_t port_id;
1735af75078fSIntel 	queueid_t rxq;
1736af75078fSIntel 	uint16_t  nb_rx;
1737af75078fSIntel 	uint16_t  i;
1738af75078fSIntel 	uint8_t   j;
1739f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1740594302c7SJames Poole 	uint64_t timer_period;
1741f487715fSReshma Pattan 
1742f487715fSReshma Pattan 	/* convert to number of cycles */
1743594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1744af75078fSIntel 
1745af75078fSIntel 	for (j = 0; j < 2; j++) {
17467741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1747af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
17487741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1749f487715fSReshma Pattan 				/**
1750f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1751f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1752f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1753f487715fSReshma Pattan 				* after 1sec timer expiry.
1754f487715fSReshma Pattan 				*/
1755f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1756af75078fSIntel 				do {
17577741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1758013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1759af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1760af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1761f487715fSReshma Pattan 
1762f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1763f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1764f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1765f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1766f487715fSReshma Pattan 					(timer_tsc < timer_period));
1767f487715fSReshma Pattan 				timer_tsc = 0;
1768af75078fSIntel 			}
1769af75078fSIntel 		}
1770af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1771af75078fSIntel 	}
1772af75078fSIntel }
1773af75078fSIntel 
/*
 * Forwarding main loop of one logical core: repeatedly invoke @pkt_fwd
 * on every stream assigned to @fc until fc->stopped is raised by
 * another thread. Optionally performs the periodic bitrate and latency
 * statistics updates from the single lcore elected for each task.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore's slice of the global stream array. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		/* One full pass over all streams owned by this lcore. */
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Bitrate stats: sampled once per second, on one lcore only. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats: likewise updated from a single lcore. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1816af75078fSIntel 
1817af75078fSIntel static int
1818af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1819af75078fSIntel {
1820af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1821af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1822af75078fSIntel 	return 0;
1823af75078fSIntel }
1824af75078fSIntel 
1825af75078fSIntel /*
1826af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1827af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1828af75078fSIntel  */
1829af75078fSIntel static int
1830af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1831af75078fSIntel {
1832af75078fSIntel 	struct fwd_lcore *fwd_lc;
1833af75078fSIntel 	struct fwd_lcore tmp_lcore;
1834af75078fSIntel 
1835af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1836af75078fSIntel 	tmp_lcore = *fwd_lc;
1837af75078fSIntel 	tmp_lcore.stopped = 1;
1838af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1839af75078fSIntel 	return 0;
1840af75078fSIntel }
1841af75078fSIntel 
1842af75078fSIntel /*
1843af75078fSIntel  * Launch packet forwarding:
1844af75078fSIntel  *     - Setup per-port forwarding context.
1845af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1846af75078fSIntel  */
1847af75078fSIntel static void
1848af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1849af75078fSIntel {
1850af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1851af75078fSIntel 	unsigned int i;
1852af75078fSIntel 	unsigned int lc_id;
1853af75078fSIntel 	int diag;
1854af75078fSIntel 
1855af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1856af75078fSIntel 	if (port_fwd_begin != NULL) {
1857af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1858af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1859af75078fSIntel 	}
1860af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1861af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1862af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1863af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1864af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1865af75078fSIntel 						     fwd_lcores[i], lc_id);
1866af75078fSIntel 			if (diag != 0)
1867af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1868af75078fSIntel 				       lc_id, diag);
1869af75078fSIntel 		}
1870af75078fSIntel 	}
1871af75078fSIntel }
1872af75078fSIntel 
/*
 * Launch packet forwarding configuration.
 *
 * Validates that the selected forwarding engine has the queues it needs
 * (rxonly needs RX queues, txonly needs TX queues, every other engine
 * needs both — violations abort the application via rte_exit()), checks
 * all ports are started and no run is in progress, then builds the
 * forwarding config and launches the engine on every forwarding lcore.
 * When 'with_tx_first' is non-zero, that many tx_only bursts are sent
 * first (e.g. to prime a loopback) before the real engine starts.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if(dcb_test) {
		/* DCB mode requires every forwarding port to have been
		 * configured for DCB and more than one forwarding core.
		 */
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets sitting in the RX queues before the run,
	 * unless flushing was disabled on the command line.
	 */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		/* Run the tx_only engine for 'with_tx_first' bursts,
		 * bracketed by its per-port begin/end callbacks.
		 */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	/* Launch the configured forwarding engine on all fwd lcores. */
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1959af75078fSIntel 
1960af75078fSIntel void
1961af75078fSIntel stop_packet_forwarding(void)
1962af75078fSIntel {
1963af75078fSIntel 	port_fwd_end_t port_fwd_end;
1964af75078fSIntel 	lcoreid_t lc_id;
196553324971SDavid Marchand 	portid_t pt_id;
196653324971SDavid Marchand 	int i;
1967af75078fSIntel 
1968af75078fSIntel 	if (test_done) {
1969af75078fSIntel 		printf("Packet forwarding not started\n");
1970af75078fSIntel 		return;
1971af75078fSIntel 	}
1972af75078fSIntel 	printf("Telling cores to stop...");
1973af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1974af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1975af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1976af75078fSIntel 	rte_eal_mp_wait_lcore();
1977af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1978af75078fSIntel 	if (port_fwd_end != NULL) {
1979af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1980af75078fSIntel 			pt_id = fwd_ports_ids[i];
1981af75078fSIntel 			(*port_fwd_end)(pt_id);
1982af75078fSIntel 		}
1983af75078fSIntel 	}
1984c185d42cSDavid Marchand 
198553324971SDavid Marchand 	fwd_stats_display();
198658d475b7SJerin Jacob 
1987af75078fSIntel 	printf("\nDone.\n");
1988af75078fSIntel 	test_done = 1;
1989af75078fSIntel }
1990af75078fSIntel 
1991cfae07fdSOuyang Changchun void
1992cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1993cfae07fdSOuyang Changchun {
1994492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1995cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1996cfae07fdSOuyang Changchun }
1997cfae07fdSOuyang Changchun 
1998cfae07fdSOuyang Changchun void
1999cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2000cfae07fdSOuyang Changchun {
2001492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2002cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2003cfae07fdSOuyang Changchun }
2004cfae07fdSOuyang Changchun 
2005ce8d5614SIntel static int
2006ce8d5614SIntel all_ports_started(void)
2007ce8d5614SIntel {
2008ce8d5614SIntel 	portid_t pi;
2009ce8d5614SIntel 	struct rte_port *port;
2010ce8d5614SIntel 
20117d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2012ce8d5614SIntel 		port = &ports[pi];
2013ce8d5614SIntel 		/* Check if there is a port which is not started */
201441b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
201541b05095SBernard Iremonger 			(port->slave_flag == 0))
2016ce8d5614SIntel 			return 0;
2017ce8d5614SIntel 	}
2018ce8d5614SIntel 
2019ce8d5614SIntel 	/* No port is not started */
2020ce8d5614SIntel 	return 1;
2021ce8d5614SIntel }
2022ce8d5614SIntel 
2023148f963fSBruce Richardson int
20246018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
20256018eb8cSShahaf Shuler {
20266018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
20276018eb8cSShahaf Shuler 
20286018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
20296018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
20306018eb8cSShahaf Shuler 		return 0;
20316018eb8cSShahaf Shuler 	return 1;
20326018eb8cSShahaf Shuler }
20336018eb8cSShahaf Shuler 
20346018eb8cSShahaf Shuler int
2035edab33b1STetsuya Mukawa all_ports_stopped(void)
2036edab33b1STetsuya Mukawa {
2037edab33b1STetsuya Mukawa 	portid_t pi;
2038edab33b1STetsuya Mukawa 
20397d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
20406018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2041edab33b1STetsuya Mukawa 			return 0;
2042edab33b1STetsuya Mukawa 	}
2043edab33b1STetsuya Mukawa 
2044edab33b1STetsuya Mukawa 	return 1;
2045edab33b1STetsuya Mukawa }
2046edab33b1STetsuya Mukawa 
2047edab33b1STetsuya Mukawa int
2048edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2049edab33b1STetsuya Mukawa {
2050edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2051edab33b1STetsuya Mukawa 		return 0;
2052edab33b1STetsuya Mukawa 
2053edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2054edab33b1STetsuya Mukawa 		return 0;
2055edab33b1STetsuya Mukawa 
2056edab33b1STetsuya Mukawa 	return 1;
2057edab33b1STetsuya Mukawa }
2058edab33b1STetsuya Mukawa 
/*
 * Start port 'pid', or every port when pid == RTE_PORT_ALL.
 *
 * Each selected port is claimed by moving its status
 * STOPPED -> HANDLING via atomic compare-and-set; it is then
 * (re)configured and its RX/TX queues (re)created if the
 * need_reconfig / need_reconfig_queues flags are set, started with
 * rte_eth_dev_start(), and moved to STARTED. On a configuration
 * failure the status is rolled back to STOPPED, the corresponding
 * need_reconfig* flag is re-armed for the next attempt, and -1 is
 * returned. Returns 0 otherwise.
 */
int
start_port(portid_t pid)
{
	/* need_check_link_status: -1 = no port matched,
	 * 0 = matched but none started, 1 = at least one started.
	 */
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a stopped port can be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			/* Flow isolation must be applied before configure. */
			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Roll back the claim so a retry is possible. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honour a per-port TX ring NUMA override
				 * when NUMA support is enabled.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX ring NUMA override: the mbuf pool
					 * must exist on that same socket.
					 */
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		configure_rxtx_dump_callbacks(verbose_level);
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
2230ce8d5614SIntel 
/*
 * Stop port 'pid', or every port when pid == RTE_PORT_ALL.
 *
 * Ports that are still part of a running forwarding configuration and
 * bonding slave ports (managed by their master) are skipped with a
 * message. Each stopped port's status moves
 * STARTED -> HANDLING -> STOPPED via atomic compare-and-set so a
 * concurrent state change is detected instead of clobbered.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Leaving DCB test mode: clear both DCB flags. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Claim the port; only a started port can be stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	/* Report link status once, unless disabled on the command line. */
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2279ce8d5614SIntel 
2280ce6959bfSWisam Jaddo static void
22814f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2282ce6959bfSWisam Jaddo {
22834f1de450SThomas Monjalon 	portid_t i;
22844f1de450SThomas Monjalon 	portid_t new_total = 0;
2285ce6959bfSWisam Jaddo 
22864f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
22874f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
22884f1de450SThomas Monjalon 			array[new_total] = array[i];
22894f1de450SThomas Monjalon 			new_total++;
2290ce6959bfSWisam Jaddo 		}
22914f1de450SThomas Monjalon 	*total = new_total;
22924f1de450SThomas Monjalon }
22934f1de450SThomas Monjalon 
/*
 * Drop ports that have become invalid (e.g. after detach/close) from
 * the global port list and the forwarding port list, and keep the
 * configured-port count in sync with the forwarding count.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2301ce6959bfSWisam Jaddo 
/*
 * Close port 'pid', or every port when pid == RTE_PORT_ALL.
 *
 * Ports still in a running forwarding configuration, bonding slaves
 * and already-closed ports are skipped. For each closed port the flow
 * rules are flushed, rte_eth_dev_close() is invoked, and ports that
 * became invalid are dropped from the global lists. Status moves
 * STOPPED -> HANDLING -> CLOSED via atomic compare-and-set.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* CLOSED -> CLOSED cmpset is a pure atomic read of the
		 * status: detect an already-closed port.
		 */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* Claim the port; only a stopped port can be closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Flow rules must be flushed before the device closes. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2353ce8d5614SIntel 
/*
 * Reset port 'pid' via rte_eth_dev_reset(), or every port when
 * pid == RTE_PORT_ALL. All targeted ports must be stopped first.
 * A successfully reset port is flagged for full reconfiguration
 * (port and queues) on the next start_port().
 */
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	/* Refuse to reset any port that is not stopped. */
	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		printf("Can not reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			/* Force reconfiguration on the next start. */
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
240097f1e196SWei Dai 
240197f1e196SWei Dai void
2402edab33b1STetsuya Mukawa attach_port(char *identifier)
2403ce8d5614SIntel {
24044f1ed78eSThomas Monjalon 	portid_t pi;
2405c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2406ce8d5614SIntel 
2407edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2408edab33b1STetsuya Mukawa 
2409edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2410edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2411edab33b1STetsuya Mukawa 		return;
2412ce8d5614SIntel 	}
2413ce8d5614SIntel 
241475b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2415c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2416edab33b1STetsuya Mukawa 		return;
2417c9cce428SThomas Monjalon 	}
2418c9cce428SThomas Monjalon 
24194f1ed78eSThomas Monjalon 	/* first attach mode: event */
24204f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
24214f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
24224f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
24234f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
24244f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
24254f1ed78eSThomas Monjalon 				setup_attached_port(pi);
24264f1ed78eSThomas Monjalon 		return;
24274f1ed78eSThomas Monjalon 	}
24284f1ed78eSThomas Monjalon 
24294f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
243086fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
24314f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
243286fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
243386fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2434c9cce428SThomas Monjalon 		setup_attached_port(pi);
2435c9cce428SThomas Monjalon 	}
243686fa5de1SThomas Monjalon }
2437c9cce428SThomas Monjalon 
/*
 * Finish bringing a freshly probed port under testpmd control:
 * reconfigure it on its NUMA socket, attempt to enable promiscuous
 * mode (failure is reported but non-fatal), append it to the global
 * and forwarding port lists, and mark it STOPPED so start_port()
 * can be used on it.
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	/* Register the port in the global and forwarding lists. */
	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
2463edab33b1STetsuya Mukawa 
/*
 * Detach the underlying rte_device of 'port_id' via rte_dev_remove().
 * The port must be stopped; if it was not closed, its flow rules are
 * flushed first. All sibling ports sharing the same device are forced
 * to CLOSED and the global port lists are compacted.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		/* Flow rules must be flushed before removing the device. */
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	/* NOTE(review): 'dev' is dereferenced here after rte_dev_remove()
	 * succeeded — confirm the rte_device object outlives the removal
	 * on all buses, otherwise this iteration is a use-after-free.
	 */
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
25095f4ec54fSChen Jing D(Mark) 
2510af75078fSIntel void
251155e51c96SNithin Dabilpuram detach_device(char *identifier)
251255e51c96SNithin Dabilpuram {
251355e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
251455e51c96SNithin Dabilpuram 	struct rte_devargs da;
251555e51c96SNithin Dabilpuram 	portid_t port_id;
251655e51c96SNithin Dabilpuram 
251755e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
251855e51c96SNithin Dabilpuram 
251955e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
252055e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
252155e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
252255e51c96SNithin Dabilpuram 		if (da.args)
252355e51c96SNithin Dabilpuram 			free(da.args);
252455e51c96SNithin Dabilpuram 		return;
252555e51c96SNithin Dabilpuram 	}
252655e51c96SNithin Dabilpuram 
252755e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
252855e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
252955e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
253055e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
253155e51c96SNithin Dabilpuram 				return;
253255e51c96SNithin Dabilpuram 			}
253355e51c96SNithin Dabilpuram 
253455e51c96SNithin Dabilpuram 			/* sibling ports are forced to be closed */
253555e51c96SNithin Dabilpuram 			if (ports[port_id].flow_list)
253655e51c96SNithin Dabilpuram 				port_flow_flush(port_id);
253755e51c96SNithin Dabilpuram 			ports[port_id].port_status = RTE_PORT_CLOSED;
253855e51c96SNithin Dabilpuram 			printf("Port %u is now closed\n", port_id);
253955e51c96SNithin Dabilpuram 		}
254055e51c96SNithin Dabilpuram 	}
254155e51c96SNithin Dabilpuram 
254255e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
254355e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
254455e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
254555e51c96SNithin Dabilpuram 		return;
254655e51c96SNithin Dabilpuram 	}
254755e51c96SNithin Dabilpuram 
254855e51c96SNithin Dabilpuram 	remove_invalid_ports();
254955e51c96SNithin Dabilpuram 
255055e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
255155e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
255255e51c96SNithin Dabilpuram 	printf("Done\n");
255355e51c96SNithin Dabilpuram }
255455e51c96SNithin Dabilpuram 
255555e51c96SNithin Dabilpuram void
2556af75078fSIntel pmd_test_exit(void)
2557af75078fSIntel {
2558af75078fSIntel 	portid_t pt_id;
2559fb73e096SJeff Guo 	int ret;
2560401b744dSShahaf Shuler 	int i;
2561af75078fSIntel 
25628210ec25SPablo de Lara 	if (test_done == 0)
25638210ec25SPablo de Lara 		stop_packet_forwarding();
25648210ec25SPablo de Lara 
25653a0968c8SShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
25663a0968c8SShahaf Shuler 		if (mempools[i]) {
25673a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
25683a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
25693a0968c8SShahaf Shuler 						     NULL);
25703a0968c8SShahaf Shuler 		}
25713a0968c8SShahaf Shuler 	}
2572d3a274ceSZhihong Wang 	if (ports != NULL) {
2573d3a274ceSZhihong Wang 		no_link_check = 1;
25747d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
257508fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
2576af75078fSIntel 			fflush(stdout);
2577d3a274ceSZhihong Wang 			stop_port(pt_id);
257808fd782bSCristian Dumitrescu 		}
257908fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
258008fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
258108fd782bSCristian Dumitrescu 			fflush(stdout);
2582d3a274ceSZhihong Wang 			close_port(pt_id);
2583af75078fSIntel 		}
2584d3a274ceSZhihong Wang 	}
2585fb73e096SJeff Guo 
2586fb73e096SJeff Guo 	if (hot_plug) {
2587fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
25882049c511SJeff Guo 		if (ret) {
2589fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2590fb73e096SJeff Guo 				"fail to stop device event monitor.");
25912049c511SJeff Guo 			return;
25922049c511SJeff Guo 		}
2593fb73e096SJeff Guo 
25942049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
2595cc1bf307SJeff Guo 			dev_event_callback, NULL);
25962049c511SJeff Guo 		if (ret < 0) {
2597fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
25982049c511SJeff Guo 				"fail to unregister device event callback.\n");
25992049c511SJeff Guo 			return;
26002049c511SJeff Guo 		}
26012049c511SJeff Guo 
26022049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
26032049c511SJeff Guo 		if (ret) {
26042049c511SJeff Guo 			RTE_LOG(ERR, EAL,
26052049c511SJeff Guo 				"fail to disable hotplug handling.\n");
26062049c511SJeff Guo 			return;
26072049c511SJeff Guo 		}
2608fb73e096SJeff Guo 	}
2609401b744dSShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2610401b744dSShahaf Shuler 		if (mempools[i])
2611401b744dSShahaf Shuler 			rte_mempool_free(mempools[i]);
2612401b744dSShahaf Shuler 	}
2613fb73e096SJeff Guo 
2614d3a274ceSZhihong Wang 	printf("\nBye...\n");
2615af75078fSIntel }
2616af75078fSIntel 
/* Handler type for an interactive test command (no arguments, no result). */
2617af75078fSIntel typedef void (*cmd_func_t)(void);
/* Maps a command-menu entry name to its handler function. */
2618af75078fSIntel struct pmd_test_command {
2619af75078fSIntel 	const char *cmd_name;
2620af75078fSIntel 	cmd_func_t cmd_func;
2621af75078fSIntel };
2622af75078fSIntel 
/* Entry count of pmd_test_menu (an array defined elsewhere in this file). */
2623af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2624af75078fSIntel 
2625ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2626af75078fSIntel static void
2627edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2628af75078fSIntel {
2629ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2630ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2631f8244c63SZhiyong Yang 	portid_t portid;
2632f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2633ce8d5614SIntel 	struct rte_eth_link link;
2634e661a08bSIgor Romanov 	int ret;
2635ce8d5614SIntel 
2636ce8d5614SIntel 	printf("Checking link statuses...\n");
2637ce8d5614SIntel 	fflush(stdout);
	/*
	 * Poll every port in port_mask until all are up or the 9s budget
	 * is spent; the final pass (print_flag == 1) prints the statuses.
	 */
2638ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2639ce8d5614SIntel 		all_ports_up = 1;
26407d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2641ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2642ce8d5614SIntel 				continue;
2643ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2644e661a08bSIgor Romanov 			ret = rte_eth_link_get_nowait(portid, &link);
2645e661a08bSIgor Romanov 			if (ret < 0) {
2646e661a08bSIgor Romanov 				all_ports_up = 0;
2647e661a08bSIgor Romanov 				if (print_flag == 1)
2648e661a08bSIgor Romanov 					printf("Port %u link get failed: %s\n",
2649e661a08bSIgor Romanov 						portid, rte_strerror(-ret));
2650e661a08bSIgor Romanov 				continue;
2651e661a08bSIgor Romanov 			}
2652ce8d5614SIntel 			/* print link status if flag set */
2653ce8d5614SIntel 			if (print_flag == 1) {
			/*
			 * NOTE(review): the "half-duplex\n" literal embeds a
			 * newline on top of the one already in the format
			 * string, so half-duplex links print a stray blank
			 * line -- likely unintended.
			 */
2654ce8d5614SIntel 				if (link.link_status)
2655f8244c63SZhiyong Yang 					printf(
2656f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2657f8244c63SZhiyong Yang 					portid, link.link_speed,
2658ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2659ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2660ce8d5614SIntel 				else
2661f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2662ce8d5614SIntel 				continue;
2663ce8d5614SIntel 			}
2664ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
266509419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2666ce8d5614SIntel 				all_ports_up = 0;
2667ce8d5614SIntel 				break;
2668ce8d5614SIntel 			}
2669ce8d5614SIntel 		}
2670ce8d5614SIntel 		/* after finally printing all link status, get out */
2671ce8d5614SIntel 		if (print_flag == 1)
2672ce8d5614SIntel 			break;
2673ce8d5614SIntel 
2674ce8d5614SIntel 		if (all_ports_up == 0) {
2675ce8d5614SIntel 			fflush(stdout);
2676ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2677ce8d5614SIntel 		}
2678ce8d5614SIntel 
2679ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2680ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2681ce8d5614SIntel 			print_flag = 1;
2682ce8d5614SIntel 		}
26838ea656f8SGaetan Rivet 
		/* With LSC interrupts enabled, do a single pass: don't poll. */
26848ea656f8SGaetan Rivet 		if (lsc_interrupt)
26858ea656f8SGaetan Rivet 			break;
2686ce8d5614SIntel 	}
2687af75078fSIntel }
2688af75078fSIntel 
2689cc1bf307SJeff Guo /*
2690cc1bf307SJeff Guo  * This callback is for remove a port for a device. It has limitation because
2691cc1bf307SJeff Guo  * it is not for multiple port removal for a device.
2692cc1bf307SJeff Guo  * TODO: the device detach invoke will plan to be removed from user side to
2693cc1bf307SJeff Guo  * eal. And convert all PMDs to free port resources on ether device closing.
2694cc1bf307SJeff Guo  */
2695284c908cSGaetan Rivet static void
2696cc1bf307SJeff Guo rmv_port_callback(void *arg)
2697284c908cSGaetan Rivet {
26983b97888aSMatan Azrad 	int need_to_start = 0;
26990da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
	/* Port id is smuggled through the void* alarm argument. */
270028caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
2701284c908cSGaetan Rivet 
2702284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2703284c908cSGaetan Rivet 
	/* Pause forwarding if this port participates; restart afterwards. */
27043b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
27053b97888aSMatan Azrad 		need_to_start = 1;
27063b97888aSMatan Azrad 		stop_packet_forwarding();
27073b97888aSMatan Azrad 	}
	/* Skip the link-status check while stopping a vanishing port. */
27080da2a62bSMatan Azrad 	no_link_check = 1;
2709284c908cSGaetan Rivet 	stop_port(port_id);
27100da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
2711284c908cSGaetan Rivet 	close_port(port_id);
2712f8e5baa2SThomas Monjalon 	detach_port_device(port_id);
27133b97888aSMatan Azrad 	if (need_to_start)
27143b97888aSMatan Azrad 		start_packet_forwarding(0);
2715284c908cSGaetan Rivet }
2716284c908cSGaetan Rivet 
271776ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2718d6af1a13SBernard Iremonger static int
/*
 * Generic ethdev event callback (registered for every event type by
 * register_eth_event_callback below).  Optionally prints the event when
 * its bit is set in event_print_mask, then handles NEW (flag the port
 * for setup) and INTR_RMV (schedule a deferred removal).  Always
 * returns 0; param/ret_param are unused.
 */
2719f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2720d6af1a13SBernard Iremonger 		  void *ret_param)
272176ad4a2dSGaetan Rivet {
272276ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2723d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
272476ad4a2dSGaetan Rivet 
272576ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
2726f431e010SHerakliusz Lipiec 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
272776ad4a2dSGaetan Rivet 			port_id, __func__, type);
272876ad4a2dSGaetan Rivet 		fflush(stderr);
27293af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2730f431e010SHerakliusz Lipiec 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
273197b5d8b5SThomas Monjalon 			eth_event_desc[type]);
273276ad4a2dSGaetan Rivet 		fflush(stdout);
273376ad4a2dSGaetan Rivet 	}
2734284c908cSGaetan Rivet 
2735284c908cSGaetan Rivet 	switch (type) {
27364f1ed78eSThomas Monjalon 	case RTE_ETH_EVENT_NEW:
27374f1ed78eSThomas Monjalon 		ports[port_id].need_setup = 1;
27384f1ed78eSThomas Monjalon 		ports[port_id].port_status = RTE_PORT_HANDLING;
27394f1ed78eSThomas Monjalon 		break;
2740284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
27414f1ed78eSThomas Monjalon 		if (port_id_is_invalid(port_id, DISABLED_WARN))
27424f1ed78eSThomas Monjalon 			break;
		/*
		 * Defer the removal (100000 us alarm) so it does not run
		 * inside the interrupt-thread callback itself.
		 */
2743284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2744cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2745284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2746284c908cSGaetan Rivet 		break;
2747284c908cSGaetan Rivet 	default:
2748284c908cSGaetan Rivet 		break;
2749284c908cSGaetan Rivet 	}
2750d6af1a13SBernard Iremonger 	return 0;
275176ad4a2dSGaetan Rivet }
275276ad4a2dSGaetan Rivet 
/*
 * Register eth_event_callback for every ethdev event type on all ports
 * (RTE_ETH_ALL).  Returns 0 on success, -1 as soon as one registration
 * fails (already-registered callbacks are not rolled back).
 */
275397b5d8b5SThomas Monjalon static int
275497b5d8b5SThomas Monjalon register_eth_event_callback(void)
275597b5d8b5SThomas Monjalon {
275697b5d8b5SThomas Monjalon 	int ret;
275797b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
275897b5d8b5SThomas Monjalon 
275997b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
276097b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
276197b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
276297b5d8b5SThomas Monjalon 				event,
276397b5d8b5SThomas Monjalon 				eth_event_callback,
276497b5d8b5SThomas Monjalon 				NULL);
276597b5d8b5SThomas Monjalon 		if (ret != 0) {
276697b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
276797b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
276897b5d8b5SThomas Monjalon 			return -1;
276997b5d8b5SThomas Monjalon 		}
277097b5d8b5SThomas Monjalon 	}
277197b5d8b5SThomas Monjalon 
277297b5d8b5SThomas Monjalon 	return 0;
277397b5d8b5SThomas Monjalon }
277497b5d8b5SThomas Monjalon 
2775fb73e096SJeff Guo /* This function is used by the interrupt thread */
2776fb73e096SJeff Guo static void
/*
 * EAL device (hot-plug) event callback: on REMOVE, resolve the port that
 * belongs to device_name and schedule its deferred detach; ADD is only
 * logged for now (see TODO below).
 */
2777cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2778fb73e096SJeff Guo 			     __rte_unused void *arg)
2779fb73e096SJeff Guo {
27802049c511SJeff Guo 	uint16_t port_id;
27812049c511SJeff Guo 	int ret;
27822049c511SJeff Guo 
	/*
	 * NOTE(review): this check does not return, so execution continues
	 * into the switch below; invalid values fall into the (empty)
	 * default case, which is harmless but worth confirming.
	 */
2783fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2784fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2785fb73e096SJeff Guo 			__func__, type);
2786fb73e096SJeff Guo 		fflush(stderr);
2787fb73e096SJeff Guo 	}
2788fb73e096SJeff Guo 
2789fb73e096SJeff Guo 	switch (type) {
2790fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2791cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2792fb73e096SJeff Guo 			device_name);
27932049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
27942049c511SJeff Guo 		if (ret) {
27952049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
27962049c511SJeff Guo 				device_name);
27972049c511SJeff Guo 			return;
27982049c511SJeff Guo 		}
2799cc1bf307SJeff Guo 		/*
2800cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
2801cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
2802cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
2803cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
2804cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
2805cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
2806cc1bf307SJeff Guo 		 * be deleted.
2807cc1bf307SJeff Guo 		 */
2808cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
2809cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2810cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
2811cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
2812fb73e096SJeff Guo 		break;
2813fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
		/*
		 * NOTE(review): logged at ERR level although the message is
		 * informational -- confirm whether DEBUG/INFO was intended.
		 */
2814fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2815fb73e096SJeff Guo 			device_name);
2816fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2817fb73e096SJeff Guo 		 * begin to attach port.
2818fb73e096SJeff Guo 		 */
2819fb73e096SJeff Guo 		break;
2820fb73e096SJeff Guo 	default:
2821fb73e096SJeff Guo 		break;
2822fb73e096SJeff Guo 	}
2823fb73e096SJeff Guo }
2824fb73e096SJeff Guo 
/*
 * Apply the user-provided TX queue -> stats-counter mappings that target
 * port_id (entries whose queue id is not below nb_txq are skipped).
 * Marks the port as mapping-enabled when at least one entry was applied.
 * Returns 0 on success, or the first non-zero error from
 * rte_eth_dev_set_tx_queue_stats_mapping().
 */
2825013af9b6SIntel static int
282628caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2827af75078fSIntel {
2828013af9b6SIntel 	uint16_t i;
2829af75078fSIntel 	int diag;
2830013af9b6SIntel 	uint8_t mapping_found = 0;
2831af75078fSIntel 
2832013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2833013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2834013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2835013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2836013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2837013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2838013af9b6SIntel 			if (diag != 0)
2839013af9b6SIntel 				return diag;
2840013af9b6SIntel 			mapping_found = 1;
2841af75078fSIntel 		}
2842013af9b6SIntel 	}
2843013af9b6SIntel 	if (mapping_found)
2844013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2845013af9b6SIntel 	return 0;
2846013af9b6SIntel }
2847013af9b6SIntel 
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): apply the
 * user-provided RX queue -> stats-counter mappings for port_id and mark
 * the port as mapping-enabled when at least one entry was applied.
 * Returns 0 on success, or the first non-zero error from
 * rte_eth_dev_set_rx_queue_stats_mapping().
 */
2848013af9b6SIntel static int
284928caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2850013af9b6SIntel {
2851013af9b6SIntel 	uint16_t i;
2852013af9b6SIntel 	int diag;
2853013af9b6SIntel 	uint8_t mapping_found = 0;
2854013af9b6SIntel 
2855013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2856013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2857013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2858013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2859013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2860013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2861013af9b6SIntel 			if (diag != 0)
2862013af9b6SIntel 				return diag;
2863013af9b6SIntel 			mapping_found = 1;
2864013af9b6SIntel 		}
2865013af9b6SIntel 	}
2866013af9b6SIntel 	if (mapping_found)
2867013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2868013af9b6SIntel 	return 0;
2869013af9b6SIntel }
2870013af9b6SIntel 
/*
 * Apply both the TX and RX queue-stats mappings to port pi.
 * -ENOTSUP from the driver merely disables the feature for that
 * direction (with a notice); any other error is fatal and aborts the
 * application via rte_exit().
 */
2871013af9b6SIntel static void
287228caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2873013af9b6SIntel {
2874013af9b6SIntel 	int diag = 0;
2875013af9b6SIntel 
2876013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2877af75078fSIntel 	if (diag != 0) {
2878013af9b6SIntel 		if (diag == -ENOTSUP) {
2879013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2880013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2881013af9b6SIntel 		}
2882013af9b6SIntel 		else
2883013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2884013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2885013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2886af75078fSIntel 					pi, diag);
2887af75078fSIntel 	}
2888013af9b6SIntel 
2889013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2890af75078fSIntel 	if (diag != 0) {
2891013af9b6SIntel 		if (diag == -ENOTSUP) {
2892013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2893013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2894013af9b6SIntel 		}
2895013af9b6SIntel 		else
2896013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2897013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2898013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2899af75078fSIntel 					pi, diag);
2900af75078fSIntel 	}
2901af75078fSIntel }
2902af75078fSIntel 
/*
 * Initialize every RX/TX queue configuration of @port from the device's
 * defaults (dev_info.default_rxconf/default_txconf), preserving any
 * offloads already configured on the queue, then override individual
 * thresholds with command-line values -- RTE_PMD_PARAM_UNSET means
 * "keep the driver default".  Also records the per-queue descriptor
 * counts nb_rxd/nb_txd.
 */
2903f2c5125aSPablo de Lara static void
2904f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2905f2c5125aSPablo de Lara {
2906d44f8a48SQi Zhang 	uint16_t qid;
29075e91aeefSWei Zhao 	uint64_t offloads;
2908f2c5125aSPablo de Lara 
2909d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
		/* Save offloads: the struct copy below would clobber them. */
29105e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
2911d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2912575e0fd1SWei Zhao 		if (offloads != 0)
2913575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
2914d44f8a48SQi Zhang 
2915d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2916f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2917d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2918f2c5125aSPablo de Lara 
2919f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2920d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2921f2c5125aSPablo de Lara 
2922f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2923d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2924f2c5125aSPablo de Lara 
2925f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2926d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2927f2c5125aSPablo de Lara 
2928f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2929d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2930f2c5125aSPablo de Lara 
2931d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2932d44f8a48SQi Zhang 	}
2933d44f8a48SQi Zhang 
2934d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
		/* Same save/restore dance for the TX-side offloads. */
29355e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
2936d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2937575e0fd1SWei Zhao 		if (offloads != 0)
2938575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
2939d44f8a48SQi Zhang 
2940d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2941f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2942d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2943f2c5125aSPablo de Lara 
2944f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2945d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2946f2c5125aSPablo de Lara 
2947f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2948d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2949f2c5125aSPablo de Lara 
2950f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2951d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2952f2c5125aSPablo de Lara 
2953f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2954d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2955d44f8a48SQi Zhang 
2956d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2957d44f8a48SQi Zhang 	}
2958f2c5125aSPablo de Lara }
2959f2c5125aSPablo de Lara 
/*
 * Build the default configuration for every attached port: flow-director
 * settings, RSS (enabled only when more than one RX queue is in use and
 * the port is not in DCB mode), per-queue RX/TX config, the port MAC
 * address, queue-stats mappings, and the LSC/RMV interrupt flags.
 * Returns early (silently) if querying device info or the MAC address
 * fails for a port.
 */
2960013af9b6SIntel void
2961013af9b6SIntel init_port_config(void)
2962013af9b6SIntel {
2963013af9b6SIntel 	portid_t pid;
2964013af9b6SIntel 	struct rte_port *port;
29656f51deb9SIvan Ilchenko 	int ret;
2966013af9b6SIntel 
29677d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2968013af9b6SIntel 		port = &ports[pid];
2969013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
29706f51deb9SIvan Ilchenko 
29716f51deb9SIvan Ilchenko 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
29726f51deb9SIvan Ilchenko 		if (ret != 0)
29736f51deb9SIvan Ilchenko 			return;
29746f51deb9SIvan Ilchenko 
		/* RSS only makes sense with multiple RX queues; restrict the
		 * requested hash functions to what the device supports. */
29753ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2976013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
297790892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2978422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2979af75078fSIntel 		} else {
2980013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2981013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2982af75078fSIntel 		}
29833ce690d3SBruce Richardson 
		/* DCB ports keep the mq_mode chosen by init_port_dcb_config. */
29845f592039SJingjing Wu 		if (port->dcb_flag == 0) {
29853ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
29863ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
29873ce690d3SBruce Richardson 			else
29883ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
29893ce690d3SBruce Richardson 		}
29903ce690d3SBruce Richardson 
2991f2c5125aSPablo de Lara 		rxtx_port_config(port);
2992013af9b6SIntel 
2993a5279d25SIgor Romanov 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
2994a5279d25SIgor Romanov 		if (ret != 0)
2995a5279d25SIgor Romanov 			return;
2996013af9b6SIntel 
2997013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
299850c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2999e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
30007b7e5ba7SIntel #endif
30018ea656f8SGaetan Rivet 
		/* Enable link-state-change / removal interrupts only when both
		 * requested on the command line and supported by the device. */
30028ea656f8SGaetan Rivet 		if (lsc_interrupt &&
30038ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
30048ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
30058ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
3006284c908cSGaetan Rivet 		if (rmv_interrupt &&
3007284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
3008284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
3009284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
3010013af9b6SIntel 	}
3011013af9b6SIntel }
3012013af9b6SIntel 
/* Mark slave_pid as a bonding slave port (testpmd-local flag). */
301341b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
301441b05095SBernard Iremonger {
301541b05095SBernard Iremonger 	struct rte_port *port;
301641b05095SBernard Iremonger 
301741b05095SBernard Iremonger 	port = &ports[slave_pid];
301841b05095SBernard Iremonger 	port->slave_flag = 1;
301941b05095SBernard Iremonger }
302041b05095SBernard Iremonger 
/* Clear the testpmd-local bonding-slave flag of slave_pid. */
302141b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
302241b05095SBernard Iremonger {
302341b05095SBernard Iremonger 	struct rte_port *port;
302441b05095SBernard Iremonger 
302541b05095SBernard Iremonger 	port = &ports[slave_pid];
302641b05095SBernard Iremonger 	port->slave_flag = 0;
302741b05095SBernard Iremonger }
302841b05095SBernard Iremonger 
/*
 * Return 1 if slave_pid is a bonding slave -- either flagged as such by
 * the ethdev layer (RTE_ETH_DEV_BONDED_SLAVE) or by testpmd's own
 * slave_flag -- and 0 otherwise.
 */
30290e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
30300e545d30SBernard Iremonger {
30310e545d30SBernard Iremonger 	struct rte_port *port;
30320e545d30SBernard Iremonger 
30330e545d30SBernard Iremonger 	port = &ports[slave_pid];
3034b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3035b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3036b8b8b344SMatan Azrad 		return 1;
3037b8b8b344SMatan Azrad 	return 0;
30380e545d30SBernard Iremonger }
30390e545d30SBernard Iremonger 
/* VLAN IDs used to populate the VMDq+DCB pool map in get_eth_dcb_conf(). */
3040013af9b6SIntel const uint16_t vlan_tags[] = {
3041013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
3042013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
3043013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
3044013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
3045013af9b6SIntel };
3046013af9b6SIntel 
/*
 * Fill @eth_conf for DCB operation on port @pid.
 * - DCB_VT_ENABLED: VMDq+DCB; the pool map is built from the vlan_tags
 *   table above and each user priority is assigned round-robin across
 *   the num_tcs traffic classes.
 * - otherwise: plain DCB combined with RSS, reusing the port's current
 *   RSS configuration.
 * pfc_en additionally advertises priority-flow-control capability.
 * Returns 0, or the error from rte_eth_dev_rss_hash_conf_get().
 */
3047013af9b6SIntel static  int
3048ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
30491a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
30501a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
30511a572499SJingjing Wu 		 uint8_t pfc_en)
3052013af9b6SIntel {
3053013af9b6SIntel 	uint8_t i;
3054ac7c491cSKonstantin Ananyev 	int32_t rc;
3055ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
3056af75078fSIntel 
3057af75078fSIntel 	/*
3058013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3059013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
3060af75078fSIntel 	 */
30611a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
30621a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
30631a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
30641a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
30651a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3066013af9b6SIntel 
3067547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
30681a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
30691a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs for only 16. */
30701a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
30711a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
30721a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
30731a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3074013af9b6SIntel 
		/* Map one VLAN tag per pool, cycling through the pools. */
30751a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
30761a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
30771a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
30781a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
30791a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3080af75078fSIntel 		}
3081013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3082f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3083f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3084013af9b6SIntel 		}
3085013af9b6SIntel 
3086013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
308732e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
308832e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
30891a572499SJingjing Wu 	} else {
30901a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
30911a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
30921a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
30931a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3094013af9b6SIntel 
		/* Preserve the port's current RSS settings alongside DCB. */
3095ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3096ac7c491cSKonstantin Ananyev 		if (rc != 0)
3097ac7c491cSKonstantin Ananyev 			return rc;
3098ac7c491cSKonstantin Ananyev 
30991a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
31001a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
31011a572499SJingjing Wu 
31021a572499SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3103bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
3104bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
3105013af9b6SIntel 		}
3106ac7c491cSKonstantin Ananyev 
31071a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3108ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
310932e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
31101a572499SJingjing Wu 	}
31111a572499SJingjing Wu 
31121a572499SJingjing Wu 	if (pfc_en)
31131a572499SJingjing Wu 		eth_conf->dcb_capability_en =
31141a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3115013af9b6SIntel 	else
3116013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3117013af9b6SIntel 
3118013af9b6SIntel 	return 0;
3119013af9b6SIntel }
3120013af9b6SIntel 
/*
 * Switch port @pid into DCB mode: build the DCB config via
 * get_eth_dcb_conf(), reconfigure the device, derive the usable queue
 * counts from the device info, enable VLAN filtering and install the
 * vlan_tags filters.  Returns 0 on success or a negative error code.
 */
3121013af9b6SIntel int
31221a572499SJingjing Wu init_port_dcb_config(portid_t pid,
31231a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
31241a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
31251a572499SJingjing Wu 		     uint8_t pfc_en)
3126013af9b6SIntel {
3127013af9b6SIntel 	struct rte_eth_conf port_conf;
3128013af9b6SIntel 	struct rte_port *rte_port;
3129013af9b6SIntel 	int retval;
3130013af9b6SIntel 	uint16_t i;
3131013af9b6SIntel 
31322a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3133013af9b6SIntel 
3134013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3135013af9b6SIntel 	/* Enter DCB configuration status */
3136013af9b6SIntel 	dcb_config = 1;
3137013af9b6SIntel 
3138d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3139d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3140d5354e89SYanglong Wu 
3141013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3142ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3143013af9b6SIntel 	if (retval < 0)
3144013af9b6SIntel 		return retval;
31450074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3146013af9b6SIntel 
31472f203d44SQi Zhang 	/* re-configure the device . */
	/*
	 * NOTE(review): nb_rxq is passed for BOTH the RX and TX queue
	 * counts here -- the third argument was likely meant to be nb_txq.
	 * Confirm whether this is intentional.
	 */
31482b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
31492b0e0ebaSChenbo Xia 	if (retval < 0)
31502b0e0ebaSChenbo Xia 		return retval;
31516f51deb9SIvan Ilchenko 
31526f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
31536f51deb9SIvan Ilchenko 	if (retval != 0)
31546f51deb9SIvan Ilchenko 		return retval;
31552a977b89SWenzhuo Lu 
31562a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
31572a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
31582a977b89SWenzhuo Lu 	 */
31592a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
31602a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
31612a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
31622a977b89SWenzhuo Lu 			" for port %d.", pid);
31632a977b89SWenzhuo Lu 		return -1;
31642a977b89SWenzhuo Lu 	}
31652a977b89SWenzhuo Lu 
31662a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
31672a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
31682a977b89SWenzhuo Lu 	 */
31692a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
317086ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
317186ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
317286ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
317386ef65eeSBernard Iremonger 		} else {
31742a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
31752a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
317686ef65eeSBernard Iremonger 		}
31772a977b89SWenzhuo Lu 	} else {
31782a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
31792a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
31802a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
31812a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
31822a977b89SWenzhuo Lu 		} else {
31832a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
31842a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
31852a977b89SWenzhuo Lu 
31862a977b89SWenzhuo Lu 		}
31872a977b89SWenzhuo Lu 	}
31882a977b89SWenzhuo Lu 	rx_free_thresh = 64;
31892a977b89SWenzhuo Lu 
3190013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3191013af9b6SIntel 
3192f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3193013af9b6SIntel 	/* VLAN filter */
31940074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
31951a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3196013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3197013af9b6SIntel 
3198a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3199a5279d25SIgor Romanov 	if (retval != 0)
3200a5279d25SIgor Romanov 		return retval;
3201a5279d25SIgor Romanov 
3202013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3203013af9b6SIntel 
32047741e4cfSIntel 	rte_port->dcb_flag = 1;
32057741e4cfSIntel 
3206013af9b6SIntel 	return 0;
3207af75078fSIntel }
3208af75078fSIntel 
/*
 * Allocate the global per-port state array (RTE_MAX_ETHPORTS entries,
 * zeroed) and reset the per-port/per-ring NUMA placement tables to
 * NUMA_NO_CONFIG.  Aborts the application if the allocation fails.
 */
3209ffc468ffSTetsuya Mukawa static void
3210ffc468ffSTetsuya Mukawa init_port(void)
3211ffc468ffSTetsuya Mukawa {
3212ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3213ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3214ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3215ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3216ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3217ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3218ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3219ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3220ffc468ffSTetsuya Mukawa 	}
322129841336SPhil Yang 
322229841336SPhil Yang 	/* Initialize ports NUMA structures */
322329841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
322429841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
322529841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3226ffc468ffSTetsuya Mukawa }
3227ffc468ffSTetsuya Mukawa 
/* Full shutdown: run the normal exit path, then terminate the prompt. */
3228d3a274ceSZhihong Wang static void
3229d3a274ceSZhihong Wang force_quit(void)
3230d3a274ceSZhihong Wang {
3231d3a274ceSZhihong Wang 	pmd_test_exit();
3232d3a274ceSZhihong Wang 	prompt_exit();
3233d3a274ceSZhihong Wang }
3234d3a274ceSZhihong Wang 
3235d3a274ceSZhihong Wang static void
3236cfea1f30SPablo de Lara print_stats(void)
3237cfea1f30SPablo de Lara {
3238cfea1f30SPablo de Lara 	uint8_t i;
3239cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3240cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3241cfea1f30SPablo de Lara 
3242cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3243cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3244cfea1f30SPablo de Lara 
3245cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3246cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3247cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3248683d1e82SIgor Romanov 
3249683d1e82SIgor Romanov 	fflush(stdout);
3250cfea1f30SPablo de Lara }
3251cfea1f30SPablo de Lara 
/*
 * Handler for SIGINT/SIGTERM: uninitialize the optional capture and
 * latency-stats subsystems, tear down ports and prompt via
 * force_quit(), then restore the default disposition and re-raise the
 * signal so the process exits with the conventional signal status.
 *
 * NOTE(review): printf(), rte_pdump_uninit(), force_quit() etc. are
 * not async-signal-safe; presumably tolerated here because the process
 * is about to terminate anyway -- confirm against signal-safety(7).
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3274d3a274ceSZhihong Wang 
/*
 * testpmd entry point.
 *
 * Boot sequence: install signal handlers, register the log type,
 * initialize the EAL, register ethdev event handling, optionally init
 * packet capture, enumerate probed ports, allocate per-port state,
 * parse the application arguments that follow the EAL ones, configure
 * and start the ports, then either run the interactive command prompt
 * or forward packets non-interactively until a signal or stdin input.
 *
 * Returns 0 on normal termination, 1 if reading stdin failed in
 * non-interactive mode, -1 on hotplug-monitoring setup failure;
 * unrecoverable configuration errors terminate via rte_exit().
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Install handlers early so Ctrl-C during init still cleans up. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* Dedicated log type for all TESTPMD_LOG() output. */
	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	/* Initialize the EAL; diag is the count of consumed EAL args. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	/* testpmd owns port configuration; secondary processes cannot. */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			 "Secondary process type not supported.\n");

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	/* Record the ids of every port probed during EAL init. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip past the EAL arguments; the rest belong to testpmd. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Lock pages in RAM when requested; failure is non-fatal. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	/* tx_first and link-state interrupts are mutually exclusive. */
	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	/* Apply the parsed configuration to all ports and queues. */
	init_config();

	/* Optional device hotplug monitoring. */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		/* NOTE: this ret intentionally shadows the outer one. */
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Replay a command file, if any, before going interactive. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Redraw stats each stats_period until the signal
			 * handler sets f_quit. */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		/* Block until the user presses enter, then tear down. */
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
3483