xref: /dpdk/app/test-pmd/testpmd.c (revision 55e51c96243228317d25fd206d98fe2fa2b7da08)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store specified sockets on which memory pool to be used by ports
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which RX ring to be used by ports
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which TX ring to be used by ports
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 * NULL-terminated table of the engines selectable at run time;
 * some entries are only compiled in when their library is enabled.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
190af75078fSIntel 
/* One mempool slot per NUMA node, plus the flags used to create them. */
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, the process started with the 'stats-period'
 * option cannot be terminated from outside. Set this flag to exit the stats
 * period loop after receiving SIGINT/SIGTERM.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;
234900550deSIntel 
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/* Sentinel: the user did not set the parameter; let the driver choose. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
3163c156061SJens Freimann 
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;
3873af72783SGaetan Rivet 
/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

/* Flow director default configuration: disabled, with all-ones match masks. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
446af75078fSIntel 
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/* NUMA sockets discovered so far; filled by the default config helpers. */
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Default VXLAN encapsulation: IPv4, UDP destination port 4789. */
struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.select_tos_ttl = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = RTE_IPV4(127, 0, 0, 1),
	.ipv4_dst = RTE_IPV4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.ip_tos = 0,
	.ip_ttl = 255,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Default NVGRE encapsulation: IPv4, broadcast destination MAC. */
struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = RTE_IPV4(127, 0, 0, 1),
	.ipv4_dst = RTE_IPV4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
				enum rte_dev_event_type type,
				void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
53152f38a20SJiayu Hu 
532af75078fSIntel /*
53398a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
534c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
535c9cafcc8SShahaf Shuler  */
536c9cafcc8SShahaf Shuler int
537c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
538c9cafcc8SShahaf Shuler {
539c9cafcc8SShahaf Shuler 	unsigned int i;
540c9cafcc8SShahaf Shuler 
541c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
542c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
543c9cafcc8SShahaf Shuler 			return 0;
544c9cafcc8SShahaf Shuler 	}
545c9cafcc8SShahaf Shuler 	return 1;
546c9cafcc8SShahaf Shuler }
547c9cafcc8SShahaf Shuler 
548c9cafcc8SShahaf Shuler /*
549af75078fSIntel  * Setup default configuration.
550af75078fSIntel  */
551af75078fSIntel static void
552af75078fSIntel set_default_fwd_lcores_config(void)
553af75078fSIntel {
554af75078fSIntel 	unsigned int i;
555af75078fSIntel 	unsigned int nb_lc;
5567acf894dSStephen Hurd 	unsigned int sock_num;
557af75078fSIntel 
558af75078fSIntel 	nb_lc = 0;
559af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
560dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
561dbfb8ec7SPhil Yang 			continue;
562c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
563c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
564c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
565c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
566c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
567c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
568c9cafcc8SShahaf Shuler 			}
569c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5707acf894dSStephen Hurd 		}
571f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
572f54fe5eeSStephen Hurd 			continue;
573f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
574af75078fSIntel 	}
575af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
576af75078fSIntel 	nb_cfg_lcores = nb_lcores;
577af75078fSIntel 	nb_fwd_lcores = 1;
578af75078fSIntel }
579af75078fSIntel 
580af75078fSIntel static void
581af75078fSIntel set_def_peer_eth_addrs(void)
582af75078fSIntel {
583af75078fSIntel 	portid_t i;
584af75078fSIntel 
585af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
58635b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
587af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
588af75078fSIntel 	}
589af75078fSIntel }
590af75078fSIntel 
591af75078fSIntel static void
592af75078fSIntel set_default_fwd_ports_config(void)
593af75078fSIntel {
594af75078fSIntel 	portid_t pt_id;
59565a7360cSMatan Azrad 	int i = 0;
596af75078fSIntel 
597effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
59865a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
599af75078fSIntel 
600effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
601effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
602effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
603effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
604effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
605effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
606effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
607effdb8bbSPhil Yang 			}
608effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
609effdb8bbSPhil Yang 		}
610effdb8bbSPhil Yang 	}
611effdb8bbSPhil Yang 
612af75078fSIntel 	nb_cfg_ports = nb_ports;
613af75078fSIntel 	nb_fwd_ports = nb_ports;
614af75078fSIntel }
615af75078fSIntel 
/*
 * Build the default forwarding configuration:
 * forwarding lcores, peer MAC addresses, then the forwarded port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
623af75078fSIntel 
624c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
625c7f5dba7SAnatoly Burakov static int
626c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
627c7f5dba7SAnatoly Burakov {
628c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
629c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
630c7f5dba7SAnatoly Burakov 
631c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
632c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
633c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
634c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
635c7f5dba7SAnatoly Burakov 	 */
636c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
637c7f5dba7SAnatoly Burakov 
638c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
639c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
640c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
641c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
642c7f5dba7SAnatoly Burakov 		return -1;
643c7f5dba7SAnatoly Burakov 	}
644c7f5dba7SAnatoly Burakov 
645c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
646c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
647c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
648c7f5dba7SAnatoly Burakov 
649c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
650c7f5dba7SAnatoly Burakov 
651c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
652c7f5dba7SAnatoly Burakov 
653c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
654c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
655c7f5dba7SAnatoly Burakov 		return -1;
656c7f5dba7SAnatoly Burakov 	}
657c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
658c7f5dba7SAnatoly Burakov 
659c7f5dba7SAnatoly Burakov 	return 0;
660c7f5dba7SAnatoly Burakov }
661c7f5dba7SAnatoly Burakov 
662c7f5dba7SAnatoly Burakov static int
663c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
664c7f5dba7SAnatoly Burakov {
665c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
666c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
667c7f5dba7SAnatoly Burakov 	 */
6689d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
669c7f5dba7SAnatoly Burakov 
670c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
671c7f5dba7SAnatoly Burakov }
672c7f5dba7SAnatoly Burakov 
673c7f5dba7SAnatoly Burakov static void *
674c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
675c7f5dba7SAnatoly Burakov {
676c7f5dba7SAnatoly Burakov 	void *addr;
677c7f5dba7SAnatoly Burakov 	int flags;
678c7f5dba7SAnatoly Burakov 
679c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
680c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
681c7f5dba7SAnatoly Burakov 	if (huge)
682c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
683c7f5dba7SAnatoly Burakov 
684c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
685c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
686c7f5dba7SAnatoly Burakov 		return NULL;
687c7f5dba7SAnatoly Burakov 
688c7f5dba7SAnatoly Burakov 	return addr;
689c7f5dba7SAnatoly Burakov }
690c7f5dba7SAnatoly Burakov 
/* Description of an external memory area, filled in by create_extmem()
 * and consumed by setup_extmem().
 */
struct extmem_param {
	void *addr;	/* base virtual address of the mapping */
	size_t len;	/* total length of the area, in bytes */
	size_t pgsz;	/* page size the area was mapped with */
	rte_iova_t *iova_table;		/* malloc'd per-page IOVA addresses */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
698c7f5dba7SAnatoly Burakov 
/*
 * Allocate an external memory area big enough for nb_mbufs objects of
 * mbuf_sz bytes and record its per-page IOVA addresses. Page sizes are
 * tried in the order listed in pgsizes[] until one can be mapped.
 * On success, fills *param (caller owns param->addr via munmap() and
 * param->iova_table via free()) and returns 0; returns -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big (only possible on 32-bit,
		 * where size_t is narrower than uint64_t)
		 */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			/* addr is non-NULL here, so "fail" unmaps it */
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success with this page size - stop trying others */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
789c7f5dba7SAnatoly Burakov 
790c7f5dba7SAnatoly Burakov static int
791c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
792c7f5dba7SAnatoly Burakov {
793c7f5dba7SAnatoly Burakov 	struct extmem_param param;
794c7f5dba7SAnatoly Burakov 	int socket_id, ret;
795c7f5dba7SAnatoly Burakov 
796c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
797c7f5dba7SAnatoly Burakov 
798c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
799c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
800c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
801c7f5dba7SAnatoly Burakov 		/* create our heap */
802c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
803c7f5dba7SAnatoly Burakov 		if (ret < 0) {
804c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
805c7f5dba7SAnatoly Burakov 			return -1;
806c7f5dba7SAnatoly Burakov 		}
807c7f5dba7SAnatoly Burakov 	}
808c7f5dba7SAnatoly Burakov 
809c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
810c7f5dba7SAnatoly Burakov 	if (ret < 0) {
811c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
812c7f5dba7SAnatoly Burakov 		return -1;
813c7f5dba7SAnatoly Burakov 	}
814c7f5dba7SAnatoly Burakov 
815c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
816c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
817c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
818c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
819c7f5dba7SAnatoly Burakov 
820c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
821c7f5dba7SAnatoly Burakov 
822c7f5dba7SAnatoly Burakov 	/* not needed any more */
823c7f5dba7SAnatoly Burakov 	free(param.iova_table);
824c7f5dba7SAnatoly Burakov 
825c7f5dba7SAnatoly Burakov 	if (ret < 0) {
826c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
827c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
828c7f5dba7SAnatoly Burakov 		return -1;
829c7f5dba7SAnatoly Burakov 	}
830c7f5dba7SAnatoly Burakov 
831c7f5dba7SAnatoly Burakov 	/* success */
832c7f5dba7SAnatoly Burakov 
833c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
834c7f5dba7SAnatoly Burakov 			param.len >> 20);
835c7f5dba7SAnatoly Burakov 
836c7f5dba7SAnatoly Burakov 	return 0;
837c7f5dba7SAnatoly Burakov }
8383a0968c8SShahaf Shuler static void
8393a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8403a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8413a0968c8SShahaf Shuler {
8423a0968c8SShahaf Shuler 	uint16_t pid = 0;
8433a0968c8SShahaf Shuler 	int ret;
8443a0968c8SShahaf Shuler 
8453a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8463a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8473a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8483a0968c8SShahaf Shuler 
8493a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8503a0968c8SShahaf Shuler 					memhdr->len);
8513a0968c8SShahaf Shuler 		if (ret) {
8523a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8533a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8543a0968c8SShahaf Shuler 				    "for device %s\n",
8553a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8563a0968c8SShahaf Shuler 		}
8573a0968c8SShahaf Shuler 	}
8583a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8593a0968c8SShahaf Shuler 	if (ret) {
8603a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8613a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8623a0968c8SShahaf Shuler 	}
8633a0968c8SShahaf Shuler }
8643a0968c8SShahaf Shuler 
8653a0968c8SShahaf Shuler static void
8663a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8673a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8683a0968c8SShahaf Shuler {
8693a0968c8SShahaf Shuler 	uint16_t pid = 0;
8703a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8713a0968c8SShahaf Shuler 	int ret;
8723a0968c8SShahaf Shuler 
8733a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8743a0968c8SShahaf Shuler 				  page_size);
8753a0968c8SShahaf Shuler 	if (ret) {
8763a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8773a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8783a0968c8SShahaf Shuler 		return;
8793a0968c8SShahaf Shuler 	}
8803a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8813a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8823a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8833a0968c8SShahaf Shuler 
8843a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8853a0968c8SShahaf Shuler 				      memhdr->len);
8863a0968c8SShahaf Shuler 		if (ret) {
8873a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8883a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8893a0968c8SShahaf Shuler 				    "for device %s\n",
8903a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8913a0968c8SShahaf Shuler 		}
8923a0968c8SShahaf Shuler 	}
8933a0968c8SShahaf Shuler }
894c7f5dba7SAnatoly Burakov 
895af75078fSIntel /*
896af75078fSIntel  * Configuration initialisation done once at init time.
897af75078fSIntel  */
/*
 * Create the mbuf pool for the given socket according to the global
 * mp_alloc_type allocation mode. Exits the application on failure,
 * so the returned pointer is always valid.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* build the pool by hand so it can be populated from
			 * anonymous memory and DMA-mapped per device
			 */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			/* rte_mempool_populate_anon() returns 0 on error */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* NOTE: successful switch cases fall through to this label as well
	 * (a label does not interrupt control flow); rte_mp is non-NULL on
	 * those paths, so only the failure branch below fires for them.
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
979af75078fSIntel 
98020a0286fSLiu Xiaofeng /*
98120a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
98220a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
98320a0286fSLiu Xiaofeng  */
98420a0286fSLiu Xiaofeng static int
98520a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
98620a0286fSLiu Xiaofeng {
98720a0286fSLiu Xiaofeng 	static int warning_once = 0;
98820a0286fSLiu Xiaofeng 
989c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
99020a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
99120a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
99220a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
99320a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
99420a0286fSLiu Xiaofeng 			       " --numa.\n");
99520a0286fSLiu Xiaofeng 		warning_once = 1;
99620a0286fSLiu Xiaofeng 		return -1;
99720a0286fSLiu Xiaofeng 	}
99820a0286fSLiu Xiaofeng 	return 0;
99920a0286fSLiu Xiaofeng }
100020a0286fSLiu Xiaofeng 
10013f7311baSWei Dai /*
10023f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10033f7311baSWei Dai  * *pid return the port id which has minimal value of
10043f7311baSWei Dai  * max_rx_queues in all ports.
10053f7311baSWei Dai  */
10063f7311baSWei Dai queueid_t
10073f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10083f7311baSWei Dai {
10093f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
10103f7311baSWei Dai 	portid_t pi;
10113f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10123f7311baSWei Dai 
10133f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10143f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
10153f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
10163f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
10173f7311baSWei Dai 			*pid = pi;
10183f7311baSWei Dai 		}
10193f7311baSWei Dai 	}
10203f7311baSWei Dai 	return allowed_max_rxq;
10213f7311baSWei Dai }
10223f7311baSWei Dai 
10233f7311baSWei Dai /*
10243f7311baSWei Dai  * Check input rxq is valid or not.
10253f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10263f7311baSWei Dai  * of RX queues of all ports, it is valid.
10273f7311baSWei Dai  * if valid, return 0, else return -1
10283f7311baSWei Dai  */
10293f7311baSWei Dai int
10303f7311baSWei Dai check_nb_rxq(queueid_t rxq)
10313f7311baSWei Dai {
10323f7311baSWei Dai 	queueid_t allowed_max_rxq;
10333f7311baSWei Dai 	portid_t pid = 0;
10343f7311baSWei Dai 
10353f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
10363f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
10373f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
10383f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
10393f7311baSWei Dai 		       rxq,
10403f7311baSWei Dai 		       allowed_max_rxq,
10413f7311baSWei Dai 		       pid);
10423f7311baSWei Dai 		return -1;
10433f7311baSWei Dai 	}
10443f7311baSWei Dai 	return 0;
10453f7311baSWei Dai }
10463f7311baSWei Dai 
104736db4f6cSWei Dai /*
104836db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
104936db4f6cSWei Dai  * *pid return the port id which has minimal value of
105036db4f6cSWei Dai  * max_tx_queues in all ports.
105136db4f6cSWei Dai  */
105236db4f6cSWei Dai queueid_t
105336db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
105436db4f6cSWei Dai {
105536db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
105636db4f6cSWei Dai 	portid_t pi;
105736db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
105836db4f6cSWei Dai 
105936db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
106036db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
106136db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
106236db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
106336db4f6cSWei Dai 			*pid = pi;
106436db4f6cSWei Dai 		}
106536db4f6cSWei Dai 	}
106636db4f6cSWei Dai 	return allowed_max_txq;
106736db4f6cSWei Dai }
106836db4f6cSWei Dai 
106936db4f6cSWei Dai /*
107036db4f6cSWei Dai  * Check input txq is valid or not.
107136db4f6cSWei Dai  * If input txq is not greater than any of maximum number
107236db4f6cSWei Dai  * of TX queues of all ports, it is valid.
107336db4f6cSWei Dai  * if valid, return 0, else return -1
107436db4f6cSWei Dai  */
107536db4f6cSWei Dai int
107636db4f6cSWei Dai check_nb_txq(queueid_t txq)
107736db4f6cSWei Dai {
107836db4f6cSWei Dai 	queueid_t allowed_max_txq;
107936db4f6cSWei Dai 	portid_t pid = 0;
108036db4f6cSWei Dai 
108136db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
108236db4f6cSWei Dai 	if (txq > allowed_max_txq) {
108336db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
108436db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
108536db4f6cSWei Dai 		       txq,
108636db4f6cSWei Dai 		       allowed_max_txq,
108736db4f6cSWei Dai 		       pid);
108836db4f6cSWei Dai 		return -1;
108936db4f6cSWei Dai 	}
109036db4f6cSWei Dai 	return 0;
109136db4f6cSWei Dai }
109236db4f6cSWei Dai 
/*
 * One-time configuration done at startup: allocate per-lcore forwarding
 * state, apply default per-port Rx/Tx configuration, create the mbuf
 * pools, set up forwarding streams and per-lcore GSO/GRO contexts.
 * Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;

	/* byte count equals element count because the array is uint8_t */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* strip default offloads the device does not support */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
			DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (numa_support) {
			/* count ports per socket, to size mempools below */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
							(mbuf_data_size,
							 nb_mbuf_per_pool,
							 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool when the lcore's own
		 * socket has no pool
		 */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	/* give softnic ports access to the forwarding lcores */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1287ce8d5614SIntel 
12882950a769SDeclan Doherty 
/*
 * Re-initialize a single Ethernet port: refresh the cached device info,
 * assign the port to the given NUMA socket and flag it for full
 * port/queue reconfiguration.
 * NOTE(review): presumably invoked when a port (re)appears at run time
 * (e.g. hotplug/attach) - confirm against callers.
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
13052950a769SDeclan Doherty 
13062950a769SDeclan Doherty 
1307ce8d5614SIntel int
1308ce8d5614SIntel init_fwd_streams(void)
1309ce8d5614SIntel {
1310ce8d5614SIntel 	portid_t pid;
1311ce8d5614SIntel 	struct rte_port *port;
1312ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
13135a8fb55cSReshma Pattan 	queueid_t q;
1314ce8d5614SIntel 
1315ce8d5614SIntel 	/* set socket id according to numa or not */
13167d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
1317ce8d5614SIntel 		port = &ports[pid];
1318ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
1319ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
1320ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
1321ce8d5614SIntel 				port->dev_info.max_rx_queues);
1322ce8d5614SIntel 			return -1;
1323ce8d5614SIntel 		}
1324ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
1325ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
1326ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
1327ce8d5614SIntel 				port->dev_info.max_tx_queues);
1328ce8d5614SIntel 			return -1;
1329ce8d5614SIntel 		}
133020a0286fSLiu Xiaofeng 		if (numa_support) {
133120a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
133220a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
133320a0286fSLiu Xiaofeng 			else {
1334b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
133520a0286fSLiu Xiaofeng 
133629841336SPhil Yang 				/*
133729841336SPhil Yang 				 * if socket_id is invalid,
133829841336SPhil Yang 				 * set to the first available socket.
133929841336SPhil Yang 				 */
134020a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
134129841336SPhil Yang 					port->socket_id = socket_ids[0];
134220a0286fSLiu Xiaofeng 			}
134320a0286fSLiu Xiaofeng 		}
1344b6ea6408SIntel 		else {
1345b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
1346af75078fSIntel 				port->socket_id = 0;
1347b6ea6408SIntel 			else
1348b6ea6408SIntel 				port->socket_id = socket_num;
1349b6ea6408SIntel 		}
1350af75078fSIntel 	}
1351af75078fSIntel 
13525a8fb55cSReshma Pattan 	q = RTE_MAX(nb_rxq, nb_txq);
13535a8fb55cSReshma Pattan 	if (q == 0) {
13545a8fb55cSReshma Pattan 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
13555a8fb55cSReshma Pattan 		return -1;
13565a8fb55cSReshma Pattan 	}
13575a8fb55cSReshma Pattan 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1358ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
1359ce8d5614SIntel 		return 0;
1360ce8d5614SIntel 	/* clear the old */
1361ce8d5614SIntel 	if (fwd_streams != NULL) {
1362ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1363ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
1364ce8d5614SIntel 				continue;
1365ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
1366ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
1367af75078fSIntel 		}
1368ce8d5614SIntel 		rte_free(fwd_streams);
1369ce8d5614SIntel 		fwd_streams = NULL;
1370ce8d5614SIntel 	}
1371ce8d5614SIntel 
1372ce8d5614SIntel 	/* init new */
1373ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
13741f84c469SMatan Azrad 	if (nb_fwd_streams) {
1375ce8d5614SIntel 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
13761f84c469SMatan Azrad 			sizeof(struct fwd_stream *) * nb_fwd_streams,
13771f84c469SMatan Azrad 			RTE_CACHE_LINE_SIZE);
1378ce8d5614SIntel 		if (fwd_streams == NULL)
13791f84c469SMatan Azrad 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
13801f84c469SMatan Azrad 				 " (struct fwd_stream *)) failed\n",
13811f84c469SMatan Azrad 				 nb_fwd_streams);
1382ce8d5614SIntel 
1383af75078fSIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
13841f84c469SMatan Azrad 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
13851f84c469SMatan Azrad 				" struct fwd_stream", sizeof(struct fwd_stream),
13861f84c469SMatan Azrad 				RTE_CACHE_LINE_SIZE);
1387ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
13881f84c469SMatan Azrad 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
13891f84c469SMatan Azrad 					 "(struct fwd_stream) failed\n");
13901f84c469SMatan Azrad 		}
1391af75078fSIntel 	}
1392ce8d5614SIntel 
1393ce8d5614SIntel 	return 0;
1394af75078fSIntel }
1395af75078fSIntel 
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Pretty-print the recorded burst-size distribution for one direction
 * ("RX" or "TX"): the total number of bursts plus the share taken by
 * the two most frequent burst lengths.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int sum_bursts = 0;
	unsigned int top_count[3] = { 0, 0, 0 };
	uint16_t top_size[3] = { 0, 0, 0 };
	int top_pct[3];
	uint16_t sz;

	/*
	 * Accumulate the total burst count and remember the two burst
	 * lengths that occurred most often.
	 */
	for (sz = 0; sz < MAX_PKT_BURST; sz++) {
		unsigned int count = pbs->pkt_burst_spread[sz];

		if (count == 0)
			continue;
		sum_bursts += count;
		if (count > top_count[0]) {
			/* New leader: previous leader becomes runner-up. */
			top_count[1] = top_count[0];
			top_size[1] = top_size[0];
			top_count[0] = count;
			top_size[0] = sz;
		} else if (count > top_count[1]) {
			top_count[1] = count;
			top_size[1] = sz;
		}
	}
	if (sum_bursts == 0)
		return;

	top_pct[0] = (top_count[0] * 100) / sum_bursts;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, sum_bursts,
	       top_pct[0], (int) top_size[0]);
	/* All bursts had the same length. */
	if (top_count[0] == sum_bursts) {
		printf("]\n");
		return;
	}
	/* Exactly two distinct lengths: the rest is the runner-up. */
	if (top_count[0] + top_count[1] == sum_bursts) {
		printf(" + %d%% of %d pkts]\n",
		       100 - top_pct[0], top_size[1]);
		return;
	}
	top_pct[1] = (top_count[1] * 100) / sum_bursts;
	top_pct[2] = 100 - (top_pct[0] + top_pct[1]);
	/* Runner-up or remainder rounds to 0%: lump them together. */
	if ((top_pct[1] == 0) || (top_pct[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - top_pct[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       top_pct[1], (int) top_size[1], top_pct[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1453af75078fSIntel 
1454af75078fSIntel static void
1455af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1456af75078fSIntel {
1457af75078fSIntel 	struct fwd_stream *fs;
1458af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1459af75078fSIntel 
1460af75078fSIntel 	fs = fwd_streams[stream_id];
1461af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1462af75078fSIntel 	    (fs->fwd_dropped == 0))
1463af75078fSIntel 		return;
1464af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1465af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1466af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1467af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1468c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1469c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1470af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1471af75078fSIntel 
1472af75078fSIntel 	/* if checksum mode */
1473af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1474c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1475c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1476c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
147758d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
147858d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
147994d65546SDavid Marchand 	} else {
148094d65546SDavid Marchand 		printf("\n");
1481af75078fSIntel 	}
1482af75078fSIntel 
1483af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1484af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1485af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1486af75078fSIntel #endif
1487af75078fSIntel }
1488af75078fSIntel 
148953324971SDavid Marchand void
149053324971SDavid Marchand fwd_stats_display(void)
149153324971SDavid Marchand {
149253324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
149353324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
149453324971SDavid Marchand 	struct {
149553324971SDavid Marchand 		struct fwd_stream *rx_stream;
149653324971SDavid Marchand 		struct fwd_stream *tx_stream;
149753324971SDavid Marchand 		uint64_t tx_dropped;
149853324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
149953324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
150053324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
150153324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
150253324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
150353324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
150453324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
150553324971SDavid Marchand 	struct rte_eth_stats stats;
150653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
150753324971SDavid Marchand 	uint64_t fwd_cycles = 0;
150853324971SDavid Marchand #endif
150953324971SDavid Marchand 	uint64_t total_recv = 0;
151053324971SDavid Marchand 	uint64_t total_xmit = 0;
151153324971SDavid Marchand 	struct rte_port *port;
151253324971SDavid Marchand 	streamid_t sm_id;
151353324971SDavid Marchand 	portid_t pt_id;
151453324971SDavid Marchand 	int i;
151553324971SDavid Marchand 
151653324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
151753324971SDavid Marchand 
151853324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
151953324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
152053324971SDavid Marchand 
152153324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
152253324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
152353324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
152453324971SDavid Marchand 		} else {
152553324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
152653324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
152753324971SDavid Marchand 		}
152853324971SDavid Marchand 
152953324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
153053324971SDavid Marchand 
153153324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
153253324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
153353324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
153453324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
153553324971SDavid Marchand 
153653324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
153753324971SDavid Marchand 		fwd_cycles += fs->core_cycles;
153853324971SDavid Marchand #endif
153953324971SDavid Marchand 	}
154053324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
154153324971SDavid Marchand 		uint8_t j;
154253324971SDavid Marchand 
154353324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
154453324971SDavid Marchand 		port = &ports[pt_id];
154553324971SDavid Marchand 
154653324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
154753324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
154853324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
154953324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
155053324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
155153324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
155253324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
155353324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
155453324971SDavid Marchand 
155553324971SDavid Marchand 		total_recv += stats.ipackets;
155653324971SDavid Marchand 		total_xmit += stats.opackets;
155753324971SDavid Marchand 		total_rx_dropped += stats.imissed;
155853324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
155953324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
156053324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
156153324971SDavid Marchand 
156253324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
156353324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
156453324971SDavid Marchand 
156553324971SDavid Marchand 		if (!port->rx_queue_stats_mapping_enabled &&
156653324971SDavid Marchand 		    !port->tx_queue_stats_mapping_enabled) {
156753324971SDavid Marchand 			printf("  RX-packets: %-14"PRIu64
156853324971SDavid Marchand 			       " RX-dropped: %-14"PRIu64
156953324971SDavid Marchand 			       "RX-total: %-"PRIu64"\n",
157053324971SDavid Marchand 			       stats.ipackets, stats.imissed,
157153324971SDavid Marchand 			       stats.ipackets + stats.imissed);
157253324971SDavid Marchand 
157353324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
157453324971SDavid Marchand 				printf("  Bad-ipcsum: %-14"PRIu64
157553324971SDavid Marchand 				       " Bad-l4csum: %-14"PRIu64
157653324971SDavid Marchand 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
157753324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
157853324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
157953324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
158053324971SDavid Marchand 			if (stats.ierrors + stats.rx_nombuf > 0) {
158153324971SDavid Marchand 				printf("  RX-error: %-"PRIu64"\n",
158253324971SDavid Marchand 				       stats.ierrors);
158353324971SDavid Marchand 				printf("  RX-nombufs: %-14"PRIu64"\n",
158453324971SDavid Marchand 				       stats.rx_nombuf);
158553324971SDavid Marchand 			}
158653324971SDavid Marchand 
158753324971SDavid Marchand 			printf("  TX-packets: %-14"PRIu64
158853324971SDavid Marchand 			       " TX-dropped: %-14"PRIu64
158953324971SDavid Marchand 			       "TX-total: %-"PRIu64"\n",
159053324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
159153324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
159253324971SDavid Marchand 		} else {
159353324971SDavid Marchand 			printf("  RX-packets:             %14"PRIu64
159453324971SDavid Marchand 			       "    RX-dropped:%14"PRIu64
159553324971SDavid Marchand 			       "    RX-total:%14"PRIu64"\n",
159653324971SDavid Marchand 			       stats.ipackets, stats.imissed,
159753324971SDavid Marchand 			       stats.ipackets + stats.imissed);
159853324971SDavid Marchand 
159953324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
160053324971SDavid Marchand 				printf("  Bad-ipcsum:%14"PRIu64
160153324971SDavid Marchand 				       "    Bad-l4csum:%14"PRIu64
160253324971SDavid Marchand 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
160353324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
160453324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
160553324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
160653324971SDavid Marchand 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
160753324971SDavid Marchand 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
160853324971SDavid Marchand 				printf("  RX-nombufs:             %14"PRIu64"\n",
160953324971SDavid Marchand 				       stats.rx_nombuf);
161053324971SDavid Marchand 			}
161153324971SDavid Marchand 
161253324971SDavid Marchand 			printf("  TX-packets:             %14"PRIu64
161353324971SDavid Marchand 			       "    TX-dropped:%14"PRIu64
161453324971SDavid Marchand 			       "    TX-total:%14"PRIu64"\n",
161553324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
161653324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
161753324971SDavid Marchand 		}
161853324971SDavid Marchand 
161953324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
162053324971SDavid Marchand 		if (ports_stats[pt_id].rx_stream)
162153324971SDavid Marchand 			pkt_burst_stats_display("RX",
162253324971SDavid Marchand 				&ports_stats[pt_id].rx_stream->rx_burst_stats);
162353324971SDavid Marchand 		if (ports_stats[pt_id].tx_stream)
162453324971SDavid Marchand 			pkt_burst_stats_display("TX",
162553324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
162653324971SDavid Marchand #endif
162753324971SDavid Marchand 
162853324971SDavid Marchand 		if (port->rx_queue_stats_mapping_enabled) {
162953324971SDavid Marchand 			printf("\n");
163053324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
163153324971SDavid Marchand 				printf("  Stats reg %2d RX-packets:%14"PRIu64
163253324971SDavid Marchand 				       "     RX-errors:%14"PRIu64
163353324971SDavid Marchand 				       "    RX-bytes:%14"PRIu64"\n",
163453324971SDavid Marchand 				       j, stats.q_ipackets[j],
163553324971SDavid Marchand 				       stats.q_errors[j], stats.q_ibytes[j]);
163653324971SDavid Marchand 			}
163753324971SDavid Marchand 			printf("\n");
163853324971SDavid Marchand 		}
163953324971SDavid Marchand 		if (port->tx_queue_stats_mapping_enabled) {
164053324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
164153324971SDavid Marchand 				printf("  Stats reg %2d TX-packets:%14"PRIu64
164253324971SDavid Marchand 				       "                                 TX-bytes:%14"
164353324971SDavid Marchand 				       PRIu64"\n",
164453324971SDavid Marchand 				       j, stats.q_opackets[j],
164553324971SDavid Marchand 				       stats.q_obytes[j]);
164653324971SDavid Marchand 			}
164753324971SDavid Marchand 		}
164853324971SDavid Marchand 
164953324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
165053324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
165153324971SDavid Marchand 	}
165253324971SDavid Marchand 
165353324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
165453324971SDavid Marchand 	       "%s\n",
165553324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
165653324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
165753324971SDavid Marchand 	       "%-"PRIu64"\n"
165853324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
165953324971SDavid Marchand 	       "%-"PRIu64"\n",
166053324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
166153324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
166253324971SDavid Marchand 	if (total_rx_nombuf > 0)
166353324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
166453324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
166553324971SDavid Marchand 	       "%s\n",
166653324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
166753324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
166853324971SDavid Marchand 	if (total_recv > 0)
166953324971SDavid Marchand 		printf("\n  CPU cycles/packet=%u (total cycles="
167053324971SDavid Marchand 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
167153324971SDavid Marchand 		       (unsigned int)(fwd_cycles / total_recv),
167253324971SDavid Marchand 		       fwd_cycles, total_recv);
167353324971SDavid Marchand #endif
167453324971SDavid Marchand }
167553324971SDavid Marchand 
167653324971SDavid Marchand void
167753324971SDavid Marchand fwd_stats_reset(void)
167853324971SDavid Marchand {
167953324971SDavid Marchand 	streamid_t sm_id;
168053324971SDavid Marchand 	portid_t pt_id;
168153324971SDavid Marchand 	int i;
168253324971SDavid Marchand 
168353324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
168453324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
168553324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
168653324971SDavid Marchand 	}
168753324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
168853324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
168953324971SDavid Marchand 
169053324971SDavid Marchand 		fs->rx_packets = 0;
169153324971SDavid Marchand 		fs->tx_packets = 0;
169253324971SDavid Marchand 		fs->fwd_dropped = 0;
169353324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
169453324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
169553324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
169653324971SDavid Marchand 
169753324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
169853324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
169953324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
170053324971SDavid Marchand #endif
170153324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
170253324971SDavid Marchand 		fs->core_cycles = 0;
170353324971SDavid Marchand #endif
170453324971SDavid Marchand 	}
170553324971SDavid Marchand }
170653324971SDavid Marchand 
1707af75078fSIntel static void
17087741e4cfSIntel flush_fwd_rx_queues(void)
1709af75078fSIntel {
1710af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1711af75078fSIntel 	portid_t  rxp;
17127741e4cfSIntel 	portid_t port_id;
1713af75078fSIntel 	queueid_t rxq;
1714af75078fSIntel 	uint16_t  nb_rx;
1715af75078fSIntel 	uint16_t  i;
1716af75078fSIntel 	uint8_t   j;
1717f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1718594302c7SJames Poole 	uint64_t timer_period;
1719f487715fSReshma Pattan 
1720f487715fSReshma Pattan 	/* convert to number of cycles */
1721594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1722af75078fSIntel 
1723af75078fSIntel 	for (j = 0; j < 2; j++) {
17247741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1725af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
17267741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1727f487715fSReshma Pattan 				/**
1728f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1729f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1730f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1731f487715fSReshma Pattan 				* after 1sec timer expiry.
1732f487715fSReshma Pattan 				*/
1733f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1734af75078fSIntel 				do {
17357741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1736013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1737af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1738af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1739f487715fSReshma Pattan 
1740f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1741f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1742f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1743f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1744f487715fSReshma Pattan 					(timer_tsc < timer_period));
1745f487715fSReshma Pattan 				timer_tsc = 0;
1746af75078fSIntel 			}
1747af75078fSIntel 		}
1748af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1749af75078fSIntel 	}
1750af75078fSIntel }
1751af75078fSIntel 
1752af75078fSIntel static void
1753af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1754af75078fSIntel {
1755af75078fSIntel 	struct fwd_stream **fsm;
1756af75078fSIntel 	streamid_t nb_fs;
1757af75078fSIntel 	streamid_t sm_id;
17587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
17597e4441c8SRemy Horton 	uint64_t tics_per_1sec;
17607e4441c8SRemy Horton 	uint64_t tics_datum;
17617e4441c8SRemy Horton 	uint64_t tics_current;
17624918a357SXiaoyun Li 	uint16_t i, cnt_ports;
1763af75078fSIntel 
17644918a357SXiaoyun Li 	cnt_ports = nb_ports;
17657e4441c8SRemy Horton 	tics_datum = rte_rdtsc();
17667e4441c8SRemy Horton 	tics_per_1sec = rte_get_timer_hz();
17677e4441c8SRemy Horton #endif
1768af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
1769af75078fSIntel 	nb_fs = fc->stream_nb;
1770af75078fSIntel 	do {
1771af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1772af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
17737e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
1774e25e6c70SRemy Horton 		if (bitrate_enabled != 0 &&
1775e25e6c70SRemy Horton 				bitrate_lcore_id == rte_lcore_id()) {
17767e4441c8SRemy Horton 			tics_current = rte_rdtsc();
17777e4441c8SRemy Horton 			if (tics_current - tics_datum >= tics_per_1sec) {
17787e4441c8SRemy Horton 				/* Periodic bitrate calculation */
17794918a357SXiaoyun Li 				for (i = 0; i < cnt_ports; i++)
1780e25e6c70SRemy Horton 					rte_stats_bitrate_calc(bitrate_data,
17814918a357SXiaoyun Li 						ports_ids[i]);
17827e4441c8SRemy Horton 				tics_datum = tics_current;
17837e4441c8SRemy Horton 			}
1784e25e6c70SRemy Horton 		}
17857e4441c8SRemy Horton #endif
178662d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
178765eb1e54SPablo de Lara 		if (latencystats_enabled != 0 &&
178865eb1e54SPablo de Lara 				latencystats_lcore_id == rte_lcore_id())
178962d3216dSReshma Pattan 			rte_latencystats_update();
179062d3216dSReshma Pattan #endif
179162d3216dSReshma Pattan 
1792af75078fSIntel 	} while (! fc->stopped);
1793af75078fSIntel }
1794af75078fSIntel 
1795af75078fSIntel static int
1796af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1797af75078fSIntel {
1798af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1799af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1800af75078fSIntel 	return 0;
1801af75078fSIntel }
1802af75078fSIntel 
1803af75078fSIntel /*
1804af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1805af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1806af75078fSIntel  */
1807af75078fSIntel static int
1808af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1809af75078fSIntel {
1810af75078fSIntel 	struct fwd_lcore *fwd_lc;
1811af75078fSIntel 	struct fwd_lcore tmp_lcore;
1812af75078fSIntel 
1813af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1814af75078fSIntel 	tmp_lcore = *fwd_lc;
1815af75078fSIntel 	tmp_lcore.stopped = 1;
1816af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1817af75078fSIntel 	return 0;
1818af75078fSIntel }
1819af75078fSIntel 
1820af75078fSIntel /*
1821af75078fSIntel  * Launch packet forwarding:
1822af75078fSIntel  *     - Setup per-port forwarding context.
1823af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1824af75078fSIntel  */
1825af75078fSIntel static void
1826af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1827af75078fSIntel {
1828af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1829af75078fSIntel 	unsigned int i;
1830af75078fSIntel 	unsigned int lc_id;
1831af75078fSIntel 	int diag;
1832af75078fSIntel 
1833af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1834af75078fSIntel 	if (port_fwd_begin != NULL) {
1835af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1836af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1837af75078fSIntel 	}
1838af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1839af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1840af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1841af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1842af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1843af75078fSIntel 						     fwd_lcores[i], lc_id);
1844af75078fSIntel 			if (diag != 0)
1845af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1846af75078fSIntel 				       lc_id, diag);
1847af75078fSIntel 		}
1848af75078fSIntel 	}
1849af75078fSIntel }
1850af75078fSIntel 
1851af75078fSIntel /*
1852af75078fSIntel  * Launch packet forwarding configuration.
1853af75078fSIntel  */
1854af75078fSIntel void
1855af75078fSIntel start_packet_forwarding(int with_tx_first)
1856af75078fSIntel {
1857af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1858af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1859af75078fSIntel 	struct rte_port *port;
1860af75078fSIntel 	unsigned int i;
1861af75078fSIntel 	portid_t   pt_id;
1862af75078fSIntel 
18635a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
18645a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
18655a8fb55cSReshma Pattan 
18665a8fb55cSReshma Pattan 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
18675a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
18685a8fb55cSReshma Pattan 
18695a8fb55cSReshma Pattan 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
18705a8fb55cSReshma Pattan 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
18715a8fb55cSReshma Pattan 		(!nb_rxq || !nb_txq))
18725a8fb55cSReshma Pattan 		rte_exit(EXIT_FAILURE,
18735a8fb55cSReshma Pattan 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
18745a8fb55cSReshma Pattan 			cur_fwd_eng->fwd_mode_name);
18755a8fb55cSReshma Pattan 
1876ce8d5614SIntel 	if (all_ports_started() == 0) {
1877ce8d5614SIntel 		printf("Not all ports were started\n");
1878ce8d5614SIntel 		return;
1879ce8d5614SIntel 	}
1880af75078fSIntel 	if (test_done == 0) {
1881af75078fSIntel 		printf("Packet forwarding already started\n");
1882af75078fSIntel 		return;
1883af75078fSIntel 	}
1884edf87b4aSBernard Iremonger 
1885edf87b4aSBernard Iremonger 
18867741e4cfSIntel 	if(dcb_test) {
18877741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
18887741e4cfSIntel 			pt_id = fwd_ports_ids[i];
18897741e4cfSIntel 			port = &ports[pt_id];
18907741e4cfSIntel 			if (!port->dcb_flag) {
18917741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
18927741e4cfSIntel                                        "be configured in this mode.\n");
1893013af9b6SIntel 				return;
1894013af9b6SIntel 			}
18957741e4cfSIntel 		}
18967741e4cfSIntel 		if (nb_fwd_lcores == 1) {
18977741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
18987741e4cfSIntel                                "should be larger than 1.\n");
18997741e4cfSIntel 			return;
19007741e4cfSIntel 		}
19017741e4cfSIntel 	}
1902af75078fSIntel 	test_done = 0;
19037741e4cfSIntel 
190447a767b2SMatan Azrad 	fwd_config_setup();
190547a767b2SMatan Azrad 
19067741e4cfSIntel 	if(!no_flush_rx)
19077741e4cfSIntel 		flush_fwd_rx_queues();
19087741e4cfSIntel 
1909933617d8SZhihong Wang 	pkt_fwd_config_display(&cur_fwd_config);
1910af75078fSIntel 	rxtx_config_display();
1911af75078fSIntel 
191253324971SDavid Marchand 	fwd_stats_reset();
1913af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1914af75078fSIntel 		pt_id = fwd_ports_ids[i];
1915af75078fSIntel 		port = &ports[pt_id];
1916013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1917af75078fSIntel 	}
1918af75078fSIntel 	if (with_tx_first) {
1919af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1920af75078fSIntel 		if (port_fwd_begin != NULL) {
1921af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1922af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1923af75078fSIntel 		}
1924acbf77a6SZhihong Wang 		while (with_tx_first--) {
1925acbf77a6SZhihong Wang 			launch_packet_forwarding(
1926acbf77a6SZhihong Wang 					run_one_txonly_burst_on_core);
1927af75078fSIntel 			rte_eal_mp_wait_lcore();
1928acbf77a6SZhihong Wang 		}
1929af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1930af75078fSIntel 		if (port_fwd_end != NULL) {
1931af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1932af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1933af75078fSIntel 		}
1934af75078fSIntel 	}
1935af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1936af75078fSIntel }
1937af75078fSIntel 
1938af75078fSIntel void
1939af75078fSIntel stop_packet_forwarding(void)
1940af75078fSIntel {
1941af75078fSIntel 	port_fwd_end_t port_fwd_end;
1942af75078fSIntel 	lcoreid_t lc_id;
194353324971SDavid Marchand 	portid_t pt_id;
194453324971SDavid Marchand 	int i;
1945af75078fSIntel 
1946af75078fSIntel 	if (test_done) {
1947af75078fSIntel 		printf("Packet forwarding not started\n");
1948af75078fSIntel 		return;
1949af75078fSIntel 	}
1950af75078fSIntel 	printf("Telling cores to stop...");
1951af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1952af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1953af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1954af75078fSIntel 	rte_eal_mp_wait_lcore();
1955af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1956af75078fSIntel 	if (port_fwd_end != NULL) {
1957af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1958af75078fSIntel 			pt_id = fwd_ports_ids[i];
1959af75078fSIntel 			(*port_fwd_end)(pt_id);
1960af75078fSIntel 		}
1961af75078fSIntel 	}
1962c185d42cSDavid Marchand 
196353324971SDavid Marchand 	fwd_stats_display();
196458d475b7SJerin Jacob 
1965af75078fSIntel 	printf("\nDone.\n");
1966af75078fSIntel 	test_done = 1;
1967af75078fSIntel }
1968af75078fSIntel 
1969cfae07fdSOuyang Changchun void
1970cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1971cfae07fdSOuyang Changchun {
1972492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1973cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1974cfae07fdSOuyang Changchun }
1975cfae07fdSOuyang Changchun 
1976cfae07fdSOuyang Changchun void
1977cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1978cfae07fdSOuyang Changchun {
1979492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1980cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1981cfae07fdSOuyang Changchun }
1982cfae07fdSOuyang Changchun 
1983ce8d5614SIntel static int
1984ce8d5614SIntel all_ports_started(void)
1985ce8d5614SIntel {
1986ce8d5614SIntel 	portid_t pi;
1987ce8d5614SIntel 	struct rte_port *port;
1988ce8d5614SIntel 
19897d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1990ce8d5614SIntel 		port = &ports[pi];
1991ce8d5614SIntel 		/* Check if there is a port which is not started */
199241b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
199341b05095SBernard Iremonger 			(port->slave_flag == 0))
1994ce8d5614SIntel 			return 0;
1995ce8d5614SIntel 	}
1996ce8d5614SIntel 
1997ce8d5614SIntel 	/* No port is not started */
1998ce8d5614SIntel 	return 1;
1999ce8d5614SIntel }
2000ce8d5614SIntel 
2001148f963fSBruce Richardson int
20026018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
20036018eb8cSShahaf Shuler {
20046018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
20056018eb8cSShahaf Shuler 
20066018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
20076018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
20086018eb8cSShahaf Shuler 		return 0;
20096018eb8cSShahaf Shuler 	return 1;
20106018eb8cSShahaf Shuler }
20116018eb8cSShahaf Shuler 
20126018eb8cSShahaf Shuler int
2013edab33b1STetsuya Mukawa all_ports_stopped(void)
2014edab33b1STetsuya Mukawa {
2015edab33b1STetsuya Mukawa 	portid_t pi;
2016edab33b1STetsuya Mukawa 
20177d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
20186018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2019edab33b1STetsuya Mukawa 			return 0;
2020edab33b1STetsuya Mukawa 	}
2021edab33b1STetsuya Mukawa 
2022edab33b1STetsuya Mukawa 	return 1;
2023edab33b1STetsuya Mukawa }
2024edab33b1STetsuya Mukawa 
2025edab33b1STetsuya Mukawa int
2026edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2027edab33b1STetsuya Mukawa {
2028edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2029edab33b1STetsuya Mukawa 		return 0;
2030edab33b1STetsuya Mukawa 
2031edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2032edab33b1STetsuya Mukawa 		return 0;
2033edab33b1STetsuya Mukawa 
2034edab33b1STetsuya Mukawa 	return 1;
2035edab33b1STetsuya Mukawa }
2036edab33b1STetsuya Mukawa 
2037edab33b1STetsuya Mukawa int
2038ce8d5614SIntel start_port(portid_t pid)
2039ce8d5614SIntel {
204092d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
2041ce8d5614SIntel 	portid_t pi;
2042ce8d5614SIntel 	queueid_t qi;
2043ce8d5614SIntel 	struct rte_port *port;
20446d13ea8eSOlivier Matz 	struct rte_ether_addr mac_addr;
2045ce8d5614SIntel 
20464468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
20474468635fSMichael Qiu 		return 0;
20484468635fSMichael Qiu 
2049ce8d5614SIntel 	if(dcb_config)
2050ce8d5614SIntel 		dcb_test = 1;
20517d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2052edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2053ce8d5614SIntel 			continue;
2054ce8d5614SIntel 
205592d2703eSMichael Qiu 		need_check_link_status = 0;
2056ce8d5614SIntel 		port = &ports[pi];
2057ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2058ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
2059ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2060ce8d5614SIntel 			continue;
2061ce8d5614SIntel 		}
2062ce8d5614SIntel 
2063ce8d5614SIntel 		if (port->need_reconfig > 0) {
2064ce8d5614SIntel 			port->need_reconfig = 0;
2065ce8d5614SIntel 
20667ee3e944SVasily Philipov 			if (flow_isolate_all) {
20677ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
20687ee3e944SVasily Philipov 				if (ret) {
20697ee3e944SVasily Philipov 					printf("Failed to apply isolated"
20707ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
20717ee3e944SVasily Philipov 					return -1;
20727ee3e944SVasily Philipov 				}
20737ee3e944SVasily Philipov 			}
2074b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
20755706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
207620a0286fSLiu Xiaofeng 					port->socket_id);
2077ce8d5614SIntel 			/* configure port */
2078ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2079ce8d5614SIntel 						&(port->dev_conf));
2080ce8d5614SIntel 			if (diag != 0) {
2081ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2082ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2083ce8d5614SIntel 					printf("Port %d can not be set back "
2084ce8d5614SIntel 							"to stopped\n", pi);
2085ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
2086ce8d5614SIntel 				/* try to reconfigure port next time */
2087ce8d5614SIntel 				port->need_reconfig = 1;
2088148f963fSBruce Richardson 				return -1;
2089ce8d5614SIntel 			}
2090ce8d5614SIntel 		}
2091ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2092ce8d5614SIntel 			port->need_reconfig_queues = 0;
2093ce8d5614SIntel 			/* setup tx queues */
2094ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2095b6ea6408SIntel 				if ((numa_support) &&
2096b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2097b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2098d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2099d44f8a48SQi Zhang 						txring_numa[pi],
2100d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2101b6ea6408SIntel 				else
2102b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2103d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2104d44f8a48SQi Zhang 						port->socket_id,
2105d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2106b6ea6408SIntel 
2107ce8d5614SIntel 				if (diag == 0)
2108ce8d5614SIntel 					continue;
2109ce8d5614SIntel 
2110ce8d5614SIntel 				/* Fail to setup tx queue, return */
2111ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2112ce8d5614SIntel 							RTE_PORT_HANDLING,
2113ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2114ce8d5614SIntel 					printf("Port %d can not be set back "
2115ce8d5614SIntel 							"to stopped\n", pi);
2116d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2117d44f8a48SQi Zhang 				       pi);
2118ce8d5614SIntel 				/* try to reconfigure queues next time */
2119ce8d5614SIntel 				port->need_reconfig_queues = 1;
2120148f963fSBruce Richardson 				return -1;
2121ce8d5614SIntel 			}
2122ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2123d44f8a48SQi Zhang 				/* setup rx queues */
2124b6ea6408SIntel 				if ((numa_support) &&
2125b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2126b6ea6408SIntel 					struct rte_mempool * mp =
2127b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2128b6ea6408SIntel 					if (mp == NULL) {
2129b6ea6408SIntel 						printf("Failed to setup RX queue:"
2130b6ea6408SIntel 							"No mempool allocation"
2131b6ea6408SIntel 							" on the socket %d\n",
2132b6ea6408SIntel 							rxring_numa[pi]);
2133148f963fSBruce Richardson 						return -1;
2134b6ea6408SIntel 					}
2135b6ea6408SIntel 
2136b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2137d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2138d44f8a48SQi Zhang 					     rxring_numa[pi],
2139d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2140d44f8a48SQi Zhang 					     mp);
21411e1d6bddSBernard Iremonger 				} else {
21421e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
21431e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
21441e1d6bddSBernard Iremonger 					if (mp == NULL) {
21451e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
21461e1d6bddSBernard Iremonger 							"No mempool allocation"
21471e1d6bddSBernard Iremonger 							" on the socket %d\n",
21481e1d6bddSBernard Iremonger 							port->socket_id);
21491e1d6bddSBernard Iremonger 						return -1;
2150b6ea6408SIntel 					}
2151b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2152d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2153d44f8a48SQi Zhang 					     port->socket_id,
2154d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2155d44f8a48SQi Zhang 					     mp);
21561e1d6bddSBernard Iremonger 				}
2157ce8d5614SIntel 				if (diag == 0)
2158ce8d5614SIntel 					continue;
2159ce8d5614SIntel 
2160ce8d5614SIntel 				/* Fail to setup rx queue, return */
2161ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2162ce8d5614SIntel 							RTE_PORT_HANDLING,
2163ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2164ce8d5614SIntel 					printf("Port %d can not be set back "
2165ce8d5614SIntel 							"to stopped\n", pi);
2166d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2167d44f8a48SQi Zhang 				       pi);
2168ce8d5614SIntel 				/* try to reconfigure queues next time */
2169ce8d5614SIntel 				port->need_reconfig_queues = 1;
2170148f963fSBruce Richardson 				return -1;
2171ce8d5614SIntel 			}
2172ce8d5614SIntel 		}
2173b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2174ce8d5614SIntel 		/* start port */
2175ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2176ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2177ce8d5614SIntel 
2178ce8d5614SIntel 			/* Fail to setup rx queue, return */
2179ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2180ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2181ce8d5614SIntel 				printf("Port %d can not be set back to "
2182ce8d5614SIntel 							"stopped\n", pi);
2183ce8d5614SIntel 			continue;
2184ce8d5614SIntel 		}
2185ce8d5614SIntel 
2186ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2187ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2188ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2189ce8d5614SIntel 
21902950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2191d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
21922950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
21932950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
21942950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2195d8c89163SZijie Pan 
2196ce8d5614SIntel 		/* at least one port started, need checking link status */
2197ce8d5614SIntel 		need_check_link_status = 1;
2198ce8d5614SIntel 	}
2199ce8d5614SIntel 
220092d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2201edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
220292d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2203ce8d5614SIntel 		printf("Please stop the ports first\n");
2204ce8d5614SIntel 
2205ce8d5614SIntel 	printf("Done\n");
2206148f963fSBruce Richardson 	return 0;
2207ce8d5614SIntel }
2208ce8d5614SIntel 
2209ce8d5614SIntel void
2210ce8d5614SIntel stop_port(portid_t pid)
2211ce8d5614SIntel {
2212ce8d5614SIntel 	portid_t pi;
2213ce8d5614SIntel 	struct rte_port *port;
2214ce8d5614SIntel 	int need_check_link_status = 0;
2215ce8d5614SIntel 
2216ce8d5614SIntel 	if (dcb_test) {
2217ce8d5614SIntel 		dcb_test = 0;
2218ce8d5614SIntel 		dcb_config = 0;
2219ce8d5614SIntel 	}
22204468635fSMichael Qiu 
22214468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
22224468635fSMichael Qiu 		return;
22234468635fSMichael Qiu 
2224ce8d5614SIntel 	printf("Stopping ports...\n");
2225ce8d5614SIntel 
22267d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22274468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2228ce8d5614SIntel 			continue;
2229ce8d5614SIntel 
2230a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2231a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2232a8ef3e3aSBernard Iremonger 			continue;
2233a8ef3e3aSBernard Iremonger 		}
2234a8ef3e3aSBernard Iremonger 
22350e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
22360e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
22370e545d30SBernard Iremonger 			continue;
22380e545d30SBernard Iremonger 		}
22390e545d30SBernard Iremonger 
2240ce8d5614SIntel 		port = &ports[pi];
2241ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2242ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2243ce8d5614SIntel 			continue;
2244ce8d5614SIntel 
2245ce8d5614SIntel 		rte_eth_dev_stop(pi);
2246ce8d5614SIntel 
2247ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2248ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2249ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2250ce8d5614SIntel 		need_check_link_status = 1;
2251ce8d5614SIntel 	}
2252bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2253edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2254ce8d5614SIntel 
2255ce8d5614SIntel 	printf("Done\n");
2256ce8d5614SIntel }
2257ce8d5614SIntel 
2258ce6959bfSWisam Jaddo static void
22594f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2260ce6959bfSWisam Jaddo {
22614f1de450SThomas Monjalon 	portid_t i;
22624f1de450SThomas Monjalon 	portid_t new_total = 0;
2263ce6959bfSWisam Jaddo 
22644f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
22654f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
22664f1de450SThomas Monjalon 			array[new_total] = array[i];
22674f1de450SThomas Monjalon 			new_total++;
2268ce6959bfSWisam Jaddo 		}
22694f1de450SThomas Monjalon 	*total = new_total;
22704f1de450SThomas Monjalon }
22714f1de450SThomas Monjalon 
22724f1de450SThomas Monjalon static void
22734f1de450SThomas Monjalon remove_invalid_ports(void)
22744f1de450SThomas Monjalon {
22754f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
22764f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
22774f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2278ce6959bfSWisam Jaddo }
2279ce6959bfSWisam Jaddo 
2280ce8d5614SIntel void
2281ce8d5614SIntel close_port(portid_t pid)
2282ce8d5614SIntel {
2283ce8d5614SIntel 	portid_t pi;
2284ce8d5614SIntel 	struct rte_port *port;
2285ce8d5614SIntel 
22864468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
22874468635fSMichael Qiu 		return;
22884468635fSMichael Qiu 
2289ce8d5614SIntel 	printf("Closing ports...\n");
2290ce8d5614SIntel 
22917d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
22924468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2293ce8d5614SIntel 			continue;
2294ce8d5614SIntel 
2295a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2296a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2297a8ef3e3aSBernard Iremonger 			continue;
2298a8ef3e3aSBernard Iremonger 		}
2299a8ef3e3aSBernard Iremonger 
23000e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
23010e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
23020e545d30SBernard Iremonger 			continue;
23030e545d30SBernard Iremonger 		}
23040e545d30SBernard Iremonger 
2305ce8d5614SIntel 		port = &ports[pi];
2306ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2307d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2308d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2309d4e8ad64SMichael Qiu 			continue;
2310d4e8ad64SMichael Qiu 		}
2311d4e8ad64SMichael Qiu 
2312d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2313ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2314ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2315ce8d5614SIntel 			continue;
2316ce8d5614SIntel 		}
2317ce8d5614SIntel 
2318938a184aSAdrien Mazarguil 		if (port->flow_list)
2319938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2320ce8d5614SIntel 		rte_eth_dev_close(pi);
2321ce8d5614SIntel 
23224f1de450SThomas Monjalon 		remove_invalid_ports();
232323ea57a2SThomas Monjalon 
2324ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2325ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2326b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2327ce8d5614SIntel 	}
2328ce8d5614SIntel 
2329ce8d5614SIntel 	printf("Done\n");
2330ce8d5614SIntel }
2331ce8d5614SIntel 
2332edab33b1STetsuya Mukawa void
233397f1e196SWei Dai reset_port(portid_t pid)
233497f1e196SWei Dai {
233597f1e196SWei Dai 	int diag;
233697f1e196SWei Dai 	portid_t pi;
233797f1e196SWei Dai 	struct rte_port *port;
233897f1e196SWei Dai 
233997f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
234097f1e196SWei Dai 		return;
234197f1e196SWei Dai 
234297f1e196SWei Dai 	printf("Resetting ports...\n");
234397f1e196SWei Dai 
234497f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
234597f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
234697f1e196SWei Dai 			continue;
234797f1e196SWei Dai 
234897f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
234997f1e196SWei Dai 			printf("Please remove port %d from forwarding "
235097f1e196SWei Dai 			       "configuration.\n", pi);
235197f1e196SWei Dai 			continue;
235297f1e196SWei Dai 		}
235397f1e196SWei Dai 
235497f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
235597f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
235697f1e196SWei Dai 			       pi);
235797f1e196SWei Dai 			continue;
235897f1e196SWei Dai 		}
235997f1e196SWei Dai 
236097f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
236197f1e196SWei Dai 		if (diag == 0) {
236297f1e196SWei Dai 			port = &ports[pi];
236397f1e196SWei Dai 			port->need_reconfig = 1;
236497f1e196SWei Dai 			port->need_reconfig_queues = 1;
236597f1e196SWei Dai 		} else {
236697f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
236797f1e196SWei Dai 		}
236897f1e196SWei Dai 	}
236997f1e196SWei Dai 
237097f1e196SWei Dai 	printf("Done\n");
237197f1e196SWei Dai }
237297f1e196SWei Dai 
237397f1e196SWei Dai void
2374edab33b1STetsuya Mukawa attach_port(char *identifier)
2375ce8d5614SIntel {
23764f1ed78eSThomas Monjalon 	portid_t pi;
2377c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2378ce8d5614SIntel 
2379edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2380edab33b1STetsuya Mukawa 
2381edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2382edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2383edab33b1STetsuya Mukawa 		return;
2384ce8d5614SIntel 	}
2385ce8d5614SIntel 
238675b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2387c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2388edab33b1STetsuya Mukawa 		return;
2389c9cce428SThomas Monjalon 	}
2390c9cce428SThomas Monjalon 
23914f1ed78eSThomas Monjalon 	/* first attach mode: event */
23924f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
23934f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
23944f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
23954f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
23964f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
23974f1ed78eSThomas Monjalon 				setup_attached_port(pi);
23984f1ed78eSThomas Monjalon 		return;
23994f1ed78eSThomas Monjalon 	}
24004f1ed78eSThomas Monjalon 
24014f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
240286fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
24034f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
240486fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
240586fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2406c9cce428SThomas Monjalon 		setup_attached_port(pi);
2407c9cce428SThomas Monjalon 	}
240886fa5de1SThomas Monjalon }
2409c9cce428SThomas Monjalon 
2410c9cce428SThomas Monjalon static void
2411c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2412c9cce428SThomas Monjalon {
2413c9cce428SThomas Monjalon 	unsigned int socket_id;
2414edab33b1STetsuya Mukawa 
2415931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
241629841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2417931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
241829841336SPhil Yang 		socket_id = socket_ids[0];
2419931126baSBernard Iremonger 	reconfig(pi, socket_id);
2420edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2421edab33b1STetsuya Mukawa 
24224f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
24234f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
24244f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
24254f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2426edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2427edab33b1STetsuya Mukawa 
2428edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2429edab33b1STetsuya Mukawa 	printf("Done\n");
2430edab33b1STetsuya Mukawa }
2431edab33b1STetsuya Mukawa 
2432edab33b1STetsuya Mukawa void
2433f8e5baa2SThomas Monjalon detach_port_device(portid_t port_id)
24345f4ec54fSChen Jing D(Mark) {
2435f8e5baa2SThomas Monjalon 	struct rte_device *dev;
2436f8e5baa2SThomas Monjalon 	portid_t sibling;
2437f8e5baa2SThomas Monjalon 
2438c9cce428SThomas Monjalon 	printf("Removing a device...\n");
24395f4ec54fSChen Jing D(Mark) 
2440f8e5baa2SThomas Monjalon 	dev = rte_eth_devices[port_id].device;
2441f8e5baa2SThomas Monjalon 	if (dev == NULL) {
2442f8e5baa2SThomas Monjalon 		printf("Device already removed\n");
2443f8e5baa2SThomas Monjalon 		return;
2444f8e5baa2SThomas Monjalon 	}
2445f8e5baa2SThomas Monjalon 
244623ea57a2SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
24473f4a8370SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
24483f4a8370SThomas Monjalon 			printf("Port not stopped\n");
2449edab33b1STetsuya Mukawa 			return;
2450edab33b1STetsuya Mukawa 		}
24513f4a8370SThomas Monjalon 		printf("Port was not closed\n");
2452938a184aSAdrien Mazarguil 		if (ports[port_id].flow_list)
2453938a184aSAdrien Mazarguil 			port_flow_flush(port_id);
24543f4a8370SThomas Monjalon 	}
2455938a184aSAdrien Mazarguil 
245675b66decSIlya Maximets 	if (rte_dev_remove(dev) < 0) {
2457f8e5baa2SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2458edab33b1STetsuya Mukawa 		return;
24593070419eSGaetan Rivet 	}
24607ca262b8SViacheslav Ovsiienko 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2461f8e5baa2SThomas Monjalon 		/* reset mapping between old ports and removed device */
2462f8e5baa2SThomas Monjalon 		rte_eth_devices[sibling].device = NULL;
2463f8e5baa2SThomas Monjalon 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2464f8e5baa2SThomas Monjalon 			/* sibling ports are forced to be closed */
2465f8e5baa2SThomas Monjalon 			ports[sibling].port_status = RTE_PORT_CLOSED;
2466f8e5baa2SThomas Monjalon 			printf("Port %u is closed\n", sibling);
2467f8e5baa2SThomas Monjalon 		}
2468f8e5baa2SThomas Monjalon 	}
2469f8e5baa2SThomas Monjalon 
24704f1de450SThomas Monjalon 	remove_invalid_ports();
247103ce2c53SMatan Azrad 
2472f8e5baa2SThomas Monjalon 	printf("Device of port %u is detached\n", port_id);
2473f8e5baa2SThomas Monjalon 	printf("Now total ports is %d\n", nb_ports);
2474edab33b1STetsuya Mukawa 	printf("Done\n");
2475edab33b1STetsuya Mukawa 	return;
24765f4ec54fSChen Jing D(Mark) }
24775f4ec54fSChen Jing D(Mark) 
2478af75078fSIntel void
2479*55e51c96SNithin Dabilpuram detach_device(char *identifier)
2480*55e51c96SNithin Dabilpuram {
2481*55e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
2482*55e51c96SNithin Dabilpuram 	struct rte_devargs da;
2483*55e51c96SNithin Dabilpuram 	portid_t port_id;
2484*55e51c96SNithin Dabilpuram 
2485*55e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
2486*55e51c96SNithin Dabilpuram 
2487*55e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
2488*55e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
2489*55e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
2490*55e51c96SNithin Dabilpuram 		if (da.args)
2491*55e51c96SNithin Dabilpuram 			free(da.args);
2492*55e51c96SNithin Dabilpuram 		return;
2493*55e51c96SNithin Dabilpuram 	}
2494*55e51c96SNithin Dabilpuram 
2495*55e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2496*55e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2497*55e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2498*55e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
2499*55e51c96SNithin Dabilpuram 				return;
2500*55e51c96SNithin Dabilpuram 			}
2501*55e51c96SNithin Dabilpuram 
2502*55e51c96SNithin Dabilpuram 			/* sibling ports are forced to be closed */
2503*55e51c96SNithin Dabilpuram 			if (ports[port_id].flow_list)
2504*55e51c96SNithin Dabilpuram 				port_flow_flush(port_id);
2505*55e51c96SNithin Dabilpuram 			ports[port_id].port_status = RTE_PORT_CLOSED;
2506*55e51c96SNithin Dabilpuram 			printf("Port %u is now closed\n", port_id);
2507*55e51c96SNithin Dabilpuram 		}
2508*55e51c96SNithin Dabilpuram 	}
2509*55e51c96SNithin Dabilpuram 
2510*55e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2511*55e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2512*55e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
2513*55e51c96SNithin Dabilpuram 		return;
2514*55e51c96SNithin Dabilpuram 	}
2515*55e51c96SNithin Dabilpuram 
2516*55e51c96SNithin Dabilpuram 	remove_invalid_ports();
2517*55e51c96SNithin Dabilpuram 
2518*55e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
2519*55e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
2520*55e51c96SNithin Dabilpuram 	printf("Done\n");
2521*55e51c96SNithin Dabilpuram }
2522*55e51c96SNithin Dabilpuram 
2523*55e51c96SNithin Dabilpuram void
2524af75078fSIntel pmd_test_exit(void)
2525af75078fSIntel {
2526af75078fSIntel 	portid_t pt_id;
2527fb73e096SJeff Guo 	int ret;
2528401b744dSShahaf Shuler 	int i;
2529af75078fSIntel 
25308210ec25SPablo de Lara 	if (test_done == 0)
25318210ec25SPablo de Lara 		stop_packet_forwarding();
25328210ec25SPablo de Lara 
25333a0968c8SShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
25343a0968c8SShahaf Shuler 		if (mempools[i]) {
25353a0968c8SShahaf Shuler 			if (mp_alloc_type == MP_ALLOC_ANON)
25363a0968c8SShahaf Shuler 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
25373a0968c8SShahaf Shuler 						     NULL);
25383a0968c8SShahaf Shuler 		}
25393a0968c8SShahaf Shuler 	}
2540d3a274ceSZhihong Wang 	if (ports != NULL) {
2541d3a274ceSZhihong Wang 		no_link_check = 1;
25427d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(pt_id) {
254308fd782bSCristian Dumitrescu 			printf("\nStopping port %d...\n", pt_id);
2544af75078fSIntel 			fflush(stdout);
2545d3a274ceSZhihong Wang 			stop_port(pt_id);
254608fd782bSCristian Dumitrescu 		}
254708fd782bSCristian Dumitrescu 		RTE_ETH_FOREACH_DEV(pt_id) {
254808fd782bSCristian Dumitrescu 			printf("\nShutting down port %d...\n", pt_id);
254908fd782bSCristian Dumitrescu 			fflush(stdout);
2550d3a274ceSZhihong Wang 			close_port(pt_id);
2551af75078fSIntel 		}
2552d3a274ceSZhihong Wang 	}
2553fb73e096SJeff Guo 
2554fb73e096SJeff Guo 	if (hot_plug) {
2555fb73e096SJeff Guo 		ret = rte_dev_event_monitor_stop();
25562049c511SJeff Guo 		if (ret) {
2557fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
2558fb73e096SJeff Guo 				"fail to stop device event monitor.");
25592049c511SJeff Guo 			return;
25602049c511SJeff Guo 		}
2561fb73e096SJeff Guo 
25622049c511SJeff Guo 		ret = rte_dev_event_callback_unregister(NULL,
2563cc1bf307SJeff Guo 			dev_event_callback, NULL);
25642049c511SJeff Guo 		if (ret < 0) {
2565fb73e096SJeff Guo 			RTE_LOG(ERR, EAL,
25662049c511SJeff Guo 				"fail to unregister device event callback.\n");
25672049c511SJeff Guo 			return;
25682049c511SJeff Guo 		}
25692049c511SJeff Guo 
25702049c511SJeff Guo 		ret = rte_dev_hotplug_handle_disable();
25712049c511SJeff Guo 		if (ret) {
25722049c511SJeff Guo 			RTE_LOG(ERR, EAL,
25732049c511SJeff Guo 				"fail to disable hotplug handling.\n");
25742049c511SJeff Guo 			return;
25752049c511SJeff Guo 		}
2576fb73e096SJeff Guo 	}
2577401b744dSShahaf Shuler 	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2578401b744dSShahaf Shuler 		if (mempools[i])
2579401b744dSShahaf Shuler 			rte_mempool_free(mempools[i]);
2580401b744dSShahaf Shuler 	}
2581fb73e096SJeff Guo 
2582d3a274ceSZhihong Wang 	printf("\nBye...\n");
2583af75078fSIntel }
2584af75078fSIntel 
/* Prototype of an interactive menu command handler: no arguments, no result. */
typedef void (*cmd_func_t)(void);

/* Binds a command name typed by the user to the handler that executes it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string to match */
	cmd_func_t cmd_func;	/* handler invoked when the name matches */
};

/* Number of entries in the pmd_test_menu command table (defined elsewhere
 * in this file).
 */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2592af75078fSIntel 
2593ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2594af75078fSIntel static void
2595edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2596af75078fSIntel {
2597ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2598ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2599f8244c63SZhiyong Yang 	portid_t portid;
2600f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2601ce8d5614SIntel 	struct rte_eth_link link;
2602ce8d5614SIntel 
2603ce8d5614SIntel 	printf("Checking link statuses...\n");
2604ce8d5614SIntel 	fflush(stdout);
2605ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2606ce8d5614SIntel 		all_ports_up = 1;
26077d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2608ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2609ce8d5614SIntel 				continue;
2610ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2611ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2612ce8d5614SIntel 			/* print link status if flag set */
2613ce8d5614SIntel 			if (print_flag == 1) {
2614ce8d5614SIntel 				if (link.link_status)
2615f8244c63SZhiyong Yang 					printf(
2616f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2617f8244c63SZhiyong Yang 					portid, link.link_speed,
2618ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2619ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2620ce8d5614SIntel 				else
2621f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2622ce8d5614SIntel 				continue;
2623ce8d5614SIntel 			}
2624ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
262509419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2626ce8d5614SIntel 				all_ports_up = 0;
2627ce8d5614SIntel 				break;
2628ce8d5614SIntel 			}
2629ce8d5614SIntel 		}
2630ce8d5614SIntel 		/* after finally printing all link status, get out */
2631ce8d5614SIntel 		if (print_flag == 1)
2632ce8d5614SIntel 			break;
2633ce8d5614SIntel 
2634ce8d5614SIntel 		if (all_ports_up == 0) {
2635ce8d5614SIntel 			fflush(stdout);
2636ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2637ce8d5614SIntel 		}
2638ce8d5614SIntel 
2639ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2640ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2641ce8d5614SIntel 			print_flag = 1;
2642ce8d5614SIntel 		}
26438ea656f8SGaetan Rivet 
26448ea656f8SGaetan Rivet 		if (lsc_interrupt)
26458ea656f8SGaetan Rivet 			break;
2646ce8d5614SIntel 	}
2647af75078fSIntel }
2648af75078fSIntel 
/*
 * This callback removes one port of a device. It has a limitation because
 * it does not handle the removal of multiple ports of a device.
 * TODO: the device detach invocation is planned to move from the user side
 * into the EAL. And convert all PMDs to free port resources on ether device
 * closing.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	/* Save the user's link-check setting so it can be restored below. */
	int org_no_link_check = no_link_check;
	/* The port id travels through the void* alarm argument. */
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	/* If forwarding is active and involves this port, pause it for the
	 * teardown and remember to restart it once the port is detached.
	 */
	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* Skip link-status polling while stopping a port that is going away. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2676284c908cSGaetan Rivet 
/* This function is used by the interrupt thread.
 * Generic ethdev event callback: optionally traces the event (according to
 * event_print_mask) and reacts to port creation and removal events.
 * Always returns 0 (no ret_param is produced).
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	/* Report out-of-range event ids; otherwise print the event only if
	 * its bit is selected in event_print_mask.
	 */
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		/* A port appeared at runtime: flag it so it gets configured
		 * before being used.
		 */
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* Defer the actual removal via an alarm: detaching from
		 * within the interrupt callback itself is not safe.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
271276ad4a2dSGaetan Rivet 
271397b5d8b5SThomas Monjalon static int
271497b5d8b5SThomas Monjalon register_eth_event_callback(void)
271597b5d8b5SThomas Monjalon {
271697b5d8b5SThomas Monjalon 	int ret;
271797b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
271897b5d8b5SThomas Monjalon 
271997b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
272097b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
272197b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
272297b5d8b5SThomas Monjalon 				event,
272397b5d8b5SThomas Monjalon 				eth_event_callback,
272497b5d8b5SThomas Monjalon 				NULL);
272597b5d8b5SThomas Monjalon 		if (ret != 0) {
272697b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
272797b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
272897b5d8b5SThomas Monjalon 			return -1;
272997b5d8b5SThomas Monjalon 		}
273097b5d8b5SThomas Monjalon 	}
273197b5d8b5SThomas Monjalon 
273297b5d8b5SThomas Monjalon 	return 0;
273397b5d8b5SThomas Monjalon }
273497b5d8b5SThomas Monjalon 
/* This function is used by the interrupt thread.
 * Device (bus-level) event callback: reacts to hotplug add/remove
 * notifications identified by the device name.
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	/* An invalid event id is only reported here; the switch below
	 * silently ignores it through its default branch.
	 */
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		/* NOTE(review): this add notification looks informational but
		 * is logged at ERR level — confirm the intended level.
		 */
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
2784fb73e096SJeff Guo 
2785013af9b6SIntel static int
278628caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2787af75078fSIntel {
2788013af9b6SIntel 	uint16_t i;
2789af75078fSIntel 	int diag;
2790013af9b6SIntel 	uint8_t mapping_found = 0;
2791af75078fSIntel 
2792013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2793013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2794013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2795013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2796013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2797013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2798013af9b6SIntel 			if (diag != 0)
2799013af9b6SIntel 				return diag;
2800013af9b6SIntel 			mapping_found = 1;
2801af75078fSIntel 		}
2802013af9b6SIntel 	}
2803013af9b6SIntel 	if (mapping_found)
2804013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2805013af9b6SIntel 	return 0;
2806013af9b6SIntel }
2807013af9b6SIntel 
2808013af9b6SIntel static int
280928caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2810013af9b6SIntel {
2811013af9b6SIntel 	uint16_t i;
2812013af9b6SIntel 	int diag;
2813013af9b6SIntel 	uint8_t mapping_found = 0;
2814013af9b6SIntel 
2815013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2816013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2817013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2818013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2819013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2820013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2821013af9b6SIntel 			if (diag != 0)
2822013af9b6SIntel 				return diag;
2823013af9b6SIntel 			mapping_found = 1;
2824013af9b6SIntel 		}
2825013af9b6SIntel 	}
2826013af9b6SIntel 	if (mapping_found)
2827013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2828013af9b6SIntel 	return 0;
2829013af9b6SIntel }
2830013af9b6SIntel 
2831013af9b6SIntel static void
283228caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2833013af9b6SIntel {
2834013af9b6SIntel 	int diag = 0;
2835013af9b6SIntel 
2836013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2837af75078fSIntel 	if (diag != 0) {
2838013af9b6SIntel 		if (diag == -ENOTSUP) {
2839013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2840013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2841013af9b6SIntel 		}
2842013af9b6SIntel 		else
2843013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2844013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2845013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2846af75078fSIntel 					pi, diag);
2847af75078fSIntel 	}
2848013af9b6SIntel 
2849013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2850af75078fSIntel 	if (diag != 0) {
2851013af9b6SIntel 		if (diag == -ENOTSUP) {
2852013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2853013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2854013af9b6SIntel 		}
2855013af9b6SIntel 		else
2856013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2857013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2858013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2859af75078fSIntel 					pi, diag);
2860af75078fSIntel 	}
2861af75078fSIntel }
2862af75078fSIntel 
/* Reset every Rx/Tx queue of a port to the PMD's default configuration
 * (from dev_info), preserving any per-queue offloads already requested,
 * then apply the threshold/descriptor overrides supplied on the command
 * line (RTE_PMD_PARAM_UNSET marks parameters the user did not pass).
 */
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		/* Keep previously requested offloads across the reset to the
		 * PMD default Rx queue configuration.
		 */
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		/* Same preservation logic as above, for the Tx side. */
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
2919f2c5125aSPablo de Lara 
/* Build the default configuration of every probed Ethernet port: flow
 * director settings, RSS (only when several Rx queues are in use), the
 * per-queue Rx/Tx parameters, the MAC address, the queue statistics
 * mappings and, when requested and supported, link-state-change and
 * device-removal interrupts.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (nb_rxq > 1) {
			/* Several Rx queues: enable RSS, restricted to the
			 * hash types the device actually supports.
			 */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Choose the multi-queue Rx mode unless DCB already did. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable interrupts only when both the user asked for them
		 * and the device advertises the capability.
		 */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
2965013af9b6SIntel 
296641b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
296741b05095SBernard Iremonger {
296841b05095SBernard Iremonger 	struct rte_port *port;
296941b05095SBernard Iremonger 
297041b05095SBernard Iremonger 	port = &ports[slave_pid];
297141b05095SBernard Iremonger 	port->slave_flag = 1;
297241b05095SBernard Iremonger }
297341b05095SBernard Iremonger 
297441b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
297541b05095SBernard Iremonger {
297641b05095SBernard Iremonger 	struct rte_port *port;
297741b05095SBernard Iremonger 
297841b05095SBernard Iremonger 	port = &ports[slave_pid];
297941b05095SBernard Iremonger 	port->slave_flag = 0;
298041b05095SBernard Iremonger }
298141b05095SBernard Iremonger 
29820e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
29830e545d30SBernard Iremonger {
29840e545d30SBernard Iremonger 	struct rte_port *port;
29850e545d30SBernard Iremonger 
29860e545d30SBernard Iremonger 	port = &ports[slave_pid];
2987b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2988b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2989b8b8b344SMatan Azrad 		return 1;
2990b8b8b344SMatan Azrad 	return 0;
29910e545d30SBernard Iremonger }
29920e545d30SBernard Iremonger 
/* VLAN tags used by the DCB + virtualization (VMDq) configuration:
 * one tag per pool, covering up to the 32 pools that get_eth_dcb_conf()
 * may configure.
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2999013af9b6SIntel 
/* Fill @eth_conf with a DCB configuration for @pid.
 *
 * In DCB_VT_ENABLED mode a combined VMDq+DCB setup is built: one pool per
 * VLAN tag from the vlan_tags[] table (32 pools for 4 TCs, 16 for 8 TCs).
 * Otherwise plain DCB is configured with RSS kept as currently set on the
 * device. @pfc_en additionally enables priority flow control.
 * Returns 0 on success or the error from reading the RSS configuration.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Map one VLAN tag to each pool, round-robin. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the user priorities over the traffic classes. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Preserve the RSS settings currently active on the port. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the user priorities over the traffic classes. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
3073013af9b6SIntel 
3074013af9b6SIntel int
30751a572499SJingjing Wu init_port_dcb_config(portid_t pid,
30761a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
30771a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
30781a572499SJingjing Wu 		     uint8_t pfc_en)
3079013af9b6SIntel {
3080013af9b6SIntel 	struct rte_eth_conf port_conf;
3081013af9b6SIntel 	struct rte_port *rte_port;
3082013af9b6SIntel 	int retval;
3083013af9b6SIntel 	uint16_t i;
3084013af9b6SIntel 
30852a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3086013af9b6SIntel 
3087013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3088013af9b6SIntel 	/* Enter DCB configuration status */
3089013af9b6SIntel 	dcb_config = 1;
3090013af9b6SIntel 
3091d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3092d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3093d5354e89SYanglong Wu 
3094013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3095ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3096013af9b6SIntel 	if (retval < 0)
3097013af9b6SIntel 		return retval;
30980074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3099013af9b6SIntel 
31002f203d44SQi Zhang 	/* re-configure the device . */
31012b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
31022b0e0ebaSChenbo Xia 	if (retval < 0)
31032b0e0ebaSChenbo Xia 		return retval;
31042a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
31052a977b89SWenzhuo Lu 
31062a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
31072a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
31082a977b89SWenzhuo Lu 	 */
31092a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
31102a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
31112a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
31122a977b89SWenzhuo Lu 			" for port %d.", pid);
31132a977b89SWenzhuo Lu 		return -1;
31142a977b89SWenzhuo Lu 	}
31152a977b89SWenzhuo Lu 
31162a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
31172a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
31182a977b89SWenzhuo Lu 	 */
31192a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
312086ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
312186ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
312286ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
312386ef65eeSBernard Iremonger 		} else {
31242a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
31252a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
312686ef65eeSBernard Iremonger 		}
31272a977b89SWenzhuo Lu 	} else {
31282a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
31292a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
31302a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
31312a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
31322a977b89SWenzhuo Lu 		} else {
31332a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
31342a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
31352a977b89SWenzhuo Lu 
31362a977b89SWenzhuo Lu 		}
31372a977b89SWenzhuo Lu 	}
31382a977b89SWenzhuo Lu 	rx_free_thresh = 64;
31392a977b89SWenzhuo Lu 
3140013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3141013af9b6SIntel 
3142f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3143013af9b6SIntel 	/* VLAN filter */
31440074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
31451a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3146013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3147013af9b6SIntel 
3148013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3149013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3150013af9b6SIntel 
31517741e4cfSIntel 	rte_port->dcb_flag = 1;
31527741e4cfSIntel 
3153013af9b6SIntel 	return 0;
3154af75078fSIntel }
3155af75078fSIntel 
3156ffc468ffSTetsuya Mukawa static void
3157ffc468ffSTetsuya Mukawa init_port(void)
3158ffc468ffSTetsuya Mukawa {
3159ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3160ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3161ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3162ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3163ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3164ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3165ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3166ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3167ffc468ffSTetsuya Mukawa 	}
316829841336SPhil Yang 
316929841336SPhil Yang 	/* Initialize ports NUMA structures */
317029841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
317129841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
317229841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3173ffc468ffSTetsuya Mukawa }
3174ffc468ffSTetsuya Mukawa 
/* Tear down all ports and terminate the interactive prompt; invoked from
 * signal_handler() so that SIGINT/SIGTERM exit cleanly.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3181d3a274ceSZhihong Wang 
3182d3a274ceSZhihong Wang static void
3183cfea1f30SPablo de Lara print_stats(void)
3184cfea1f30SPablo de Lara {
3185cfea1f30SPablo de Lara 	uint8_t i;
3186cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3187cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3188cfea1f30SPablo de Lara 
3189cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3190cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3191cfea1f30SPablo de Lara 
3192cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3193cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3194cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3195683d1e82SIgor Romanov 
3196683d1e82SIgor Romanov 	fflush(stdout);
3197cfea1f30SPablo de Lara }
3198cfea1f30SPablo de Lara 
/* Handle SIGINT/SIGTERM: shut down the capture and latency frameworks,
 * close all ports via force_quit(), then re-raise the signal with the
 * default disposition so the process exits with the conventional status.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3220d3a274ceSZhihong Wang 
3221af75078fSIntel int
3222af75078fSIntel main(int argc, char** argv)
3223af75078fSIntel {
3224af75078fSIntel 	int diag;
3225f8244c63SZhiyong Yang 	portid_t port_id;
32264918a357SXiaoyun Li 	uint16_t count;
3227fb73e096SJeff Guo 	int ret;
3228af75078fSIntel 
3229d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3230d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3231d3a274ceSZhihong Wang 
3232af75078fSIntel 	diag = rte_eal_init(argc, argv);
3233af75078fSIntel 	if (diag < 0)
3234af75078fSIntel 		rte_panic("Cannot init EAL\n");
3235af75078fSIntel 
3236285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3237285fd101SOlivier Matz 	if (testpmd_logtype < 0)
3238285fd101SOlivier Matz 		rte_panic("Cannot register log type");
3239285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3240285fd101SOlivier Matz 
324197b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
324297b5d8b5SThomas Monjalon 	if (ret != 0)
324397b5d8b5SThomas Monjalon 		rte_panic("Cannot register for ethdev events");
324497b5d8b5SThomas Monjalon 
32454aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
32464aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3247e9436f54STiwei Bie 	rte_pdump_init();
32484aa0d012SAnatoly Burakov #endif
32494aa0d012SAnatoly Burakov 
32504918a357SXiaoyun Li 	count = 0;
32514918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
32524918a357SXiaoyun Li 		ports_ids[count] = port_id;
32534918a357SXiaoyun Li 		count++;
32544918a357SXiaoyun Li 	}
32554918a357SXiaoyun Li 	nb_ports = (portid_t) count;
32564aa0d012SAnatoly Burakov 	if (nb_ports == 0)
32574aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
32584aa0d012SAnatoly Burakov 
32594aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
32604aa0d012SAnatoly Burakov 	init_port();
32614aa0d012SAnatoly Burakov 
32624aa0d012SAnatoly Burakov 	set_def_fwd_config();
32634aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
32644aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
32654aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
32664aa0d012SAnatoly Burakov 
3267e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3268e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3269e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3270e505d84cSAnatoly Burakov #endif
3271e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3272e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3273e505d84cSAnatoly Burakov #endif
3274e505d84cSAnatoly Burakov 
3275fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
32765fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3277fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3278fb7b8b32SAnatoly Burakov #else
3279fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3280fb7b8b32SAnatoly Burakov #endif
3281fb7b8b32SAnatoly Burakov 
3282e505d84cSAnatoly Burakov 	argc -= diag;
3283e505d84cSAnatoly Burakov 	argv += diag;
3284e505d84cSAnatoly Burakov 	if (argc > 1)
3285e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3286e505d84cSAnatoly Burakov 
3287e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3288285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
32891c036b16SEelco Chaudron 			strerror(errno));
32901c036b16SEelco Chaudron 	}
32911c036b16SEelco Chaudron 
329299cabef0SPablo de Lara 	if (tx_first && interactive)
329399cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
329499cabef0SPablo de Lara 				"interactive mode.\n");
32958820cba4SDavid Hunt 
32968820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
32978820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
32988820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
32998820cba4SDavid Hunt 		lsc_interrupt = 0;
33008820cba4SDavid Hunt 	}
33018820cba4SDavid Hunt 
33025a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
33035a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
33045a8fb55cSReshma Pattan 
33055a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3306af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3307af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3308af75078fSIntel 		       nb_rxq, nb_txq);
3309af75078fSIntel 
3310af75078fSIntel 	init_config();
3311fb73e096SJeff Guo 
3312fb73e096SJeff Guo 	if (hot_plug) {
33132049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3314fb73e096SJeff Guo 		if (ret) {
33152049c511SJeff Guo 			RTE_LOG(ERR, EAL,
33162049c511SJeff Guo 				"fail to enable hotplug handling.");
3317fb73e096SJeff Guo 			return -1;
3318fb73e096SJeff Guo 		}
3319fb73e096SJeff Guo 
33202049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
33212049c511SJeff Guo 		if (ret) {
33222049c511SJeff Guo 			RTE_LOG(ERR, EAL,
33232049c511SJeff Guo 				"fail to start device event monitoring.");
33242049c511SJeff Guo 			return -1;
33252049c511SJeff Guo 		}
33262049c511SJeff Guo 
33272049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3328cc1bf307SJeff Guo 			dev_event_callback, NULL);
33292049c511SJeff Guo 		if (ret) {
33302049c511SJeff Guo 			RTE_LOG(ERR, EAL,
33312049c511SJeff Guo 				"fail  to register device event callback\n");
33322049c511SJeff Guo 			return -1;
33332049c511SJeff Guo 		}
3334fb73e096SJeff Guo 	}
3335fb73e096SJeff Guo 
3336148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
3337148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3338af75078fSIntel 
3339ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
33407d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
3341ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
3342af75078fSIntel 
33437e4441c8SRemy Horton 	/* Init metrics library */
33447e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
33457e4441c8SRemy Horton 
334662d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
334762d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
334862d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
334962d3216dSReshma Pattan 		if (ret)
335062d3216dSReshma Pattan 			printf("Warning: latencystats init()"
335162d3216dSReshma Pattan 				" returned error %d\n",	ret);
335262d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
335362d3216dSReshma Pattan 			latencystats_lcore_id);
335462d3216dSReshma Pattan 	}
335562d3216dSReshma Pattan #endif
335662d3216dSReshma Pattan 
33577e4441c8SRemy Horton 	/* Setup bitrate stats */
33587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3359e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
33607e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
33617e4441c8SRemy Horton 		if (bitrate_data == NULL)
3362e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3363e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
33647e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3365e25e6c70SRemy Horton 	}
33667e4441c8SRemy Horton #endif
33677e4441c8SRemy Horton 
33680d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
336981ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
337081ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
337181ef862bSAllain Legacy 
3372ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3373ca7feb22SCyril Chemparathy 		if (auto_start) {
3374ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3375ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3376ca7feb22SCyril Chemparathy 		}
3377af75078fSIntel 		prompt();
33780de738cfSJiayu Hu 		pmd_test_exit();
3379ca7feb22SCyril Chemparathy 	} else
33800d56cb81SThomas Monjalon #endif
33810d56cb81SThomas Monjalon 	{
3382af75078fSIntel 		char c;
3383af75078fSIntel 		int rc;
3384af75078fSIntel 
3385d9a191a0SPhil Yang 		f_quit = 0;
3386d9a191a0SPhil Yang 
3387af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
338899cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3389cfea1f30SPablo de Lara 		if (stats_period != 0) {
3390cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3391cfea1f30SPablo de Lara 			uint64_t timer_period;
3392cfea1f30SPablo de Lara 
3393cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3394cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3395cfea1f30SPablo de Lara 
3396d9a191a0SPhil Yang 			while (f_quit == 0) {
3397cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3398cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3399cfea1f30SPablo de Lara 
3400cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3401cfea1f30SPablo de Lara 					print_stats();
3402cfea1f30SPablo de Lara 					/* Reset the timer */
3403cfea1f30SPablo de Lara 					diff_time = 0;
3404cfea1f30SPablo de Lara 				}
3405cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3406cfea1f30SPablo de Lara 				prev_time = cur_time;
3407cfea1f30SPablo de Lara 				sleep(1);
3408cfea1f30SPablo de Lara 			}
3409cfea1f30SPablo de Lara 		}
3410cfea1f30SPablo de Lara 
3411af75078fSIntel 		printf("Press enter to exit\n");
3412af75078fSIntel 		rc = read(0, &c, 1);
3413d3a274ceSZhihong Wang 		pmd_test_exit();
3414af75078fSIntel 		if (rc < 0)
3415af75078fSIntel 			return 1;
3416af75078fSIntel 	}
3417af75078fSIntel 
3418af75078fSIntel 	return 0;
3419af75078fSIntel }
3420