xref: /dpdk/app/test-pmd/testpmd.c (revision 4940344dab1d4da95ec6cd9f4fa8ac1fbc61ba54)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30af75078fSIntel #include <rte_memory.h>
31af75078fSIntel #include <rte_memcpy.h>
32af75078fSIntel #include <rte_launch.h>
33af75078fSIntel #include <rte_eal.h>
34284c908cSGaetan Rivet #include <rte_alarm.h>
35af75078fSIntel #include <rte_per_lcore.h>
36af75078fSIntel #include <rte_lcore.h>
37af75078fSIntel #include <rte_atomic.h>
38af75078fSIntel #include <rte_branch_prediction.h>
39af75078fSIntel #include <rte_mempool.h>
40af75078fSIntel #include <rte_malloc.h>
41af75078fSIntel #include <rte_mbuf.h>
420e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
43af75078fSIntel #include <rte_interrupts.h>
44af75078fSIntel #include <rte_pci.h>
45af75078fSIntel #include <rte_ether.h>
46af75078fSIntel #include <rte_ethdev.h>
47edab33b1STetsuya Mukawa #include <rte_dev.h>
48af75078fSIntel #include <rte_string_fns.h>
49e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
50e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
51e261265eSRadu Nicolau #endif
52102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
53102b7329SReshma Pattan #include <rte_pdump.h>
54102b7329SReshma Pattan #endif
55938a184aSAdrien Mazarguil #include <rte_flow.h>
567e4441c8SRemy Horton #include <rte_metrics.h>
577e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
587e4441c8SRemy Horton #include <rte_bitrate.h>
597e4441c8SRemy Horton #endif
6062d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6162d3216dSReshma Pattan #include <rte_latencystats.h>
6262d3216dSReshma Pattan #endif
63af75078fSIntel 
64af75078fSIntel #include "testpmd.h"
65af75078fSIntel 
66c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
67c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
69c7f5dba7SAnatoly Burakov #else
70c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
71c7f5dba7SAnatoly Burakov #endif
72c7f5dba7SAnatoly Burakov 
73c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
74c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
75c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
76c7f5dba7SAnatoly Burakov #else
77c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
78c7f5dba7SAnatoly Burakov #endif
79c7f5dba7SAnatoly Burakov 
80c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
8172512e18SViacheslav Ovsiienko #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
1406d13ea8eSOlivier Matz struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 * NULL-terminated table of all available forwarding engines; the io
 * engine is the default (see cur_fwd_eng below). Entries guarded by
 * #ifdef are only available when the corresponding library is built.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL, /* sentinel marking the end of the table */
};
187af75078fSIntel 
188401b744dSShahaf Shuler struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
18959fcf854SShahaf Shuler uint16_t mempool_flags;
190401b744dSShahaf Shuler 
191af75078fSIntel struct fwd_config cur_fwd_config;
192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193bf56fce1SZhihong Wang uint32_t retry_enabled;
194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196af75078fSIntel 
197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199c8798818SIntel                                       * specified on command-line. */
200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201d9a191a0SPhil Yang 
202d9a191a0SPhil Yang /*
203d9a191a0SPhil Yang  * In container, it cannot terminate the process which running with 'stats-period'
204d9a191a0SPhil Yang  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
205d9a191a0SPhil Yang  */
206d9a191a0SPhil Yang uint8_t f_quit;
207d9a191a0SPhil Yang 
208af75078fSIntel /*
209af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
210af75078fSIntel  */
211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
214af75078fSIntel };
215af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216af75078fSIntel 
21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21979bec05bSKonstantin Ananyev 
22082010ef5SYongseok Koh uint8_t txonly_multi_flow;
22182010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
22282010ef5SYongseok Koh 
223*4940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_inter;
224*4940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between bursts. */
225*4940344dSViacheslav Ovsiienko 
226*4940344dSViacheslav Ovsiienko uint32_t tx_pkt_times_intra;
227*4940344dSViacheslav Ovsiienko /**< Timings for send scheduling in TXONLY mode, time between packets. */
228*4940344dSViacheslav Ovsiienko 
229af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
230e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
231af75078fSIntel 
232900550deSIntel /* current configuration is in DCB or not, 0 means it is not in DCB mode */
233900550deSIntel uint8_t dcb_config = 0;
234900550deSIntel 
235900550deSIntel /* Whether the dcb is in testing status */
236900550deSIntel uint8_t dcb_test = 0;
237900550deSIntel 
238af75078fSIntel /*
239af75078fSIntel  * Configurable number of RX/TX queues.
240af75078fSIntel  */
2411c69df45SOri Kam queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
242af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
243af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
244af75078fSIntel 
245af75078fSIntel /*
246af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2478599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
248af75078fSIntel  */
2498599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2508599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
251af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
252af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
253af75078fSIntel 
254f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
255af75078fSIntel /*
256af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
257af75078fSIntel  */
258af75078fSIntel 
259f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
260f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
261f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
262af75078fSIntel 
263f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
264f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
265f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
266af75078fSIntel 
267af75078fSIntel /*
268af75078fSIntel  * Configurable value of RX free threshold.
269af75078fSIntel  */
270f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
271af75078fSIntel 
272af75078fSIntel /*
273ce8d5614SIntel  * Configurable value of RX drop enable.
274ce8d5614SIntel  */
275f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
276ce8d5614SIntel 
277ce8d5614SIntel /*
278af75078fSIntel  * Configurable value of TX free threshold.
279af75078fSIntel  */
280f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
281af75078fSIntel 
282af75078fSIntel /*
283af75078fSIntel  * Configurable value of TX RS bit threshold.
284af75078fSIntel  */
285f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
286af75078fSIntel 
287af75078fSIntel /*
2883c156061SJens Freimann  * Configurable value of buffered packets before sending.
2893c156061SJens Freimann  */
2903c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2913c156061SJens Freimann 
2923c156061SJens Freimann /*
2933c156061SJens Freimann  * Configurable value of packet buffer timeout.
2943c156061SJens Freimann  */
2953c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2963c156061SJens Freimann 
2973c156061SJens Freimann /*
2983c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2993c156061SJens Freimann  * used for simulating noisy neighbour behaviour
3003c156061SJens Freimann  */
3013c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
3023c156061SJens Freimann 
3033c156061SJens Freimann /*
3043c156061SJens Freimann  * Configurable value of number of random writes done in
3053c156061SJens Freimann  * VNF simulation memory area.
3063c156061SJens Freimann  */
3073c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3083c156061SJens Freimann 
3093c156061SJens Freimann /*
3103c156061SJens Freimann  * Configurable value of number of random reads done in
3113c156061SJens Freimann  * VNF simulation memory area.
3123c156061SJens Freimann  */
3133c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3143c156061SJens Freimann 
3153c156061SJens Freimann /*
3163c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3173c156061SJens Freimann  * VNF simulation memory area.
3183c156061SJens Freimann  */
3193c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3203c156061SJens Freimann 
3213c156061SJens Freimann /*
322af75078fSIntel  * Receive Side Scaling (RSS) configuration.
323af75078fSIntel  */
3248a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
325af75078fSIntel 
326af75078fSIntel /*
327af75078fSIntel  * Port topology configuration
328af75078fSIntel  */
329af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
330af75078fSIntel 
3317741e4cfSIntel /*
3327741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3337741e4cfSIntel  */
3347741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3357741e4cfSIntel 
336af75078fSIntel /*
3377ee3e944SVasily Philipov  * Flow API isolated mode.
3387ee3e944SVasily Philipov  */
3397ee3e944SVasily Philipov uint8_t flow_isolate_all;
3407ee3e944SVasily Philipov 
3417ee3e944SVasily Philipov /*
342bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
343bc202406SDavid Marchand  */
344bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
345bc202406SDavid Marchand 
346bc202406SDavid Marchand /*
3476937d210SStephen Hemminger  * Don't automatically start all ports in interactive mode.
3486937d210SStephen Hemminger  */
3496937d210SStephen Hemminger uint8_t no_device_start = 0;
3506937d210SStephen Hemminger 
3516937d210SStephen Hemminger /*
3528ea656f8SGaetan Rivet  * Enable link status change notification
3538ea656f8SGaetan Rivet  */
3548ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3558ea656f8SGaetan Rivet 
3568ea656f8SGaetan Rivet /*
357284c908cSGaetan Rivet  * Enable device removal notification.
358284c908cSGaetan Rivet  */
359284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
360284c908cSGaetan Rivet 
361fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
362fb73e096SJeff Guo 
3634f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3644f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3654f1ed78eSThomas Monjalon 
366b0a9354aSPavan Nikhilesh /* Clear ptypes on port initialization. */
367b0a9354aSPavan Nikhilesh uint8_t clear_ptypes = true;
368b0a9354aSPavan Nikhilesh 
/* Pretty printing of ethdev events; indexed by enum rte_eth_event_type,
 * terminated by the NULL entry at RTE_ETH_EVENT_MAX.
 */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};
38497b5d8b5SThomas Monjalon 
/*
 * Display or mask ether events
 * Default to all events except VF_MBOX, NEW and DESTROY
 * (those three bits are not set in the mask below).
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
397e505d84cSAnatoly Burakov /*
398e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
399e505d84cSAnatoly Burakov  */
400e505d84cSAnatoly Burakov int do_mlockall = 0;
4013af72783SGaetan Rivet 
4023af72783SGaetan Rivet /*
4037b7e5ba7SIntel  * NIC bypass mode configuration options.
4047b7e5ba7SIntel  */
4057b7e5ba7SIntel 
40650c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
4077b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
408e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
4097b7e5ba7SIntel #endif
4107b7e5ba7SIntel 
411e261265eSRadu Nicolau 
41262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
41362d3216dSReshma Pattan 
41462d3216dSReshma Pattan /*
41562d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
41662d3216dSReshma Pattan  */
41762d3216dSReshma Pattan uint8_t latencystats_enabled;
41862d3216dSReshma Pattan 
41962d3216dSReshma Pattan /*
42062d3216dSReshma Pattan  * Lcore ID to serve latency statistics.
42162d3216dSReshma Pattan  */
42262d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
42362d3216dSReshma Pattan 
42462d3216dSReshma Pattan #endif
42562d3216dSReshma Pattan 
4267b7e5ba7SIntel /*
427af75078fSIntel  * Ethernet device configuration.
428af75078fSIntel  */
429af75078fSIntel struct rte_eth_rxmode rx_mode = {
43035b2d13fSOlivier Matz 	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
43135b2d13fSOlivier Matz 		/**< Default maximum frame length. */
432af75078fSIntel };
433af75078fSIntel 
43407e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
43507e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
43607e5f7bdSShahaf Shuler };
437fd8c20aaSShahaf Shuler 
/* Default flow director configuration: disabled (RTE_FDIR_MODE_NONE),
 * with exact-match masks for most fields.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127, /* queue receiving packets that fail the filters */
};
460af75078fSIntel 
4612950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
462af75078fSIntel 
463ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
464ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
465ed30d9b6SIntel 
466ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
467ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
468ed30d9b6SIntel 
469ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
470ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
471ed30d9b6SIntel 
472a4fd5eeeSElza Mathew /*
473a4fd5eeeSElza Mathew  * Display zero values by default for xstats
474a4fd5eeeSElza Mathew  */
475a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
476a4fd5eeeSElza Mathew 
477c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
478c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4797acf894dSStephen Hurd 
480e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4817e4441c8SRemy Horton /* Bitrate statistics */
4827e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
483e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
484e25e6c70SRemy Horton uint8_t bitrate_enabled;
485e25e6c70SRemy Horton #endif
4867e4441c8SRemy Horton 
487b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
488b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
489b40f8d78SJiayu Hu 
490f9295aa2SXiaoyu Min /*
491f9295aa2SXiaoyu Min  * hexadecimal bitmask of RX mq mode can be enabled.
492f9295aa2SXiaoyu Min  */
493f9295aa2SXiaoyu Min enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
494f9295aa2SXiaoyu Min 
495ed30d9b6SIntel /* Forward function declarations */
496c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
49728caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
49828caa76aSZhiyong Yang 						   struct rte_port *port);
499edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
500f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
50176ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
502d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
503cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
504fb73e096SJeff Guo 				enum rte_dev_event_type type,
505fb73e096SJeff Guo 				void *param);
506ce8d5614SIntel 
507ce8d5614SIntel /*
508ce8d5614SIntel  * Check if all the ports are started.
509ce8d5614SIntel  * If yes, return positive value. If not, return zero.
510ce8d5614SIntel  */
511ce8d5614SIntel static int all_ports_started(void);
512ed30d9b6SIntel 
51352f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
51435b2d13fSOlivier Matz uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
51552f38a20SJiayu Hu 
516b57b66a9SOri Kam /* Holds the registered mbuf dynamic flags names. */
517b57b66a9SOri Kam char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
518b57b66a9SOri Kam 
/*
 * Helper function to check whether a socket ID has NOT been discovered yet.
 * Returns a positive value if socket_id is new (absent from socket_ids[]),
 * and zero if it has already been recorded.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
534c9cafcc8SShahaf Shuler 
535c9cafcc8SShahaf Shuler /*
536af75078fSIntel  * Setup default configuration.
537af75078fSIntel  */
538af75078fSIntel static void
539af75078fSIntel set_default_fwd_lcores_config(void)
540af75078fSIntel {
541af75078fSIntel 	unsigned int i;
542af75078fSIntel 	unsigned int nb_lc;
5437acf894dSStephen Hurd 	unsigned int sock_num;
544af75078fSIntel 
545af75078fSIntel 	nb_lc = 0;
546af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
547dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
548dbfb8ec7SPhil Yang 			continue;
549c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
550c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
551c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
552c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
553c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
554c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
555c9cafcc8SShahaf Shuler 			}
556c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5577acf894dSStephen Hurd 		}
558f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
559f54fe5eeSStephen Hurd 			continue;
560f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
561af75078fSIntel 	}
562af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
563af75078fSIntel 	nb_cfg_lcores = nb_lcores;
564af75078fSIntel 	nb_fwd_lcores = 1;
565af75078fSIntel }
566af75078fSIntel 
567af75078fSIntel static void
568af75078fSIntel set_def_peer_eth_addrs(void)
569af75078fSIntel {
570af75078fSIntel 	portid_t i;
571af75078fSIntel 
572af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
57335b2d13fSOlivier Matz 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
574af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
575af75078fSIntel 	}
576af75078fSIntel }
577af75078fSIntel 
578af75078fSIntel static void
579af75078fSIntel set_default_fwd_ports_config(void)
580af75078fSIntel {
581af75078fSIntel 	portid_t pt_id;
58265a7360cSMatan Azrad 	int i = 0;
583af75078fSIntel 
584effdb8bbSPhil Yang 	RTE_ETH_FOREACH_DEV(pt_id) {
58565a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
586af75078fSIntel 
587effdb8bbSPhil Yang 		/* Update sockets info according to the attached device */
588effdb8bbSPhil Yang 		int socket_id = rte_eth_dev_socket_id(pt_id);
589effdb8bbSPhil Yang 		if (socket_id >= 0 && new_socket_id(socket_id)) {
590effdb8bbSPhil Yang 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
591effdb8bbSPhil Yang 				rte_exit(EXIT_FAILURE,
592effdb8bbSPhil Yang 					 "Total sockets greater than %u\n",
593effdb8bbSPhil Yang 					 RTE_MAX_NUMA_NODES);
594effdb8bbSPhil Yang 			}
595effdb8bbSPhil Yang 			socket_ids[num_sockets++] = socket_id;
596effdb8bbSPhil Yang 		}
597effdb8bbSPhil Yang 	}
598effdb8bbSPhil Yang 
599af75078fSIntel 	nb_cfg_ports = nb_ports;
600af75078fSIntel 	nb_fwd_ports = nb_ports;
601af75078fSIntel }
602af75078fSIntel 
/* Set up the complete default forwarding configuration:
 * forwarding lcores, peer Ethernet addresses and forwarding ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
610af75078fSIntel 
611c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
612c7f5dba7SAnatoly Burakov static int
613c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
614c7f5dba7SAnatoly Burakov {
615c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
616c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
617c7f5dba7SAnatoly Burakov 
618c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
619c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
620c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
621c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
622c7f5dba7SAnatoly Burakov 	 */
623c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
624c7f5dba7SAnatoly Burakov 
625c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
626c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
627c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
628c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
629c7f5dba7SAnatoly Burakov 		return -1;
630c7f5dba7SAnatoly Burakov 	}
631c7f5dba7SAnatoly Burakov 
632c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
633c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
634c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
635c7f5dba7SAnatoly Burakov 
636c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
637c7f5dba7SAnatoly Burakov 
638c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
639c7f5dba7SAnatoly Burakov 
640c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
641c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
642c7f5dba7SAnatoly Burakov 		return -1;
643c7f5dba7SAnatoly Burakov 	}
644c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
645c7f5dba7SAnatoly Burakov 
646c7f5dba7SAnatoly Burakov 	return 0;
647c7f5dba7SAnatoly Burakov }
648c7f5dba7SAnatoly Burakov 
649c7f5dba7SAnatoly Burakov static int
650c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
651c7f5dba7SAnatoly Burakov {
652c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
653c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
654c7f5dba7SAnatoly Burakov 	 */
6559d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
656c7f5dba7SAnatoly Burakov 
657c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
658c7f5dba7SAnatoly Burakov }
659c7f5dba7SAnatoly Burakov 
660c7f5dba7SAnatoly Burakov static void *
661c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
662c7f5dba7SAnatoly Burakov {
663c7f5dba7SAnatoly Burakov 	void *addr;
664c7f5dba7SAnatoly Burakov 	int flags;
665c7f5dba7SAnatoly Burakov 
666c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
667c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
668c7f5dba7SAnatoly Burakov 	if (huge)
669c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
670c7f5dba7SAnatoly Burakov 
671c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
672c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
673c7f5dba7SAnatoly Burakov 		return NULL;
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov 	return addr;
676c7f5dba7SAnatoly Burakov }
677c7f5dba7SAnatoly Burakov 
678c7f5dba7SAnatoly Burakov struct extmem_param {
679c7f5dba7SAnatoly Burakov 	void *addr;
680c7f5dba7SAnatoly Burakov 	size_t len;
681c7f5dba7SAnatoly Burakov 	size_t pgsz;
682c7f5dba7SAnatoly Burakov 	rte_iova_t *iova_table;
683c7f5dba7SAnatoly Burakov 	unsigned int iova_table_len;
684c7f5dba7SAnatoly Burakov };
685c7f5dba7SAnatoly Burakov 
686c7f5dba7SAnatoly Burakov static int
687c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
688c7f5dba7SAnatoly Burakov 		bool huge)
689c7f5dba7SAnatoly Burakov {
690c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
691c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
692c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
693c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
694c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
695c7f5dba7SAnatoly Burakov 	void *addr;
696c7f5dba7SAnatoly Burakov 	int ret;
697c7f5dba7SAnatoly Burakov 
698c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
699c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
700c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
701c7f5dba7SAnatoly Burakov 			continue;
702c7f5dba7SAnatoly Burakov 
703c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
704c7f5dba7SAnatoly Burakov 
705c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
706c7f5dba7SAnatoly Burakov 		if (!huge)
707c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
708c7f5dba7SAnatoly Burakov 
709c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
710c7f5dba7SAnatoly Burakov 		if (ret < 0) {
711c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
712c7f5dba7SAnatoly Burakov 			return -1;
713c7f5dba7SAnatoly Burakov 		}
714c7f5dba7SAnatoly Burakov 
715c7f5dba7SAnatoly Burakov 		/* allocate our memory */
716c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
717c7f5dba7SAnatoly Burakov 
718c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
719c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
720c7f5dba7SAnatoly Burakov 		 * try another one.
721c7f5dba7SAnatoly Burakov 		 */
722c7f5dba7SAnatoly Burakov 		if (addr == NULL)
723c7f5dba7SAnatoly Burakov 			continue;
724c7f5dba7SAnatoly Burakov 
725c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
726c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
727c7f5dba7SAnatoly Burakov 
728c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
729c7f5dba7SAnatoly Burakov 
730c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
731c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
732c7f5dba7SAnatoly Burakov 			goto fail;
733c7f5dba7SAnatoly Burakov 		}
734c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
735c7f5dba7SAnatoly Burakov 		if (!huge)
736c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
737c7f5dba7SAnatoly Burakov 
738c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
739c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
740c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
741c7f5dba7SAnatoly Burakov 			size_t offset;
742c7f5dba7SAnatoly Burakov 			void *cur;
743c7f5dba7SAnatoly Burakov 
744c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
745c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
746c7f5dba7SAnatoly Burakov 
747c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
748c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
749c7f5dba7SAnatoly Burakov 
750c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
751c7f5dba7SAnatoly Burakov 
752c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
753c7f5dba7SAnatoly Burakov 		}
754c7f5dba7SAnatoly Burakov 
755c7f5dba7SAnatoly Burakov 		break;
756c7f5dba7SAnatoly Burakov 	}
757c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
758c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
759c7f5dba7SAnatoly Burakov 		return -1;
760c7f5dba7SAnatoly Burakov 
761c7f5dba7SAnatoly Burakov 	param->addr = addr;
762c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
763c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
764c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
765c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
766c7f5dba7SAnatoly Burakov 
767c7f5dba7SAnatoly Burakov 	return 0;
768c7f5dba7SAnatoly Burakov fail:
769c7f5dba7SAnatoly Burakov 	if (iovas)
770c7f5dba7SAnatoly Burakov 		free(iovas);
771c7f5dba7SAnatoly Burakov 	if (addr)
772c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
773c7f5dba7SAnatoly Burakov 
774c7f5dba7SAnatoly Burakov 	return -1;
775c7f5dba7SAnatoly Burakov }
776c7f5dba7SAnatoly Burakov 
777c7f5dba7SAnatoly Burakov static int
778c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
779c7f5dba7SAnatoly Burakov {
780c7f5dba7SAnatoly Burakov 	struct extmem_param param;
781c7f5dba7SAnatoly Burakov 	int socket_id, ret;
782c7f5dba7SAnatoly Burakov 
783c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
784c7f5dba7SAnatoly Burakov 
785c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
786c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
787c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
788c7f5dba7SAnatoly Burakov 		/* create our heap */
789c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
790c7f5dba7SAnatoly Burakov 		if (ret < 0) {
791c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
792c7f5dba7SAnatoly Burakov 			return -1;
793c7f5dba7SAnatoly Burakov 		}
794c7f5dba7SAnatoly Burakov 	}
795c7f5dba7SAnatoly Burakov 
796c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
797c7f5dba7SAnatoly Burakov 	if (ret < 0) {
798c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
799c7f5dba7SAnatoly Burakov 		return -1;
800c7f5dba7SAnatoly Burakov 	}
801c7f5dba7SAnatoly Burakov 
802c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
803c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
804c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
805c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
806c7f5dba7SAnatoly Burakov 
807c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
808c7f5dba7SAnatoly Burakov 
809c7f5dba7SAnatoly Burakov 	/* not needed any more */
810c7f5dba7SAnatoly Burakov 	free(param.iova_table);
811c7f5dba7SAnatoly Burakov 
812c7f5dba7SAnatoly Burakov 	if (ret < 0) {
813c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
814c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
815c7f5dba7SAnatoly Burakov 		return -1;
816c7f5dba7SAnatoly Burakov 	}
817c7f5dba7SAnatoly Burakov 
818c7f5dba7SAnatoly Burakov 	/* success */
819c7f5dba7SAnatoly Burakov 
820c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
821c7f5dba7SAnatoly Burakov 			param.len >> 20);
822c7f5dba7SAnatoly Burakov 
823c7f5dba7SAnatoly Burakov 	return 0;
824c7f5dba7SAnatoly Burakov }
8253a0968c8SShahaf Shuler static void
8263a0968c8SShahaf Shuler dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8273a0968c8SShahaf Shuler 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8283a0968c8SShahaf Shuler {
8293a0968c8SShahaf Shuler 	uint16_t pid = 0;
8303a0968c8SShahaf Shuler 	int ret;
8313a0968c8SShahaf Shuler 
8323a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8333a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8343a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8353a0968c8SShahaf Shuler 
8363a0968c8SShahaf Shuler 		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
8373a0968c8SShahaf Shuler 					memhdr->len);
8383a0968c8SShahaf Shuler 		if (ret) {
8393a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8403a0968c8SShahaf Shuler 				    "unable to DMA unmap addr 0x%p "
8413a0968c8SShahaf Shuler 				    "for device %s\n",
8423a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8433a0968c8SShahaf Shuler 		}
8443a0968c8SShahaf Shuler 	}
8453a0968c8SShahaf Shuler 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
8463a0968c8SShahaf Shuler 	if (ret) {
8473a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8483a0968c8SShahaf Shuler 			    "unable to un-register addr 0x%p\n", memhdr->addr);
8493a0968c8SShahaf Shuler 	}
8503a0968c8SShahaf Shuler }
8513a0968c8SShahaf Shuler 
8523a0968c8SShahaf Shuler static void
8533a0968c8SShahaf Shuler dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
8543a0968c8SShahaf Shuler 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
8553a0968c8SShahaf Shuler {
8563a0968c8SShahaf Shuler 	uint16_t pid = 0;
8573a0968c8SShahaf Shuler 	size_t page_size = sysconf(_SC_PAGESIZE);
8583a0968c8SShahaf Shuler 	int ret;
8593a0968c8SShahaf Shuler 
8603a0968c8SShahaf Shuler 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
8613a0968c8SShahaf Shuler 				  page_size);
8623a0968c8SShahaf Shuler 	if (ret) {
8633a0968c8SShahaf Shuler 		TESTPMD_LOG(DEBUG,
8643a0968c8SShahaf Shuler 			    "unable to register addr 0x%p\n", memhdr->addr);
8653a0968c8SShahaf Shuler 		return;
8663a0968c8SShahaf Shuler 	}
8673a0968c8SShahaf Shuler 	RTE_ETH_FOREACH_DEV(pid) {
8683a0968c8SShahaf Shuler 		struct rte_eth_dev *dev =
8693a0968c8SShahaf Shuler 			&rte_eth_devices[pid];
8703a0968c8SShahaf Shuler 
8713a0968c8SShahaf Shuler 		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
8723a0968c8SShahaf Shuler 				      memhdr->len);
8733a0968c8SShahaf Shuler 		if (ret) {
8743a0968c8SShahaf Shuler 			TESTPMD_LOG(DEBUG,
8753a0968c8SShahaf Shuler 				    "unable to DMA map addr 0x%p "
8763a0968c8SShahaf Shuler 				    "for device %s\n",
8773a0968c8SShahaf Shuler 				    memhdr->addr, dev->data->name);
8783a0968c8SShahaf Shuler 		}
8793a0968c8SShahaf Shuler 	}
8803a0968c8SShahaf Shuler }
881c7f5dba7SAnatoly Burakov 
88272512e18SViacheslav Ovsiienko static unsigned int
88372512e18SViacheslav Ovsiienko setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
88472512e18SViacheslav Ovsiienko 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
88572512e18SViacheslav Ovsiienko {
88672512e18SViacheslav Ovsiienko 	struct rte_pktmbuf_extmem *xmem;
88772512e18SViacheslav Ovsiienko 	unsigned int ext_num, zone_num, elt_num;
88872512e18SViacheslav Ovsiienko 	uint16_t elt_size;
88972512e18SViacheslav Ovsiienko 
89072512e18SViacheslav Ovsiienko 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
89172512e18SViacheslav Ovsiienko 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
89272512e18SViacheslav Ovsiienko 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
89372512e18SViacheslav Ovsiienko 
89472512e18SViacheslav Ovsiienko 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
89572512e18SViacheslav Ovsiienko 	if (xmem == NULL) {
89672512e18SViacheslav Ovsiienko 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
89772512e18SViacheslav Ovsiienko 				 "external buffer descriptors\n");
89872512e18SViacheslav Ovsiienko 		*ext_mem = NULL;
89972512e18SViacheslav Ovsiienko 		return 0;
90072512e18SViacheslav Ovsiienko 	}
90172512e18SViacheslav Ovsiienko 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
90272512e18SViacheslav Ovsiienko 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
90372512e18SViacheslav Ovsiienko 		const struct rte_memzone *mz;
90472512e18SViacheslav Ovsiienko 		char mz_name[RTE_MEMZONE_NAMESIZE];
90572512e18SViacheslav Ovsiienko 		int ret;
90672512e18SViacheslav Ovsiienko 
90772512e18SViacheslav Ovsiienko 		ret = snprintf(mz_name, sizeof(mz_name),
90872512e18SViacheslav Ovsiienko 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
90972512e18SViacheslav Ovsiienko 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
91072512e18SViacheslav Ovsiienko 			errno = ENAMETOOLONG;
91172512e18SViacheslav Ovsiienko 			ext_num = 0;
91272512e18SViacheslav Ovsiienko 			break;
91372512e18SViacheslav Ovsiienko 		}
91472512e18SViacheslav Ovsiienko 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
91572512e18SViacheslav Ovsiienko 						 socket_id,
91672512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_IOVA_CONTIG |
91772512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_1GB |
91872512e18SViacheslav Ovsiienko 						 RTE_MEMZONE_SIZE_HINT_ONLY,
91972512e18SViacheslav Ovsiienko 						 EXTBUF_ZONE_SIZE);
92072512e18SViacheslav Ovsiienko 		if (mz == NULL) {
92172512e18SViacheslav Ovsiienko 			/*
92272512e18SViacheslav Ovsiienko 			 * The caller exits on external buffer creation
92372512e18SViacheslav Ovsiienko 			 * error, so there is no need to free memzones.
92472512e18SViacheslav Ovsiienko 			 */
92572512e18SViacheslav Ovsiienko 			errno = ENOMEM;
92672512e18SViacheslav Ovsiienko 			ext_num = 0;
92772512e18SViacheslav Ovsiienko 			break;
92872512e18SViacheslav Ovsiienko 		}
92972512e18SViacheslav Ovsiienko 		xseg->buf_ptr = mz->addr;
93072512e18SViacheslav Ovsiienko 		xseg->buf_iova = mz->iova;
93172512e18SViacheslav Ovsiienko 		xseg->buf_len = EXTBUF_ZONE_SIZE;
93272512e18SViacheslav Ovsiienko 		xseg->elt_size = elt_size;
93372512e18SViacheslav Ovsiienko 	}
93472512e18SViacheslav Ovsiienko 	if (ext_num == 0 && xmem != NULL) {
93572512e18SViacheslav Ovsiienko 		free(xmem);
93672512e18SViacheslav Ovsiienko 		xmem = NULL;
93772512e18SViacheslav Ovsiienko 	}
93872512e18SViacheslav Ovsiienko 	*ext_mem = xmem;
93972512e18SViacheslav Ovsiienko 	return ext_num;
94072512e18SViacheslav Ovsiienko }
94172512e18SViacheslav Ovsiienko 
942af75078fSIntel /*
943af75078fSIntel  * Configuration initialisation done once at init time.
944af75078fSIntel  */
945401b744dSShahaf Shuler static struct rte_mempool *
946af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
947af75078fSIntel 		 unsigned int socket_id)
948af75078fSIntel {
949af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
950bece7b6cSChristian Ehrhardt 	struct rte_mempool *rte_mp = NULL;
951af75078fSIntel 	uint32_t mb_size;
952af75078fSIntel 
953dfb03bbeSOlivier Matz 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
954af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
955148f963fSBruce Richardson 
956285fd101SOlivier Matz 	TESTPMD_LOG(INFO,
957d1eb542eSOlivier Matz 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
958d1eb542eSOlivier Matz 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
959d1eb542eSOlivier Matz 
960c7f5dba7SAnatoly Burakov 	switch (mp_alloc_type) {
961c7f5dba7SAnatoly Burakov 	case MP_ALLOC_NATIVE:
962c7f5dba7SAnatoly Burakov 		{
963c7f5dba7SAnatoly Burakov 			/* wrapper to rte_mempool_create() */
964c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
965c7f5dba7SAnatoly Burakov 					rte_mbuf_best_mempool_ops());
966c7f5dba7SAnatoly Burakov 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
967c7f5dba7SAnatoly Burakov 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
968c7f5dba7SAnatoly Burakov 			break;
969c7f5dba7SAnatoly Burakov 		}
970c7f5dba7SAnatoly Burakov 	case MP_ALLOC_ANON:
971c7f5dba7SAnatoly Burakov 		{
972b19a0c75SOlivier Matz 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
973c7f5dba7SAnatoly Burakov 				mb_size, (unsigned int) mb_mempool_cache,
974148f963fSBruce Richardson 				sizeof(struct rte_pktmbuf_pool_private),
97559fcf854SShahaf Shuler 				socket_id, mempool_flags);
97624427bb9SOlivier Matz 			if (rte_mp == NULL)
97724427bb9SOlivier Matz 				goto err;
978b19a0c75SOlivier Matz 
979b19a0c75SOlivier Matz 			if (rte_mempool_populate_anon(rte_mp) == 0) {
980b19a0c75SOlivier Matz 				rte_mempool_free(rte_mp);
981b19a0c75SOlivier Matz 				rte_mp = NULL;
98224427bb9SOlivier Matz 				goto err;
983b19a0c75SOlivier Matz 			}
984b19a0c75SOlivier Matz 			rte_pktmbuf_pool_init(rte_mp, NULL);
985b19a0c75SOlivier Matz 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
9863a0968c8SShahaf Shuler 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
987c7f5dba7SAnatoly Burakov 			break;
988c7f5dba7SAnatoly Burakov 		}
989c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM:
990c7f5dba7SAnatoly Burakov 	case MP_ALLOC_XMEM_HUGE:
991c7f5dba7SAnatoly Burakov 		{
992c7f5dba7SAnatoly Burakov 			int heap_socket;
993c7f5dba7SAnatoly Burakov 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
994c7f5dba7SAnatoly Burakov 
995c7f5dba7SAnatoly Burakov 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
996c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
997c7f5dba7SAnatoly Burakov 
998c7f5dba7SAnatoly Burakov 			heap_socket =
999c7f5dba7SAnatoly Burakov 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1000c7f5dba7SAnatoly Burakov 			if (heap_socket < 0)
1001c7f5dba7SAnatoly Burakov 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1002c7f5dba7SAnatoly Burakov 
10030e798567SPavan Nikhilesh 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
10040e798567SPavan Nikhilesh 					rte_mbuf_best_mempool_ops());
1005ea0c20eaSOlivier Matz 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1006c7f5dba7SAnatoly Burakov 					mb_mempool_cache, 0, mbuf_seg_size,
1007c7f5dba7SAnatoly Burakov 					heap_socket);
1008c7f5dba7SAnatoly Burakov 			break;
1009c7f5dba7SAnatoly Burakov 		}
101072512e18SViacheslav Ovsiienko 	case MP_ALLOC_XBUF:
101172512e18SViacheslav Ovsiienko 		{
101272512e18SViacheslav Ovsiienko 			struct rte_pktmbuf_extmem *ext_mem;
101372512e18SViacheslav Ovsiienko 			unsigned int ext_num;
101472512e18SViacheslav Ovsiienko 
101572512e18SViacheslav Ovsiienko 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
101672512e18SViacheslav Ovsiienko 					       socket_id, pool_name, &ext_mem);
101772512e18SViacheslav Ovsiienko 			if (ext_num == 0)
101872512e18SViacheslav Ovsiienko 				rte_exit(EXIT_FAILURE,
101972512e18SViacheslav Ovsiienko 					 "Can't create pinned data buffers\n");
102072512e18SViacheslav Ovsiienko 
102172512e18SViacheslav Ovsiienko 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
102272512e18SViacheslav Ovsiienko 					rte_mbuf_best_mempool_ops());
102372512e18SViacheslav Ovsiienko 			rte_mp = rte_pktmbuf_pool_create_extbuf
102472512e18SViacheslav Ovsiienko 					(pool_name, nb_mbuf, mb_mempool_cache,
102572512e18SViacheslav Ovsiienko 					 0, mbuf_seg_size, socket_id,
102672512e18SViacheslav Ovsiienko 					 ext_mem, ext_num);
102772512e18SViacheslav Ovsiienko 			free(ext_mem);
102872512e18SViacheslav Ovsiienko 			break;
102972512e18SViacheslav Ovsiienko 		}
1030c7f5dba7SAnatoly Burakov 	default:
1031c7f5dba7SAnatoly Burakov 		{
1032c7f5dba7SAnatoly Burakov 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1033c7f5dba7SAnatoly Burakov 		}
1034bece7b6cSChristian Ehrhardt 	}
1035148f963fSBruce Richardson 
103624427bb9SOlivier Matz err:
1037af75078fSIntel 	if (rte_mp == NULL) {
1038d1eb542eSOlivier Matz 		rte_exit(EXIT_FAILURE,
1039d1eb542eSOlivier Matz 			"Creation of mbuf pool for socket %u failed: %s\n",
1040d1eb542eSOlivier Matz 			socket_id, rte_strerror(rte_errno));
1041148f963fSBruce Richardson 	} else if (verbose_level > 0) {
1042591a9d79SStephen Hemminger 		rte_mempool_dump(stdout, rte_mp);
1043af75078fSIntel 	}
1044401b744dSShahaf Shuler 	return rte_mp;
1045af75078fSIntel }
1046af75078fSIntel 
104720a0286fSLiu Xiaofeng /*
104820a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
104920a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
105020a0286fSLiu Xiaofeng  */
105120a0286fSLiu Xiaofeng static int
105220a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
105320a0286fSLiu Xiaofeng {
105420a0286fSLiu Xiaofeng 	static int warning_once = 0;
105520a0286fSLiu Xiaofeng 
1056c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
105720a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
105820a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
105920a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
106020a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
106120a0286fSLiu Xiaofeng 			       " --numa.\n");
106220a0286fSLiu Xiaofeng 		warning_once = 1;
106320a0286fSLiu Xiaofeng 		return -1;
106420a0286fSLiu Xiaofeng 	}
106520a0286fSLiu Xiaofeng 	return 0;
106620a0286fSLiu Xiaofeng }
106720a0286fSLiu Xiaofeng 
10683f7311baSWei Dai /*
10693f7311baSWei Dai  * Get the allowed maximum number of RX queues.
10703f7311baSWei Dai  * *pid return the port id which has minimal value of
10713f7311baSWei Dai  * max_rx_queues in all ports.
10723f7311baSWei Dai  */
10733f7311baSWei Dai queueid_t
10743f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
10753f7311baSWei Dai {
10769e6b36c3SDavid Marchand 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
10776f51deb9SIvan Ilchenko 	bool max_rxq_valid = false;
10783f7311baSWei Dai 	portid_t pi;
10793f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
10803f7311baSWei Dai 
10813f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
10826f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
10836f51deb9SIvan Ilchenko 			continue;
10846f51deb9SIvan Ilchenko 
10856f51deb9SIvan Ilchenko 		max_rxq_valid = true;
10863f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
10873f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
10883f7311baSWei Dai 			*pid = pi;
10893f7311baSWei Dai 		}
10903f7311baSWei Dai 	}
10916f51deb9SIvan Ilchenko 	return max_rxq_valid ? allowed_max_rxq : 0;
10923f7311baSWei Dai }
10933f7311baSWei Dai 
10943f7311baSWei Dai /*
10953f7311baSWei Dai  * Check input rxq is valid or not.
10963f7311baSWei Dai  * If input rxq is not greater than any of maximum number
10973f7311baSWei Dai  * of RX queues of all ports, it is valid.
10983f7311baSWei Dai  * if valid, return 0, else return -1
10993f7311baSWei Dai  */
11003f7311baSWei Dai int
11013f7311baSWei Dai check_nb_rxq(queueid_t rxq)
11023f7311baSWei Dai {
11033f7311baSWei Dai 	queueid_t allowed_max_rxq;
11043f7311baSWei Dai 	portid_t pid = 0;
11053f7311baSWei Dai 
11063f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
11073f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
11083f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
11093f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
11103f7311baSWei Dai 		       rxq,
11113f7311baSWei Dai 		       allowed_max_rxq,
11123f7311baSWei Dai 		       pid);
11133f7311baSWei Dai 		return -1;
11143f7311baSWei Dai 	}
11153f7311baSWei Dai 	return 0;
11163f7311baSWei Dai }
11173f7311baSWei Dai 
111836db4f6cSWei Dai /*
111936db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
112036db4f6cSWei Dai  * *pid return the port id which has minimal value of
112136db4f6cSWei Dai  * max_tx_queues in all ports.
112236db4f6cSWei Dai  */
112336db4f6cSWei Dai queueid_t
112436db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
112536db4f6cSWei Dai {
11269e6b36c3SDavid Marchand 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
11276f51deb9SIvan Ilchenko 	bool max_txq_valid = false;
112836db4f6cSWei Dai 	portid_t pi;
112936db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
113036db4f6cSWei Dai 
113136db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
11326f51deb9SIvan Ilchenko 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
11336f51deb9SIvan Ilchenko 			continue;
11346f51deb9SIvan Ilchenko 
11356f51deb9SIvan Ilchenko 		max_txq_valid = true;
113636db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
113736db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
113836db4f6cSWei Dai 			*pid = pi;
113936db4f6cSWei Dai 		}
114036db4f6cSWei Dai 	}
11416f51deb9SIvan Ilchenko 	return max_txq_valid ? allowed_max_txq : 0;
114236db4f6cSWei Dai }
114336db4f6cSWei Dai 
114436db4f6cSWei Dai /*
114536db4f6cSWei Dai  * Check input txq is valid or not.
114636db4f6cSWei Dai  * If input txq is not greater than any of maximum number
114736db4f6cSWei Dai  * of TX queues of all ports, it is valid.
114836db4f6cSWei Dai  * if valid, return 0, else return -1
114936db4f6cSWei Dai  */
115036db4f6cSWei Dai int
115136db4f6cSWei Dai check_nb_txq(queueid_t txq)
115236db4f6cSWei Dai {
115336db4f6cSWei Dai 	queueid_t allowed_max_txq;
115436db4f6cSWei Dai 	portid_t pid = 0;
115536db4f6cSWei Dai 
115636db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
115736db4f6cSWei Dai 	if (txq > allowed_max_txq) {
115836db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
115936db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
116036db4f6cSWei Dai 		       txq,
116136db4f6cSWei Dai 		       allowed_max_txq,
116236db4f6cSWei Dai 		       pid);
116336db4f6cSWei Dai 		return -1;
116436db4f6cSWei Dai 	}
116536db4f6cSWei Dai 	return 0;
116636db4f6cSWei Dai }
116736db4f6cSWei Dai 
11681c69df45SOri Kam /*
116999e040d3SLijun Ou  * Get the allowed maximum number of RXDs of every rx queue.
117099e040d3SLijun Ou  * *pid return the port id which has minimal value of
117199e040d3SLijun Ou  * max_rxd in all queues of all ports.
117299e040d3SLijun Ou  */
117399e040d3SLijun Ou static uint16_t
117499e040d3SLijun Ou get_allowed_max_nb_rxd(portid_t *pid)
117599e040d3SLijun Ou {
117699e040d3SLijun Ou 	uint16_t allowed_max_rxd = UINT16_MAX;
117799e040d3SLijun Ou 	portid_t pi;
117899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
117999e040d3SLijun Ou 
118099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
118199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
118299e040d3SLijun Ou 			continue;
118399e040d3SLijun Ou 
118499e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
118599e040d3SLijun Ou 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
118699e040d3SLijun Ou 			*pid = pi;
118799e040d3SLijun Ou 		}
118899e040d3SLijun Ou 	}
118999e040d3SLijun Ou 	return allowed_max_rxd;
119099e040d3SLijun Ou }
119199e040d3SLijun Ou 
119299e040d3SLijun Ou /*
119399e040d3SLijun Ou  * Get the allowed minimal number of RXDs of every rx queue.
119499e040d3SLijun Ou  * *pid return the port id which has minimal value of
119599e040d3SLijun Ou  * min_rxd in all queues of all ports.
119699e040d3SLijun Ou  */
119799e040d3SLijun Ou static uint16_t
119899e040d3SLijun Ou get_allowed_min_nb_rxd(portid_t *pid)
119999e040d3SLijun Ou {
120099e040d3SLijun Ou 	uint16_t allowed_min_rxd = 0;
120199e040d3SLijun Ou 	portid_t pi;
120299e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
120399e040d3SLijun Ou 
120499e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
120599e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
120699e040d3SLijun Ou 			continue;
120799e040d3SLijun Ou 
120899e040d3SLijun Ou 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
120999e040d3SLijun Ou 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
121099e040d3SLijun Ou 			*pid = pi;
121199e040d3SLijun Ou 		}
121299e040d3SLijun Ou 	}
121399e040d3SLijun Ou 
121499e040d3SLijun Ou 	return allowed_min_rxd;
121599e040d3SLijun Ou }
121699e040d3SLijun Ou 
121799e040d3SLijun Ou /*
121899e040d3SLijun Ou  * Check input rxd is valid or not.
121999e040d3SLijun Ou  * If input rxd is not greater than any of maximum number
122099e040d3SLijun Ou  * of RXDs of every Rx queues and is not less than any of
122199e040d3SLijun Ou  * minimal number of RXDs of every Rx queues, it is valid.
122299e040d3SLijun Ou  * if valid, return 0, else return -1
122399e040d3SLijun Ou  */
122499e040d3SLijun Ou int
122599e040d3SLijun Ou check_nb_rxd(queueid_t rxd)
122699e040d3SLijun Ou {
122799e040d3SLijun Ou 	uint16_t allowed_max_rxd;
122899e040d3SLijun Ou 	uint16_t allowed_min_rxd;
122999e040d3SLijun Ou 	portid_t pid = 0;
123099e040d3SLijun Ou 
123199e040d3SLijun Ou 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
123299e040d3SLijun Ou 	if (rxd > allowed_max_rxd) {
123399e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be greater "
123499e040d3SLijun Ou 		       "than max_rxds (%u) of port %u\n",
123599e040d3SLijun Ou 		       rxd,
123699e040d3SLijun Ou 		       allowed_max_rxd,
123799e040d3SLijun Ou 		       pid);
123899e040d3SLijun Ou 		return -1;
123999e040d3SLijun Ou 	}
124099e040d3SLijun Ou 
124199e040d3SLijun Ou 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
124299e040d3SLijun Ou 	if (rxd < allowed_min_rxd) {
124399e040d3SLijun Ou 		printf("Fail: input rxd (%u) can't be less "
124499e040d3SLijun Ou 		       "than min_rxds (%u) of port %u\n",
124599e040d3SLijun Ou 		       rxd,
124699e040d3SLijun Ou 		       allowed_min_rxd,
124799e040d3SLijun Ou 		       pid);
124899e040d3SLijun Ou 		return -1;
124999e040d3SLijun Ou 	}
125099e040d3SLijun Ou 
125199e040d3SLijun Ou 	return 0;
125299e040d3SLijun Ou }
125399e040d3SLijun Ou 
125499e040d3SLijun Ou /*
125599e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every rx queues.
125699e040d3SLijun Ou  * *pid return the port id which has minimal value of
125799e040d3SLijun Ou  * max_txd in every tx queue.
125899e040d3SLijun Ou  */
125999e040d3SLijun Ou static uint16_t
126099e040d3SLijun Ou get_allowed_max_nb_txd(portid_t *pid)
126199e040d3SLijun Ou {
126299e040d3SLijun Ou 	uint16_t allowed_max_txd = UINT16_MAX;
126399e040d3SLijun Ou 	portid_t pi;
126499e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
126599e040d3SLijun Ou 
126699e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
126799e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
126899e040d3SLijun Ou 			continue;
126999e040d3SLijun Ou 
127099e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
127199e040d3SLijun Ou 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
127299e040d3SLijun Ou 			*pid = pi;
127399e040d3SLijun Ou 		}
127499e040d3SLijun Ou 	}
127599e040d3SLijun Ou 	return allowed_max_txd;
127699e040d3SLijun Ou }
127799e040d3SLijun Ou 
127899e040d3SLijun Ou /*
127999e040d3SLijun Ou  * Get the allowed maximum number of TXDs of every tx queues.
128099e040d3SLijun Ou  * *pid return the port id which has minimal value of
128199e040d3SLijun Ou  * min_txd in every tx queue.
128299e040d3SLijun Ou  */
128399e040d3SLijun Ou static uint16_t
128499e040d3SLijun Ou get_allowed_min_nb_txd(portid_t *pid)
128599e040d3SLijun Ou {
128699e040d3SLijun Ou 	uint16_t allowed_min_txd = 0;
128799e040d3SLijun Ou 	portid_t pi;
128899e040d3SLijun Ou 	struct rte_eth_dev_info dev_info;
128999e040d3SLijun Ou 
129099e040d3SLijun Ou 	RTE_ETH_FOREACH_DEV(pi) {
129199e040d3SLijun Ou 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
129299e040d3SLijun Ou 			continue;
129399e040d3SLijun Ou 
129499e040d3SLijun Ou 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
129599e040d3SLijun Ou 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
129699e040d3SLijun Ou 			*pid = pi;
129799e040d3SLijun Ou 		}
129899e040d3SLijun Ou 	}
129999e040d3SLijun Ou 
130099e040d3SLijun Ou 	return allowed_min_txd;
130199e040d3SLijun Ou }
130299e040d3SLijun Ou 
130399e040d3SLijun Ou /*
130499e040d3SLijun Ou  * Check input txd is valid or not.
130599e040d3SLijun Ou  * If input txd is not greater than any of maximum number
130699e040d3SLijun Ou  * of TXDs of every Rx queues, it is valid.
130799e040d3SLijun Ou  * if valid, return 0, else return -1
130899e040d3SLijun Ou  */
130999e040d3SLijun Ou int
131099e040d3SLijun Ou check_nb_txd(queueid_t txd)
131199e040d3SLijun Ou {
131299e040d3SLijun Ou 	uint16_t allowed_max_txd;
131399e040d3SLijun Ou 	uint16_t allowed_min_txd;
131499e040d3SLijun Ou 	portid_t pid = 0;
131599e040d3SLijun Ou 
131699e040d3SLijun Ou 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
131799e040d3SLijun Ou 	if (txd > allowed_max_txd) {
131899e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be greater "
131999e040d3SLijun Ou 		       "than max_txds (%u) of port %u\n",
132099e040d3SLijun Ou 		       txd,
132199e040d3SLijun Ou 		       allowed_max_txd,
132299e040d3SLijun Ou 		       pid);
132399e040d3SLijun Ou 		return -1;
132499e040d3SLijun Ou 	}
132599e040d3SLijun Ou 
132699e040d3SLijun Ou 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
132799e040d3SLijun Ou 	if (txd < allowed_min_txd) {
132899e040d3SLijun Ou 		printf("Fail: input txd (%u) can't be less "
132999e040d3SLijun Ou 		       "than min_txds (%u) of port %u\n",
133099e040d3SLijun Ou 		       txd,
133199e040d3SLijun Ou 		       allowed_min_txd,
133299e040d3SLijun Ou 		       pid);
133399e040d3SLijun Ou 		return -1;
133499e040d3SLijun Ou 	}
133599e040d3SLijun Ou 	return 0;
133699e040d3SLijun Ou }
133799e040d3SLijun Ou 
133899e040d3SLijun Ou 
133999e040d3SLijun Ou /*
13401c69df45SOri Kam  * Get the allowed maximum number of hairpin queues.
13411c69df45SOri Kam  * *pid return the port id which has minimal value of
13421c69df45SOri Kam  * max_hairpin_queues in all ports.
13431c69df45SOri Kam  */
13441c69df45SOri Kam queueid_t
13451c69df45SOri Kam get_allowed_max_nb_hairpinq(portid_t *pid)
13461c69df45SOri Kam {
13479e6b36c3SDavid Marchand 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
13481c69df45SOri Kam 	portid_t pi;
13491c69df45SOri Kam 	struct rte_eth_hairpin_cap cap;
13501c69df45SOri Kam 
13511c69df45SOri Kam 	RTE_ETH_FOREACH_DEV(pi) {
13521c69df45SOri Kam 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
13531c69df45SOri Kam 			*pid = pi;
13541c69df45SOri Kam 			return 0;
13551c69df45SOri Kam 		}
13561c69df45SOri Kam 		if (cap.max_nb_queues < allowed_max_hairpinq) {
13571c69df45SOri Kam 			allowed_max_hairpinq = cap.max_nb_queues;
13581c69df45SOri Kam 			*pid = pi;
13591c69df45SOri Kam 		}
13601c69df45SOri Kam 	}
13611c69df45SOri Kam 	return allowed_max_hairpinq;
13621c69df45SOri Kam }
13631c69df45SOri Kam 
13641c69df45SOri Kam /*
13651c69df45SOri Kam  * Check input hairpin is valid or not.
13661c69df45SOri Kam  * If input hairpin is not greater than any of maximum number
13671c69df45SOri Kam  * of hairpin queues of all ports, it is valid.
13681c69df45SOri Kam  * if valid, return 0, else return -1
13691c69df45SOri Kam  */
13701c69df45SOri Kam int
13711c69df45SOri Kam check_nb_hairpinq(queueid_t hairpinq)
13721c69df45SOri Kam {
13731c69df45SOri Kam 	queueid_t allowed_max_hairpinq;
13741c69df45SOri Kam 	portid_t pid = 0;
13751c69df45SOri Kam 
13761c69df45SOri Kam 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
13771c69df45SOri Kam 	if (hairpinq > allowed_max_hairpinq) {
13781c69df45SOri Kam 		printf("Fail: input hairpin (%u) can't be greater "
13791c69df45SOri Kam 		       "than max_hairpin_queues (%u) of port %u\n",
13801c69df45SOri Kam 		       hairpinq, allowed_max_hairpinq, pid);
13811c69df45SOri Kam 		return -1;
13821c69df45SOri Kam 	}
13831c69df45SOri Kam 	return 0;
13841c69df45SOri Kam }
13851c69df45SOri Kam 
/*
 * Global testpmd initialization: allocate the per-lcore forwarding
 * contexts, apply the default Rx/Tx configuration to every attached
 * port, create the mbuf pool(s), and set up the forwarding streams
 * plus the per-lcore GSO and GRO contexts.
 * Exits the application (rte_exit) on any failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/* Per-port default configuration and capability discovery. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		/* Drop FAST_FREE from the default offloads if unsupported. */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			/* Count ports per NUMA socket for pool sizing. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One mbuf pool per detected NUMA socket. */
		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
							(mbuf_data_size,
							 nb_mbuf_per_pool,
							 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		/* Prefer the pool on the lcore's own socket; fall back to 0. */
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
1568ce8d5614SIntel 
15692950a769SDeclan Doherty 
15702950a769SDeclan Doherty void
1571a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
15722950a769SDeclan Doherty {
15732950a769SDeclan Doherty 	struct rte_port *port;
15746f51deb9SIvan Ilchenko 	int ret;
15752950a769SDeclan Doherty 
15762950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
15772950a769SDeclan Doherty 	port = &ports[new_port_id];
15786f51deb9SIvan Ilchenko 
15796f51deb9SIvan Ilchenko 	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
15806f51deb9SIvan Ilchenko 	if (ret != 0)
15816f51deb9SIvan Ilchenko 		return;
15822950a769SDeclan Doherty 
15832950a769SDeclan Doherty 	/* set flag to initialize port/queue */
15842950a769SDeclan Doherty 	port->need_reconfig = 1;
15852950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1586a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
15872950a769SDeclan Doherty 
15882950a769SDeclan Doherty 	init_port_config();
15892950a769SDeclan Doherty }
15902950a769SDeclan Doherty 
15912950a769SDeclan Doherty 
/*
 * (Re)allocate the global array of forwarding streams.
 * First validates nb_rxq/nb_txq against every port's limits and
 * assigns each port's socket id (NUMA-aware when enabled).  The old
 * stream array is freed and a new one of nb_ports * max(nb_rxq, nb_txq)
 * entries is allocated; kept as-is if the count is unchanged.
 * Returns 0 on success, -1 on invalid queue counts.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per (port, queue) pair, using the larger queue count. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1680af75078fSIntel 
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print the distribution of observed burst sizes for one direction
 * ("RX" or "TX") of a stream, as percentages of the total bursts.
 * Slot 0 is always the zero-sized burst; slots 1 and 2 hold the two
 * most frequent non-zero burst sizes; everything else is lumped
 * together as "other".  Prints nothing if no bursts were recorded.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];	/* occurrence counts for the shown slots */
	uint16_t pktnb_stats[4];	/* burst size (pkts) for each slot */
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		/* Keep burst_stats[1] >= burst_stats[2] (top two sizes). */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		/* Last slot: whatever percentage is left over is "other". */
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		/* Stop early once the shown slots account for every burst. */
		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1749af75078fSIntel 
1750af75078fSIntel static void
1751af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1752af75078fSIntel {
1753af75078fSIntel 	struct fwd_stream *fs;
1754af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1755af75078fSIntel 
1756af75078fSIntel 	fs = fwd_streams[stream_id];
1757af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1758af75078fSIntel 	    (fs->fwd_dropped == 0))
1759af75078fSIntel 		return;
1760af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1761af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1762af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1763af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1764c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1765c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1766af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1767af75078fSIntel 
1768af75078fSIntel 	/* if checksum mode */
1769af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1770c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1771c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1772c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
177358d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
177458d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
177594d65546SDavid Marchand 	} else {
177694d65546SDavid Marchand 		printf("\n");
1777af75078fSIntel 	}
1778af75078fSIntel 
1779af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1780af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1781af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1782af75078fSIntel #endif
1783af75078fSIntel }
1784af75078fSIntel 
178553324971SDavid Marchand void
178653324971SDavid Marchand fwd_stats_display(void)
178753324971SDavid Marchand {
178853324971SDavid Marchand 	static const char *fwd_stats_border = "----------------------";
178953324971SDavid Marchand 	static const char *acc_stats_border = "+++++++++++++++";
179053324971SDavid Marchand 	struct {
179153324971SDavid Marchand 		struct fwd_stream *rx_stream;
179253324971SDavid Marchand 		struct fwd_stream *tx_stream;
179353324971SDavid Marchand 		uint64_t tx_dropped;
179453324971SDavid Marchand 		uint64_t rx_bad_ip_csum;
179553324971SDavid Marchand 		uint64_t rx_bad_l4_csum;
179653324971SDavid Marchand 		uint64_t rx_bad_outer_l4_csum;
179753324971SDavid Marchand 	} ports_stats[RTE_MAX_ETHPORTS];
179853324971SDavid Marchand 	uint64_t total_rx_dropped = 0;
179953324971SDavid Marchand 	uint64_t total_tx_dropped = 0;
180053324971SDavid Marchand 	uint64_t total_rx_nombuf = 0;
180153324971SDavid Marchand 	struct rte_eth_stats stats;
180253324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
180353324971SDavid Marchand 	uint64_t fwd_cycles = 0;
180453324971SDavid Marchand #endif
180553324971SDavid Marchand 	uint64_t total_recv = 0;
180653324971SDavid Marchand 	uint64_t total_xmit = 0;
180753324971SDavid Marchand 	struct rte_port *port;
180853324971SDavid Marchand 	streamid_t sm_id;
180953324971SDavid Marchand 	portid_t pt_id;
181053324971SDavid Marchand 	int i;
181153324971SDavid Marchand 
181253324971SDavid Marchand 	memset(ports_stats, 0, sizeof(ports_stats));
181353324971SDavid Marchand 
181453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
181553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
181653324971SDavid Marchand 
181753324971SDavid Marchand 		if (cur_fwd_config.nb_fwd_streams >
181853324971SDavid Marchand 		    cur_fwd_config.nb_fwd_ports) {
181953324971SDavid Marchand 			fwd_stream_stats_display(sm_id);
182053324971SDavid Marchand 		} else {
182153324971SDavid Marchand 			ports_stats[fs->tx_port].tx_stream = fs;
182253324971SDavid Marchand 			ports_stats[fs->rx_port].rx_stream = fs;
182353324971SDavid Marchand 		}
182453324971SDavid Marchand 
182553324971SDavid Marchand 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
182653324971SDavid Marchand 
182753324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
182853324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
182953324971SDavid Marchand 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
183053324971SDavid Marchand 				fs->rx_bad_outer_l4_csum;
183153324971SDavid Marchand 
183253324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
183353324971SDavid Marchand 		fwd_cycles += fs->core_cycles;
183453324971SDavid Marchand #endif
183553324971SDavid Marchand 	}
183653324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
183753324971SDavid Marchand 		uint8_t j;
183853324971SDavid Marchand 
183953324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
184053324971SDavid Marchand 		port = &ports[pt_id];
184153324971SDavid Marchand 
184253324971SDavid Marchand 		rte_eth_stats_get(pt_id, &stats);
184353324971SDavid Marchand 		stats.ipackets -= port->stats.ipackets;
184453324971SDavid Marchand 		stats.opackets -= port->stats.opackets;
184553324971SDavid Marchand 		stats.ibytes -= port->stats.ibytes;
184653324971SDavid Marchand 		stats.obytes -= port->stats.obytes;
184753324971SDavid Marchand 		stats.imissed -= port->stats.imissed;
184853324971SDavid Marchand 		stats.oerrors -= port->stats.oerrors;
184953324971SDavid Marchand 		stats.rx_nombuf -= port->stats.rx_nombuf;
185053324971SDavid Marchand 
185153324971SDavid Marchand 		total_recv += stats.ipackets;
185253324971SDavid Marchand 		total_xmit += stats.opackets;
185353324971SDavid Marchand 		total_rx_dropped += stats.imissed;
185453324971SDavid Marchand 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
185553324971SDavid Marchand 		total_tx_dropped += stats.oerrors;
185653324971SDavid Marchand 		total_rx_nombuf  += stats.rx_nombuf;
185753324971SDavid Marchand 
185853324971SDavid Marchand 		printf("\n  %s Forward statistics for port %-2d %s\n",
185953324971SDavid Marchand 		       fwd_stats_border, pt_id, fwd_stats_border);
186053324971SDavid Marchand 
186153324971SDavid Marchand 		if (!port->rx_queue_stats_mapping_enabled &&
186253324971SDavid Marchand 		    !port->tx_queue_stats_mapping_enabled) {
186353324971SDavid Marchand 			printf("  RX-packets: %-14"PRIu64
186453324971SDavid Marchand 			       " RX-dropped: %-14"PRIu64
186553324971SDavid Marchand 			       "RX-total: %-"PRIu64"\n",
186653324971SDavid Marchand 			       stats.ipackets, stats.imissed,
186753324971SDavid Marchand 			       stats.ipackets + stats.imissed);
186853324971SDavid Marchand 
186953324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
187053324971SDavid Marchand 				printf("  Bad-ipcsum: %-14"PRIu64
187153324971SDavid Marchand 				       " Bad-l4csum: %-14"PRIu64
187253324971SDavid Marchand 				       "Bad-outer-l4csum: %-14"PRIu64"\n",
187353324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
187453324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
187553324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
187653324971SDavid Marchand 			if (stats.ierrors + stats.rx_nombuf > 0) {
187753324971SDavid Marchand 				printf("  RX-error: %-"PRIu64"\n",
187853324971SDavid Marchand 				       stats.ierrors);
187953324971SDavid Marchand 				printf("  RX-nombufs: %-14"PRIu64"\n",
188053324971SDavid Marchand 				       stats.rx_nombuf);
188153324971SDavid Marchand 			}
188253324971SDavid Marchand 
188353324971SDavid Marchand 			printf("  TX-packets: %-14"PRIu64
188453324971SDavid Marchand 			       " TX-dropped: %-14"PRIu64
188553324971SDavid Marchand 			       "TX-total: %-"PRIu64"\n",
188653324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
188753324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
188853324971SDavid Marchand 		} else {
188953324971SDavid Marchand 			printf("  RX-packets:             %14"PRIu64
189053324971SDavid Marchand 			       "    RX-dropped:%14"PRIu64
189153324971SDavid Marchand 			       "    RX-total:%14"PRIu64"\n",
189253324971SDavid Marchand 			       stats.ipackets, stats.imissed,
189353324971SDavid Marchand 			       stats.ipackets + stats.imissed);
189453324971SDavid Marchand 
189553324971SDavid Marchand 			if (cur_fwd_eng == &csum_fwd_engine)
189653324971SDavid Marchand 				printf("  Bad-ipcsum:%14"PRIu64
189753324971SDavid Marchand 				       "    Bad-l4csum:%14"PRIu64
189853324971SDavid Marchand 				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
189953324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_ip_csum,
190053324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_l4_csum,
190153324971SDavid Marchand 				       ports_stats[pt_id].rx_bad_outer_l4_csum);
190253324971SDavid Marchand 			if ((stats.ierrors + stats.rx_nombuf) > 0) {
190353324971SDavid Marchand 				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
190453324971SDavid Marchand 				printf("  RX-nombufs:             %14"PRIu64"\n",
190553324971SDavid Marchand 				       stats.rx_nombuf);
190653324971SDavid Marchand 			}
190753324971SDavid Marchand 
190853324971SDavid Marchand 			printf("  TX-packets:             %14"PRIu64
190953324971SDavid Marchand 			       "    TX-dropped:%14"PRIu64
191053324971SDavid Marchand 			       "    TX-total:%14"PRIu64"\n",
191153324971SDavid Marchand 			       stats.opackets, ports_stats[pt_id].tx_dropped,
191253324971SDavid Marchand 			       stats.opackets + ports_stats[pt_id].tx_dropped);
191353324971SDavid Marchand 		}
191453324971SDavid Marchand 
191553324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
191653324971SDavid Marchand 		if (ports_stats[pt_id].rx_stream)
191753324971SDavid Marchand 			pkt_burst_stats_display("RX",
191853324971SDavid Marchand 				&ports_stats[pt_id].rx_stream->rx_burst_stats);
191953324971SDavid Marchand 		if (ports_stats[pt_id].tx_stream)
192053324971SDavid Marchand 			pkt_burst_stats_display("TX",
192153324971SDavid Marchand 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
192253324971SDavid Marchand #endif
192353324971SDavid Marchand 
192453324971SDavid Marchand 		if (port->rx_queue_stats_mapping_enabled) {
192553324971SDavid Marchand 			printf("\n");
192653324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
192753324971SDavid Marchand 				printf("  Stats reg %2d RX-packets:%14"PRIu64
192853324971SDavid Marchand 				       "     RX-errors:%14"PRIu64
192953324971SDavid Marchand 				       "    RX-bytes:%14"PRIu64"\n",
193053324971SDavid Marchand 				       j, stats.q_ipackets[j],
193153324971SDavid Marchand 				       stats.q_errors[j], stats.q_ibytes[j]);
193253324971SDavid Marchand 			}
193353324971SDavid Marchand 			printf("\n");
193453324971SDavid Marchand 		}
193553324971SDavid Marchand 		if (port->tx_queue_stats_mapping_enabled) {
193653324971SDavid Marchand 			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
193753324971SDavid Marchand 				printf("  Stats reg %2d TX-packets:%14"PRIu64
193853324971SDavid Marchand 				       "                                 TX-bytes:%14"
193953324971SDavid Marchand 				       PRIu64"\n",
194053324971SDavid Marchand 				       j, stats.q_opackets[j],
194153324971SDavid Marchand 				       stats.q_obytes[j]);
194253324971SDavid Marchand 			}
194353324971SDavid Marchand 		}
194453324971SDavid Marchand 
194553324971SDavid Marchand 		printf("  %s--------------------------------%s\n",
194653324971SDavid Marchand 		       fwd_stats_border, fwd_stats_border);
194753324971SDavid Marchand 	}
194853324971SDavid Marchand 
194953324971SDavid Marchand 	printf("\n  %s Accumulated forward statistics for all ports"
195053324971SDavid Marchand 	       "%s\n",
195153324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
195253324971SDavid Marchand 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
195353324971SDavid Marchand 	       "%-"PRIu64"\n"
195453324971SDavid Marchand 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
195553324971SDavid Marchand 	       "%-"PRIu64"\n",
195653324971SDavid Marchand 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
195753324971SDavid Marchand 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
195853324971SDavid Marchand 	if (total_rx_nombuf > 0)
195953324971SDavid Marchand 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
196053324971SDavid Marchand 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
196153324971SDavid Marchand 	       "%s\n",
196253324971SDavid Marchand 	       acc_stats_border, acc_stats_border);
196353324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
19644c0497b1SDharmik Thakkar #define CYC_PER_MHZ 1E6
19653a164e00SPhil Yang 	if (total_recv > 0 || total_xmit > 0) {
19663a164e00SPhil Yang 		uint64_t total_pkts = 0;
19673a164e00SPhil Yang 		if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
19683a164e00SPhil Yang 		    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
19693a164e00SPhil Yang 			total_pkts = total_xmit;
19703a164e00SPhil Yang 		else
19713a164e00SPhil Yang 			total_pkts = total_recv;
19723a164e00SPhil Yang 
19731920832aSDharmik Thakkar 		printf("\n  CPU cycles/packet=%.2F (total cycles="
19743a164e00SPhil Yang 		       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
19754c0497b1SDharmik Thakkar 		       " MHz Clock\n",
19763a164e00SPhil Yang 		       (double) fwd_cycles / total_pkts,
19773a164e00SPhil Yang 		       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
19784c0497b1SDharmik Thakkar 		       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
19793a164e00SPhil Yang 	}
198053324971SDavid Marchand #endif
198153324971SDavid Marchand }
198253324971SDavid Marchand 
198353324971SDavid Marchand void
198453324971SDavid Marchand fwd_stats_reset(void)
198553324971SDavid Marchand {
198653324971SDavid Marchand 	streamid_t sm_id;
198753324971SDavid Marchand 	portid_t pt_id;
198853324971SDavid Marchand 	int i;
198953324971SDavid Marchand 
199053324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
199153324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
199253324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
199353324971SDavid Marchand 	}
199453324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
199553324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
199653324971SDavid Marchand 
199753324971SDavid Marchand 		fs->rx_packets = 0;
199853324971SDavid Marchand 		fs->tx_packets = 0;
199953324971SDavid Marchand 		fs->fwd_dropped = 0;
200053324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
200153324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
200253324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
200353324971SDavid Marchand 
200453324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
200553324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
200653324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
200753324971SDavid Marchand #endif
200853324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
200953324971SDavid Marchand 		fs->core_cycles = 0;
201053324971SDavid Marchand #endif
201153324971SDavid Marchand 	}
201253324971SDavid Marchand }
201353324971SDavid Marchand 
2014af75078fSIntel static void
20157741e4cfSIntel flush_fwd_rx_queues(void)
2016af75078fSIntel {
2017af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2018af75078fSIntel 	portid_t  rxp;
20197741e4cfSIntel 	portid_t port_id;
2020af75078fSIntel 	queueid_t rxq;
2021af75078fSIntel 	uint16_t  nb_rx;
2022af75078fSIntel 	uint16_t  i;
2023af75078fSIntel 	uint8_t   j;
2024f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2025594302c7SJames Poole 	uint64_t timer_period;
2026f487715fSReshma Pattan 
2027f487715fSReshma Pattan 	/* convert to number of cycles */
2028594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2029af75078fSIntel 
2030af75078fSIntel 	for (j = 0; j < 2; j++) {
20317741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2032af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
20337741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
2034f487715fSReshma Pattan 				/**
2035f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
2036f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
2037f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
2038f487715fSReshma Pattan 				* after 1sec timer expiry.
2039f487715fSReshma Pattan 				*/
2040f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
2041af75078fSIntel 				do {
20427741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2043013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
2044af75078fSIntel 					for (i = 0; i < nb_rx; i++)
2045af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
2046f487715fSReshma Pattan 
2047f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
2048f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
2049f487715fSReshma Pattan 					timer_tsc += diff_tsc;
2050f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
2051f487715fSReshma Pattan 					(timer_tsc < timer_period));
2052f487715fSReshma Pattan 				timer_tsc = 0;
2053af75078fSIntel 			}
2054af75078fSIntel 		}
2055af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2056af75078fSIntel 	}
2057af75078fSIntel }
2058af75078fSIntel 
/*
 * Main forwarding loop of one lcore: repeatedly run the packet forwarding
 * callback over every stream assigned to this lcore until fc->stopped is
 * set.  Optionally drives the periodic bitrate calculation and latency
 * statistics update when those libraries are compiled in and this lcore
 * is the one designated for them.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore: fc->stream_nb entries starting
	 * at fc->stream_idx in the global fwd_streams array.
	 */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the designated bitrate lcore does the calculation,
		 * at most once per second.
		 */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats are refreshed by one designated lcore only. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
2101af75078fSIntel 
2102af75078fSIntel static int
2103af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
2104af75078fSIntel {
2105af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2106af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
2107af75078fSIntel 	return 0;
2108af75078fSIntel }
2109af75078fSIntel 
2110af75078fSIntel /*
2111af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2112af75078fSIntel  * Used to start communication flows in network loopback test configurations.
2113af75078fSIntel  */
2114af75078fSIntel static int
2115af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
2116af75078fSIntel {
2117af75078fSIntel 	struct fwd_lcore *fwd_lc;
2118af75078fSIntel 	struct fwd_lcore tmp_lcore;
2119af75078fSIntel 
2120af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2121af75078fSIntel 	tmp_lcore = *fwd_lc;
2122af75078fSIntel 	tmp_lcore.stopped = 1;
2123af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2124af75078fSIntel 	return 0;
2125af75078fSIntel }
2126af75078fSIntel 
2127af75078fSIntel /*
2128af75078fSIntel  * Launch packet forwarding:
2129af75078fSIntel  *     - Setup per-port forwarding context.
2130af75078fSIntel  *     - launch logical cores with their forwarding configuration.
2131af75078fSIntel  */
2132af75078fSIntel static void
2133af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2134af75078fSIntel {
2135af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
2136af75078fSIntel 	unsigned int i;
2137af75078fSIntel 	unsigned int lc_id;
2138af75078fSIntel 	int diag;
2139af75078fSIntel 
2140af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2141af75078fSIntel 	if (port_fwd_begin != NULL) {
2142af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2143af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
2144af75078fSIntel 	}
2145af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2146af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
2147af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2148af75078fSIntel 			fwd_lcores[i]->stopped = 0;
2149af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2150af75078fSIntel 						     fwd_lcores[i], lc_id);
2151af75078fSIntel 			if (diag != 0)
2152af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
2153af75078fSIntel 				       lc_id, diag);
2154af75078fSIntel 		}
2155af75078fSIntel 	}
2156af75078fSIntel }
2157af75078fSIntel 
2158af75078fSIntel /*
2159af75078fSIntel  * Launch packet forwarding configuration.
2160af75078fSIntel  */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* Sanity-check that the selected engine has the queues it needs. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	/* In DCB mode every forwarding port must have been configured for
	 * DCB, and more than one forwarding core is required.
	 */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets left in the Rx queues, unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Baseline the statistics for this run. */
	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Optionally send one or more tx-only bursts first, to kick off
	 * traffic in network-loopback configurations.
	 */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
2244af75078fSIntel 
2245af75078fSIntel void
2246af75078fSIntel stop_packet_forwarding(void)
2247af75078fSIntel {
2248af75078fSIntel 	port_fwd_end_t port_fwd_end;
2249af75078fSIntel 	lcoreid_t lc_id;
225053324971SDavid Marchand 	portid_t pt_id;
225153324971SDavid Marchand 	int i;
2252af75078fSIntel 
2253af75078fSIntel 	if (test_done) {
2254af75078fSIntel 		printf("Packet forwarding not started\n");
2255af75078fSIntel 		return;
2256af75078fSIntel 	}
2257af75078fSIntel 	printf("Telling cores to stop...");
2258af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2259af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
2260af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
2261af75078fSIntel 	rte_eal_mp_wait_lcore();
2262af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2263af75078fSIntel 	if (port_fwd_end != NULL) {
2264af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2265af75078fSIntel 			pt_id = fwd_ports_ids[i];
2266af75078fSIntel 			(*port_fwd_end)(pt_id);
2267af75078fSIntel 		}
2268af75078fSIntel 	}
2269c185d42cSDavid Marchand 
227053324971SDavid Marchand 	fwd_stats_display();
227158d475b7SJerin Jacob 
2272af75078fSIntel 	printf("\nDone.\n");
2273af75078fSIntel 	test_done = 1;
2274af75078fSIntel }
2275af75078fSIntel 
2276cfae07fdSOuyang Changchun void
2277cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
2278cfae07fdSOuyang Changchun {
2279492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
2280cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
2281cfae07fdSOuyang Changchun }
2282cfae07fdSOuyang Changchun 
2283cfae07fdSOuyang Changchun void
2284cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
2285cfae07fdSOuyang Changchun {
2286492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
2287cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
2288cfae07fdSOuyang Changchun }
2289cfae07fdSOuyang Changchun 
2290ce8d5614SIntel static int
2291ce8d5614SIntel all_ports_started(void)
2292ce8d5614SIntel {
2293ce8d5614SIntel 	portid_t pi;
2294ce8d5614SIntel 	struct rte_port *port;
2295ce8d5614SIntel 
22967d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
2297ce8d5614SIntel 		port = &ports[pi];
2298ce8d5614SIntel 		/* Check if there is a port which is not started */
229941b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
230041b05095SBernard Iremonger 			(port->slave_flag == 0))
2301ce8d5614SIntel 			return 0;
2302ce8d5614SIntel 	}
2303ce8d5614SIntel 
2304ce8d5614SIntel 	/* No port is not started */
2305ce8d5614SIntel 	return 1;
2306ce8d5614SIntel }
2307ce8d5614SIntel 
2308148f963fSBruce Richardson int
23096018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
23106018eb8cSShahaf Shuler {
23116018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
23126018eb8cSShahaf Shuler 
23136018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
23146018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
23156018eb8cSShahaf Shuler 		return 0;
23166018eb8cSShahaf Shuler 	return 1;
23176018eb8cSShahaf Shuler }
23186018eb8cSShahaf Shuler 
23196018eb8cSShahaf Shuler int
2320edab33b1STetsuya Mukawa all_ports_stopped(void)
2321edab33b1STetsuya Mukawa {
2322edab33b1STetsuya Mukawa 	portid_t pi;
2323edab33b1STetsuya Mukawa 
23247d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
23256018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
2326edab33b1STetsuya Mukawa 			return 0;
2327edab33b1STetsuya Mukawa 	}
2328edab33b1STetsuya Mukawa 
2329edab33b1STetsuya Mukawa 	return 1;
2330edab33b1STetsuya Mukawa }
2331edab33b1STetsuya Mukawa 
2332edab33b1STetsuya Mukawa int
2333edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
2334edab33b1STetsuya Mukawa {
2335edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2336edab33b1STetsuya Mukawa 		return 0;
2337edab33b1STetsuya Mukawa 
2338edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2339edab33b1STetsuya Mukawa 		return 0;
2340edab33b1STetsuya Mukawa 
2341edab33b1STetsuya Mukawa 	return 1;
2342edab33b1STetsuya Mukawa }
2343edab33b1STetsuya Mukawa 
23441c69df45SOri Kam /* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi)
{
	/* Each hairpin queue is peered 1:1 with a queue on the same port:
	 * a Tx hairpin queue i is wired to Rx queue (nb_rxq + i) and an Rx
	 * hairpin queue i to Tx queue (nb_txq + i).  On any failure the
	 * port status is rolled back to STOPPED and -1 is returned.
	 */
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];

	/* Hairpin Tx queues occupy indexes [nb_txq, nb_txq + nb_hairpinq). */
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the tx hairpin queue: roll the port
		 * state back to stopped and bail out.
		 */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	/* Hairpin Rx queues occupy indexes [nb_rxq, nb_rxq + nb_hairpinq). */
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the rx hairpin queue: roll the port
		 * state back to stopped and bail out.
		 */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
24001c69df45SOri Kam 
/*
 * Start one port (or all ports when pid == RTE_PORT_ALL): reconfigure
 * the device and its Rx/Tx/hairpin queues when flagged as needing it,
 * then start the device and report its MAC address.  Port status moves
 * STOPPED -> HANDLING -> STARTED via atomic compare-and-set, and is
 * rolled back to STOPPED on any failure.  Returns 0 on success, -1 on
 * a configuration failure.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				printf("Port %d doesn't support hairpin "
				       "queues\n", pi);
				return -1;
			}
			/* configure port: hairpin queues are counted on top
			 * of the regular Rx/Tx queues.
			 */
			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the per-port NUMA override for the
				 * Tx ring when one was configured.
				 */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues: the mbuf pool must come
				 * from the ring's NUMA socket when a per-port
				 * override exists, else the port's socket.
				 */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				printf(
				"Port %d: Failed to disable Ptype parsing\n",
				pi);
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start; roll the state back to stopped. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
2592ce8d5614SIntel 
2593ce8d5614SIntel void
2594ce8d5614SIntel stop_port(portid_t pid)
2595ce8d5614SIntel {
2596ce8d5614SIntel 	portid_t pi;
2597ce8d5614SIntel 	struct rte_port *port;
2598ce8d5614SIntel 	int need_check_link_status = 0;
2599ce8d5614SIntel 
2600ce8d5614SIntel 	if (dcb_test) {
2601ce8d5614SIntel 		dcb_test = 0;
2602ce8d5614SIntel 		dcb_config = 0;
2603ce8d5614SIntel 	}
26044468635fSMichael Qiu 
26054468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
26064468635fSMichael Qiu 		return;
26074468635fSMichael Qiu 
2608ce8d5614SIntel 	printf("Stopping ports...\n");
2609ce8d5614SIntel 
26107d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
26114468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2612ce8d5614SIntel 			continue;
2613ce8d5614SIntel 
2614a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2615a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2616a8ef3e3aSBernard Iremonger 			continue;
2617a8ef3e3aSBernard Iremonger 		}
2618a8ef3e3aSBernard Iremonger 
26190e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
26200e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
26210e545d30SBernard Iremonger 			continue;
26220e545d30SBernard Iremonger 		}
26230e545d30SBernard Iremonger 
2624ce8d5614SIntel 		port = &ports[pi];
2625ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2626ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2627ce8d5614SIntel 			continue;
2628ce8d5614SIntel 
2629ce8d5614SIntel 		rte_eth_dev_stop(pi);
2630ce8d5614SIntel 
2631ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2632ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2633ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2634ce8d5614SIntel 		need_check_link_status = 1;
2635ce8d5614SIntel 	}
2636bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2637edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2638ce8d5614SIntel 
2639ce8d5614SIntel 	printf("Done\n");
2640ce8d5614SIntel }
2641ce8d5614SIntel 
2642ce6959bfSWisam Jaddo static void
26434f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2644ce6959bfSWisam Jaddo {
26454f1de450SThomas Monjalon 	portid_t i;
26464f1de450SThomas Monjalon 	portid_t new_total = 0;
2647ce6959bfSWisam Jaddo 
26484f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
26494f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
26504f1de450SThomas Monjalon 			array[new_total] = array[i];
26514f1de450SThomas Monjalon 			new_total++;
2652ce6959bfSWisam Jaddo 		}
26534f1de450SThomas Monjalon 	*total = new_total;
26544f1de450SThomas Monjalon }
26554f1de450SThomas Monjalon 
26564f1de450SThomas Monjalon static void
26574f1de450SThomas Monjalon remove_invalid_ports(void)
26584f1de450SThomas Monjalon {
26594f1de450SThomas Monjalon 	remove_invalid_ports_in(ports_ids, &nb_ports);
26604f1de450SThomas Monjalon 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
26614f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
2662ce6959bfSWisam Jaddo }
2663ce6959bfSWisam Jaddo 
/*
 * Close the given port, or every port when pid == RTE_PORT_ALL.
 * A port must already be stopped; ports still used for forwarding or
 * acting as bonding slaves are skipped with a message.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) doubles as an atomic "is it closed?" test */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* claim the port: only a STOPPED port may be closed */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* drop all flow rules before the underlying ethdev goes away */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		/* the closed port may have vanished from the id lists */
		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2715ce8d5614SIntel 
2716edab33b1STetsuya Mukawa void
271797f1e196SWei Dai reset_port(portid_t pid)
271897f1e196SWei Dai {
271997f1e196SWei Dai 	int diag;
272097f1e196SWei Dai 	portid_t pi;
272197f1e196SWei Dai 	struct rte_port *port;
272297f1e196SWei Dai 
272397f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
272497f1e196SWei Dai 		return;
272597f1e196SWei Dai 
27261cde1b9aSShougang Wang 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
27271cde1b9aSShougang Wang 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
27281cde1b9aSShougang Wang 		printf("Can not reset port(s), please stop port(s) first.\n");
27291cde1b9aSShougang Wang 		return;
27301cde1b9aSShougang Wang 	}
27311cde1b9aSShougang Wang 
273297f1e196SWei Dai 	printf("Resetting ports...\n");
273397f1e196SWei Dai 
273497f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
273597f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
273697f1e196SWei Dai 			continue;
273797f1e196SWei Dai 
273897f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
273997f1e196SWei Dai 			printf("Please remove port %d from forwarding "
274097f1e196SWei Dai 			       "configuration.\n", pi);
274197f1e196SWei Dai 			continue;
274297f1e196SWei Dai 		}
274397f1e196SWei Dai 
274497f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
274597f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
274697f1e196SWei Dai 			       pi);
274797f1e196SWei Dai 			continue;
274897f1e196SWei Dai 		}
274997f1e196SWei Dai 
275097f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
275197f1e196SWei Dai 		if (diag == 0) {
275297f1e196SWei Dai 			port = &ports[pi];
275397f1e196SWei Dai 			port->need_reconfig = 1;
275497f1e196SWei Dai 			port->need_reconfig_queues = 1;
275597f1e196SWei Dai 		} else {
275697f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
275797f1e196SWei Dai 		}
275897f1e196SWei Dai 	}
275997f1e196SWei Dai 
276097f1e196SWei Dai 	printf("Done\n");
276197f1e196SWei Dai }
276297f1e196SWei Dai 
276397f1e196SWei Dai void
2764edab33b1STetsuya Mukawa attach_port(char *identifier)
2765ce8d5614SIntel {
27664f1ed78eSThomas Monjalon 	portid_t pi;
2767c9cce428SThomas Monjalon 	struct rte_dev_iterator iterator;
2768ce8d5614SIntel 
2769edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2770edab33b1STetsuya Mukawa 
2771edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2772edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2773edab33b1STetsuya Mukawa 		return;
2774ce8d5614SIntel 	}
2775ce8d5614SIntel 
277675b66decSIlya Maximets 	if (rte_dev_probe(identifier) < 0) {
2777c9cce428SThomas Monjalon 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2778edab33b1STetsuya Mukawa 		return;
2779c9cce428SThomas Monjalon 	}
2780c9cce428SThomas Monjalon 
27814f1ed78eSThomas Monjalon 	/* first attach mode: event */
27824f1ed78eSThomas Monjalon 	if (setup_on_probe_event) {
27834f1ed78eSThomas Monjalon 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
27844f1ed78eSThomas Monjalon 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
27854f1ed78eSThomas Monjalon 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
27864f1ed78eSThomas Monjalon 					ports[pi].need_setup != 0)
27874f1ed78eSThomas Monjalon 				setup_attached_port(pi);
27884f1ed78eSThomas Monjalon 		return;
27894f1ed78eSThomas Monjalon 	}
27904f1ed78eSThomas Monjalon 
27914f1ed78eSThomas Monjalon 	/* second attach mode: iterator */
279286fa5de1SThomas Monjalon 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
27934f1ed78eSThomas Monjalon 		/* setup ports matching the devargs used for probing */
279486fa5de1SThomas Monjalon 		if (port_is_forwarding(pi))
279586fa5de1SThomas Monjalon 			continue; /* port was already attached before */
2796c9cce428SThomas Monjalon 		setup_attached_port(pi);
2797c9cce428SThomas Monjalon 	}
279886fa5de1SThomas Monjalon }
2799c9cce428SThomas Monjalon 
2800c9cce428SThomas Monjalon static void
2801c9cce428SThomas Monjalon setup_attached_port(portid_t pi)
2802c9cce428SThomas Monjalon {
2803c9cce428SThomas Monjalon 	unsigned int socket_id;
280434fc1051SIvan Ilchenko 	int ret;
2805edab33b1STetsuya Mukawa 
2806931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
280729841336SPhil Yang 	/* if socket_id is invalid, set to the first available socket. */
2808931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
280929841336SPhil Yang 		socket_id = socket_ids[0];
2810931126baSBernard Iremonger 	reconfig(pi, socket_id);
281134fc1051SIvan Ilchenko 	ret = rte_eth_promiscuous_enable(pi);
281234fc1051SIvan Ilchenko 	if (ret != 0)
281334fc1051SIvan Ilchenko 		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
281434fc1051SIvan Ilchenko 			pi, rte_strerror(-ret));
2815edab33b1STetsuya Mukawa 
28164f1de450SThomas Monjalon 	ports_ids[nb_ports++] = pi;
28174f1de450SThomas Monjalon 	fwd_ports_ids[nb_fwd_ports++] = pi;
28184f1de450SThomas Monjalon 	nb_cfg_ports = nb_fwd_ports;
28194f1ed78eSThomas Monjalon 	ports[pi].need_setup = 0;
2820edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2821edab33b1STetsuya Mukawa 
2822edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2823edab33b1STetsuya Mukawa 	printf("Done\n");
2824edab33b1STetsuya Mukawa }
2825edab33b1STetsuya Mukawa 
/*
 * Remove an rte_device and force-close every ethdev port that belongs
 * to it. dev may already be NULL when the device was hot-unplugged.
 */
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	/* removed ports must leave the global and forwarding id lists */
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
28595f4ec54fSChen Jing D(Mark) 
2860af75078fSIntel void
28610654d4a8SThomas Monjalon detach_port_device(portid_t port_id)
28620654d4a8SThomas Monjalon {
28630654d4a8SThomas Monjalon 	if (port_id_is_invalid(port_id, ENABLED_WARN))
28640654d4a8SThomas Monjalon 		return;
28650654d4a8SThomas Monjalon 
28660654d4a8SThomas Monjalon 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
28670654d4a8SThomas Monjalon 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
28680654d4a8SThomas Monjalon 			printf("Port not stopped\n");
28690654d4a8SThomas Monjalon 			return;
28700654d4a8SThomas Monjalon 		}
28710654d4a8SThomas Monjalon 		printf("Port was not closed\n");
28720654d4a8SThomas Monjalon 		if (ports[port_id].flow_list)
28730654d4a8SThomas Monjalon 			port_flow_flush(port_id);
28740654d4a8SThomas Monjalon 	}
28750654d4a8SThomas Monjalon 
28760654d4a8SThomas Monjalon 	detach_device(rte_eth_devices[port_id].device);
28770654d4a8SThomas Monjalon }
28780654d4a8SThomas Monjalon 
28790654d4a8SThomas Monjalon void
28805edee5f6SThomas Monjalon detach_devargs(char *identifier)
288155e51c96SNithin Dabilpuram {
288255e51c96SNithin Dabilpuram 	struct rte_dev_iterator iterator;
288355e51c96SNithin Dabilpuram 	struct rte_devargs da;
288455e51c96SNithin Dabilpuram 	portid_t port_id;
288555e51c96SNithin Dabilpuram 
288655e51c96SNithin Dabilpuram 	printf("Removing a device...\n");
288755e51c96SNithin Dabilpuram 
288855e51c96SNithin Dabilpuram 	memset(&da, 0, sizeof(da));
288955e51c96SNithin Dabilpuram 	if (rte_devargs_parsef(&da, "%s", identifier)) {
289055e51c96SNithin Dabilpuram 		printf("cannot parse identifier\n");
289155e51c96SNithin Dabilpuram 		if (da.args)
289255e51c96SNithin Dabilpuram 			free(da.args);
289355e51c96SNithin Dabilpuram 		return;
289455e51c96SNithin Dabilpuram 	}
289555e51c96SNithin Dabilpuram 
289655e51c96SNithin Dabilpuram 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
289755e51c96SNithin Dabilpuram 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
289855e51c96SNithin Dabilpuram 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
289955e51c96SNithin Dabilpuram 				printf("Port %u not stopped\n", port_id);
2900149677c9SStephen Hemminger 				rte_eth_iterator_cleanup(&iterator);
290155e51c96SNithin Dabilpuram 				return;
290255e51c96SNithin Dabilpuram 			}
290355e51c96SNithin Dabilpuram 
290455e51c96SNithin Dabilpuram 			/* sibling ports are forced to be closed */
290555e51c96SNithin Dabilpuram 			if (ports[port_id].flow_list)
290655e51c96SNithin Dabilpuram 				port_flow_flush(port_id);
290755e51c96SNithin Dabilpuram 			ports[port_id].port_status = RTE_PORT_CLOSED;
290855e51c96SNithin Dabilpuram 			printf("Port %u is now closed\n", port_id);
290955e51c96SNithin Dabilpuram 		}
291055e51c96SNithin Dabilpuram 	}
291155e51c96SNithin Dabilpuram 
291255e51c96SNithin Dabilpuram 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
291355e51c96SNithin Dabilpuram 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
291455e51c96SNithin Dabilpuram 			    da.name, da.bus->name);
291555e51c96SNithin Dabilpuram 		return;
291655e51c96SNithin Dabilpuram 	}
291755e51c96SNithin Dabilpuram 
291855e51c96SNithin Dabilpuram 	remove_invalid_ports();
291955e51c96SNithin Dabilpuram 
292055e51c96SNithin Dabilpuram 	printf("Device %s is detached\n", identifier);
292155e51c96SNithin Dabilpuram 	printf("Now total ports is %d\n", nb_ports);
292255e51c96SNithin Dabilpuram 	printf("Done\n");
292355e51c96SNithin Dabilpuram }
292455e51c96SNithin Dabilpuram 
/*
 * Orderly testpmd shutdown: stop forwarding, DMA-unmap anonymous
 * mempool memory, stop and close every port, tear down hotplug
 * monitoring, and free the mbuf pools. The ordering is deliberate:
 * pools must be unmapped before ports close and freed only after all
 * ports are gone.
 */
void
pmd_test_exit(void)
{
	portid_t pt_id;
	int ret;
	int i;

	/* forwarding must be stopped before the ports are torn down */
	if (test_done == 0)
		stop_packet_forwarding();

	/* anonymous-memory pools were DMA-mapped; unmap them first */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		no_link_check = 1; /* links go down anyway; skip the check */
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		/* tear hotplug support down in reverse order of setup */
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	/* release the packet mbuf pools (one per NUMA node) */
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}
2986af75078fSIntel 
/* Prototype of a handler implementing an interactive testpmd command. */
typedef void (*cmd_func_t)(void);
/* Binding of a command name to the function that implements it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* handler invoked for that command */
};
2992af75078fSIntel 
/*
 * Poll the link status of all ports selected by port_mask for up to 9s
 * (90 polls, 100ms apart), then print the final status of each port.
 * The loop runs in two phases: while print_flag == 0 it only polls;
 * once all links are up or the timeout is one poll away, print_flag is
 * set and the next pass prints every port's status before exiting.
 */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				/* query failure counts as "not up yet" */
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. speed %u Mbps- %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		/* with LSC interrupts enabled, events report link changes;
		 * no need to keep polling
		 */
		if (lsc_interrupt)
			break;
	}
}
3056af75078fSIntel 
3057cc1bf307SJeff Guo /*
3058cc1bf307SJeff Guo  * This callback is for remove a port for a device. It has limitation because
3059cc1bf307SJeff Guo  * it is not for multiple port removal for a device.
3060cc1bf307SJeff Guo  * TODO: the device detach invoke will plan to be removed from user side to
3061cc1bf307SJeff Guo  * eal. And convert all PMDs to free port resources on ether device closing.
3062cc1bf307SJeff Guo  */
3063284c908cSGaetan Rivet static void
3064cc1bf307SJeff Guo rmv_port_callback(void *arg)
3065284c908cSGaetan Rivet {
30663b97888aSMatan Azrad 	int need_to_start = 0;
30670da2a62bSMatan Azrad 	int org_no_link_check = no_link_check;
306828caa76aSZhiyong Yang 	portid_t port_id = (intptr_t)arg;
30690654d4a8SThomas Monjalon 	struct rte_device *dev;
3070284c908cSGaetan Rivet 
3071284c908cSGaetan Rivet 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3072284c908cSGaetan Rivet 
30733b97888aSMatan Azrad 	if (!test_done && port_is_forwarding(port_id)) {
30743b97888aSMatan Azrad 		need_to_start = 1;
30753b97888aSMatan Azrad 		stop_packet_forwarding();
30763b97888aSMatan Azrad 	}
30770da2a62bSMatan Azrad 	no_link_check = 1;
3078284c908cSGaetan Rivet 	stop_port(port_id);
30790da2a62bSMatan Azrad 	no_link_check = org_no_link_check;
30800654d4a8SThomas Monjalon 
30810654d4a8SThomas Monjalon 	/* Save rte_device pointer before closing ethdev port */
30820654d4a8SThomas Monjalon 	dev = rte_eth_devices[port_id].device;
3083284c908cSGaetan Rivet 	close_port(port_id);
30840654d4a8SThomas Monjalon 	detach_device(dev); /* might be already removed or have more ports */
30850654d4a8SThomas Monjalon 
30863b97888aSMatan Azrad 	if (need_to_start)
30873b97888aSMatan Azrad 		start_packet_forwarding(0);
3088284c908cSGaetan Rivet }
3089284c908cSGaetan Rivet 
/* This function is used by the interrupt thread */
/*
 * Ethdev event callback registered on all ports for all event types.
 * Logs events selected by event_print_mask, records NEW ports for
 * later setup by attach_port(), and schedules deferred removal for
 * INTR_RMV (the real work cannot run inside the interrupt context).
 * Always returns 0.
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		/* defer port setup; attach_port() will pick it up */
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* run removal from the alarm thread, not this callback */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
312576ad4a2dSGaetan Rivet 
312697b5d8b5SThomas Monjalon static int
312797b5d8b5SThomas Monjalon register_eth_event_callback(void)
312897b5d8b5SThomas Monjalon {
312997b5d8b5SThomas Monjalon 	int ret;
313097b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
313197b5d8b5SThomas Monjalon 
313297b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
313397b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
313497b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
313597b5d8b5SThomas Monjalon 				event,
313697b5d8b5SThomas Monjalon 				eth_event_callback,
313797b5d8b5SThomas Monjalon 				NULL);
313897b5d8b5SThomas Monjalon 		if (ret != 0) {
313997b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
314097b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
314197b5d8b5SThomas Monjalon 			return -1;
314297b5d8b5SThomas Monjalon 		}
314397b5d8b5SThomas Monjalon 	}
314497b5d8b5SThomas Monjalon 
314597b5d8b5SThomas Monjalon 	return 0;
314697b5d8b5SThomas Monjalon }
314797b5d8b5SThomas Monjalon 
/* This function is used by the interrupt thread */
/*
 * EAL device (hotplug) event callback. On REMOVE, resolves the port
 * backing the named device and schedules its removal via an alarm; on
 * ADD it only logs (attach after kernel-driver binding is a TODO).
 */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in eal interrupt
		 * callback, the interrupt callback need to be finished before
		 * it can be unregistered when detaching device. So finish
		 * callback soon and use a deferred removal to detach device
		 * is need. It is a workaround, once the device detaching be
		 * moved into the eal in the future, the deferred removal could
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
3197fb73e096SJeff Guo 
3198013af9b6SIntel static int
319928caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3200af75078fSIntel {
3201013af9b6SIntel 	uint16_t i;
3202af75078fSIntel 	int diag;
3203013af9b6SIntel 	uint8_t mapping_found = 0;
3204af75078fSIntel 
3205013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3206013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3207013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3208013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3209013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
3210013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
3211013af9b6SIntel 			if (diag != 0)
3212013af9b6SIntel 				return diag;
3213013af9b6SIntel 			mapping_found = 1;
3214af75078fSIntel 		}
3215013af9b6SIntel 	}
3216013af9b6SIntel 	if (mapping_found)
3217013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
3218013af9b6SIntel 	return 0;
3219013af9b6SIntel }
3220013af9b6SIntel 
3221013af9b6SIntel static int
322228caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3223013af9b6SIntel {
3224013af9b6SIntel 	uint16_t i;
3225013af9b6SIntel 	int diag;
3226013af9b6SIntel 	uint8_t mapping_found = 0;
3227013af9b6SIntel 
3228013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3229013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3230013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3231013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3232013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
3233013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
3234013af9b6SIntel 			if (diag != 0)
3235013af9b6SIntel 				return diag;
3236013af9b6SIntel 			mapping_found = 1;
3237013af9b6SIntel 		}
3238013af9b6SIntel 	}
3239013af9b6SIntel 	if (mapping_found)
3240013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
3241013af9b6SIntel 	return 0;
3242013af9b6SIntel }
3243013af9b6SIntel 
3244013af9b6SIntel static void
324528caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3246013af9b6SIntel {
3247013af9b6SIntel 	int diag = 0;
3248013af9b6SIntel 
3249013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
3250af75078fSIntel 	if (diag != 0) {
3251013af9b6SIntel 		if (diag == -ENOTSUP) {
3252013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
3253013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
3254013af9b6SIntel 		}
3255013af9b6SIntel 		else
3256013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3257013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
3258013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3259af75078fSIntel 					pi, diag);
3260af75078fSIntel 	}
3261013af9b6SIntel 
3262013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
3263af75078fSIntel 	if (diag != 0) {
3264013af9b6SIntel 		if (diag == -ENOTSUP) {
3265013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
3266013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
3267013af9b6SIntel 		}
3268013af9b6SIntel 		else
3269013af9b6SIntel 			rte_exit(EXIT_FAILURE,
3270013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
3271013af9b6SIntel 					"failed for port id=%d diag=%d\n",
3272af75078fSIntel 					pi, diag);
3273af75078fSIntel 	}
3274af75078fSIntel }
3275af75078fSIntel 
3276f2c5125aSPablo de Lara static void
3277f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
3278f2c5125aSPablo de Lara {
3279d44f8a48SQi Zhang 	uint16_t qid;
32805e91aeefSWei Zhao 	uint64_t offloads;
3281f2c5125aSPablo de Lara 
3282d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
32835e91aeefSWei Zhao 		offloads = port->rx_conf[qid].offloads;
3284d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3285575e0fd1SWei Zhao 		if (offloads != 0)
3286575e0fd1SWei Zhao 			port->rx_conf[qid].offloads = offloads;
3287d44f8a48SQi Zhang 
3288d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
3289f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3290d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3291f2c5125aSPablo de Lara 
3292f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3293d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3294f2c5125aSPablo de Lara 
3295f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3296d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3297f2c5125aSPablo de Lara 
3298f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3299d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3300f2c5125aSPablo de Lara 
3301f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3302d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3303f2c5125aSPablo de Lara 
3304d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
3305d44f8a48SQi Zhang 	}
3306d44f8a48SQi Zhang 
3307d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
33085e91aeefSWei Zhao 		offloads = port->tx_conf[qid].offloads;
3309d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
3310575e0fd1SWei Zhao 		if (offloads != 0)
3311575e0fd1SWei Zhao 			port->tx_conf[qid].offloads = offloads;
3312d44f8a48SQi Zhang 
3313d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
3314f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3315d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3316f2c5125aSPablo de Lara 
3317f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3318d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3319f2c5125aSPablo de Lara 
3320f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3321d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3322f2c5125aSPablo de Lara 
3323f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3324d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3325f2c5125aSPablo de Lara 
3326f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3327d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3328d44f8a48SQi Zhang 
3329d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
3330d44f8a48SQi Zhang 	}
3331f2c5125aSPablo de Lara }
3332f2c5125aSPablo de Lara 
/*
 * Build the default rte_eth_conf for every probed port: fetch device info,
 * choose the Rx multi-queue mode (RSS when more than one Rx queue and the
 * port is not in DCB mode), fill per-queue Rx/Tx configuration, read the
 * port MAC address, program queue stats mappings and request LSC/RMV
 * interrupts when enabled and supported by the device.
 * Returns early (silently) if device info or MAC address retrieval fails;
 * the helpers already print the error.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			/* Multiple Rx queues: enable RSS, restricted to the
			 * hash types the device actually supports.
			 */
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Do not override the mq_mode of a port already set up for
		 * DCB by init_port_dcb_config().
		 */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & ETH_MQ_RX_RSS);
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Ask for link-status-change / device-removal interrupts only
		 * when both requested on the command line and advertised by
		 * the device flags.
		 */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
3387013af9b6SIntel 
338841b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
338941b05095SBernard Iremonger {
339041b05095SBernard Iremonger 	struct rte_port *port;
339141b05095SBernard Iremonger 
339241b05095SBernard Iremonger 	port = &ports[slave_pid];
339341b05095SBernard Iremonger 	port->slave_flag = 1;
339441b05095SBernard Iremonger }
339541b05095SBernard Iremonger 
339641b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
339741b05095SBernard Iremonger {
339841b05095SBernard Iremonger 	struct rte_port *port;
339941b05095SBernard Iremonger 
340041b05095SBernard Iremonger 	port = &ports[slave_pid];
340141b05095SBernard Iremonger 	port->slave_flag = 0;
340241b05095SBernard Iremonger }
340341b05095SBernard Iremonger 
34040e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
34050e545d30SBernard Iremonger {
34060e545d30SBernard Iremonger 	struct rte_port *port;
34070e545d30SBernard Iremonger 
34080e545d30SBernard Iremonger 	port = &ports[slave_pid];
3409b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
3410b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3411b8b8b344SMatan Azrad 		return 1;
3412b8b8b344SMatan Azrad 	return 0;
34130e545d30SBernard Iremonger }
34140e545d30SBernard Iremonger 
/* VLAN IDs used by the DCB setup code: they populate the VMDQ+DCB pool map
 * in get_eth_dcb_conf() and are installed in the port VLAN filter by
 * init_port_dcb_config() (one tag per possible pool, 32 entries).
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
3421013af9b6SIntel 
/*
 * Fill @eth_conf for DCB operation on port @pid.
 *
 * Two shapes are produced depending on @dcb_mode:
 *  - DCB_VT_ENABLED: VMDQ+DCB.  The number of pools is derived from
 *    @num_tcs (4 TCs -> 32 pools, otherwise 16 pools) and each pool is
 *    mapped to one VLAN tag from the vlan_tags[] table above.
 *  - otherwise: plain DCB combined with RSS; the port's current RSS hash
 *    configuration is read back and re-applied.
 *
 * @pfc_en additionally enables priority flow control capability.
 * Returns 0 on success or the error from rte_eth_dev_rss_hash_conf_get().
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One pool map entry per pool: VLAN tag i steers to pool i. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the 8 user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		/* Reuse the RSS hash configuration currently programmed on
		 * the port so DCB+RSS keeps the same hash behaviour.
		 */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the 8 user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
3501013af9b6SIntel 
3502013af9b6SIntel int
35031a572499SJingjing Wu init_port_dcb_config(portid_t pid,
35041a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
35051a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
35061a572499SJingjing Wu 		     uint8_t pfc_en)
3507013af9b6SIntel {
3508013af9b6SIntel 	struct rte_eth_conf port_conf;
3509013af9b6SIntel 	struct rte_port *rte_port;
3510013af9b6SIntel 	int retval;
3511013af9b6SIntel 	uint16_t i;
3512013af9b6SIntel 
35132a977b89SWenzhuo Lu 	rte_port = &ports[pid];
3514013af9b6SIntel 
3515013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3516013af9b6SIntel 	/* Enter DCB configuration status */
3517013af9b6SIntel 	dcb_config = 1;
3518013af9b6SIntel 
3519d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3520d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
3521d5354e89SYanglong Wu 
3522013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
3523ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3524013af9b6SIntel 	if (retval < 0)
3525013af9b6SIntel 		return retval;
35260074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3527013af9b6SIntel 
35282f203d44SQi Zhang 	/* re-configure the device . */
35292b0e0ebaSChenbo Xia 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
35302b0e0ebaSChenbo Xia 	if (retval < 0)
35312b0e0ebaSChenbo Xia 		return retval;
35326f51deb9SIvan Ilchenko 
35336f51deb9SIvan Ilchenko 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
35346f51deb9SIvan Ilchenko 	if (retval != 0)
35356f51deb9SIvan Ilchenko 		return retval;
35362a977b89SWenzhuo Lu 
35372a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
35382a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
35392a977b89SWenzhuo Lu 	 */
35402a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
35412a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
35422a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
35432a977b89SWenzhuo Lu 			" for port %d.", pid);
35442a977b89SWenzhuo Lu 		return -1;
35452a977b89SWenzhuo Lu 	}
35462a977b89SWenzhuo Lu 
35472a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
35482a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
35492a977b89SWenzhuo Lu 	 */
35502a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
355186ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
355286ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
355386ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
355486ef65eeSBernard Iremonger 		} else {
35552a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
35562a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
355786ef65eeSBernard Iremonger 		}
35582a977b89SWenzhuo Lu 	} else {
35592a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
35602a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
35612a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
35622a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
35632a977b89SWenzhuo Lu 		} else {
35642a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
35652a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
35662a977b89SWenzhuo Lu 
35672a977b89SWenzhuo Lu 		}
35682a977b89SWenzhuo Lu 	}
35692a977b89SWenzhuo Lu 	rx_free_thresh = 64;
35702a977b89SWenzhuo Lu 
3571013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3572013af9b6SIntel 
3573f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3574013af9b6SIntel 	/* VLAN filter */
35750074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
35761a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3577013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3578013af9b6SIntel 
3579a5279d25SIgor Romanov 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3580a5279d25SIgor Romanov 	if (retval != 0)
3581a5279d25SIgor Romanov 		return retval;
3582a5279d25SIgor Romanov 
3583013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3584013af9b6SIntel 
35857741e4cfSIntel 	rte_port->dcb_flag = 1;
35867741e4cfSIntel 
3587013af9b6SIntel 	return 0;
3588af75078fSIntel }
3589af75078fSIntel 
3590ffc468ffSTetsuya Mukawa static void
3591ffc468ffSTetsuya Mukawa init_port(void)
3592ffc468ffSTetsuya Mukawa {
3593ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3594ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3595ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3596ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3597ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3598ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3599ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3600ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3601ffc468ffSTetsuya Mukawa 	}
360229841336SPhil Yang 
360329841336SPhil Yang 	/* Initialize ports NUMA structures */
360429841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
360529841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
360629841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3607ffc468ffSTetsuya Mukawa }
3608ffc468ffSTetsuya Mukawa 
/* Tear down on forced termination: stop/close the ports via
 * pmd_test_exit() and terminate the interactive prompt.  Called from
 * signal_handler() on SIGINT/SIGTERM.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
3615d3a274ceSZhihong Wang 
3616d3a274ceSZhihong Wang static void
3617cfea1f30SPablo de Lara print_stats(void)
3618cfea1f30SPablo de Lara {
3619cfea1f30SPablo de Lara 	uint8_t i;
3620cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3621cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3622cfea1f30SPablo de Lara 
3623cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3624cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3625cfea1f30SPablo de Lara 
3626cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3627cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3628cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3629683d1e82SIgor Romanov 
3630683d1e82SIgor Romanov 	fflush(stdout);
3631cfea1f30SPablo de Lara }
3632cfea1f30SPablo de Lara 
/*
 * SIGINT/SIGTERM handler: uninitialize optional subsystems (pdump,
 * latency stats), run the full port teardown, set f_quit so the stats
 * loop in main() stops, then re-raise the signal with the default
 * disposition so the process exits with the conventional status.
 * NOTE(review): pmd_test_exit()/printf are not async-signal-safe; this
 * relies on the handler effectively terminating the process — confirm
 * before reusing this pattern elsewhere.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
3655d3a274ceSZhihong Wang 
/*
 * testpmd entry point.  Sequence: install signal handlers, register the
 * log type, initialize the EAL, enumerate probed ports, allocate port
 * state, parse the testpmd-specific arguments (those after the EAL's),
 * configure and start the ports, then either run the interactive prompt
 * or forward packets non-interactively until Ctrl-C / enter.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	/* EAL consumes its own arguments; diag is how many it ate. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			 "Secondary process type not supported.\n");

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	/* Record the ids of all ports probed by the EAL. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the arguments the EAL consumed, then parse testpmd's own. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Lock all pages in memory; failure is non-fatal. */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	/* Optional hotplug support: monitor device attach/detach events. */
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail  to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Optionally replay a command file before entering the prompt. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		/* Non-interactive mode with periodic stats: loop printing
		 * stats every stats_period seconds until a signal sets
		 * f_quit.
		 */
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}
3869