xref: /dpdk/app/test-pmd/testpmd.c (revision c7f5dba7d4bb7971fac51755aad09b71b10cef90)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30*c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67*c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68*c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69*c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70*c7f5dba7SAnatoly Burakov #else
71*c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72*c7f5dba7SAnatoly Burakov #endif
73*c7f5dba7SAnatoly Burakov 
74*c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75*c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76*c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77*c7f5dba7SAnatoly Burakov #else
78*c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79*c7f5dba7SAnatoly Burakov #endif
80*c7f5dba7SAnatoly Burakov 
81*c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82*c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
86af75078fSIntel /* use master core for command line ? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108*c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109*c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110*c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111*c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112*c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114*c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
11763531389SGeorgios Katsikas  * Store specified sockets on which memory pool to be used by ports
11863531389SGeorgios Katsikas  * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
169af75078fSIntel /*
170af75078fSIntel  * Forwarding engines.
171af75078fSIntel  */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine, /* only available when softnic PMD is built */
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine, /* only available with IEEE1588 support */
#endif
	NULL, /* terminator: must remain the last entry */
};
189af75078fSIntel 
190af75078fSIntel struct fwd_config cur_fwd_config;
191af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
192bf56fce1SZhihong Wang uint32_t retry_enabled;
193bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
194bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
195af75078fSIntel 
196af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
197c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
198c8798818SIntel                                       * specified on command-line. */
199cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
200d9a191a0SPhil Yang 
201d9a191a0SPhil Yang /*
 * When running in a container with the 'stats-period' option, the process
 * cannot otherwise be terminated. Set a flag to exit the stats period loop
 * after SIGINT/SIGTERM is received.
204d9a191a0SPhil Yang  */
205d9a191a0SPhil Yang uint8_t f_quit;
206d9a191a0SPhil Yang 
207af75078fSIntel /*
208af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
209af75078fSIntel  */
210af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
211af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
212af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
213af75078fSIntel };
214af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
215af75078fSIntel 
21679bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21779bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21879bec05bSKonstantin Ananyev 
219af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
220e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
221af75078fSIntel 
/* Whether the current configuration is in DCB mode or not; 0 means it is not in DCB mode */
223900550deSIntel uint8_t dcb_config = 0;
224900550deSIntel 
225900550deSIntel /* Whether the dcb is in testing status */
226900550deSIntel uint8_t dcb_test = 0;
227900550deSIntel 
228af75078fSIntel /*
229af75078fSIntel  * Configurable number of RX/TX queues.
230af75078fSIntel  */
231af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
232af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
233af75078fSIntel 
234af75078fSIntel /*
235af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2368599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
237af75078fSIntel  */
2388599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2398599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
240af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
241af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
242af75078fSIntel 
243f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
244af75078fSIntel /*
245af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
246af75078fSIntel  */
247af75078fSIntel 
248f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
249f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
250f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
251af75078fSIntel 
252f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
253f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
255af75078fSIntel 
256af75078fSIntel /*
257af75078fSIntel  * Configurable value of RX free threshold.
258af75078fSIntel  */
259f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
260af75078fSIntel 
261af75078fSIntel /*
262ce8d5614SIntel  * Configurable value of RX drop enable.
263ce8d5614SIntel  */
264f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
265ce8d5614SIntel 
266ce8d5614SIntel /*
267af75078fSIntel  * Configurable value of TX free threshold.
268af75078fSIntel  */
269f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
270af75078fSIntel 
271af75078fSIntel /*
272af75078fSIntel  * Configurable value of TX RS bit threshold.
273af75078fSIntel  */
274f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
275af75078fSIntel 
276af75078fSIntel /*
277af75078fSIntel  * Receive Side Scaling (RSS) configuration.
278af75078fSIntel  */
2798a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
280af75078fSIntel 
281af75078fSIntel /*
282af75078fSIntel  * Port topology configuration
283af75078fSIntel  */
284af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
285af75078fSIntel 
2867741e4cfSIntel /*
2877741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
2887741e4cfSIntel  */
2897741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2907741e4cfSIntel 
291af75078fSIntel /*
2927ee3e944SVasily Philipov  * Flow API isolated mode.
2937ee3e944SVasily Philipov  */
2947ee3e944SVasily Philipov uint8_t flow_isolate_all;
2957ee3e944SVasily Philipov 
2967ee3e944SVasily Philipov /*
297bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
298bc202406SDavid Marchand  */
299bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
300bc202406SDavid Marchand 
301bc202406SDavid Marchand /*
3028ea656f8SGaetan Rivet  * Enable link status change notification
3038ea656f8SGaetan Rivet  */
3048ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3058ea656f8SGaetan Rivet 
3068ea656f8SGaetan Rivet /*
307284c908cSGaetan Rivet  * Enable device removal notification.
308284c908cSGaetan Rivet  */
309284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
310284c908cSGaetan Rivet 
311fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
312fb73e096SJeff Guo 
313284c908cSGaetan Rivet /*
3143af72783SGaetan Rivet  * Display or mask ether events
3153af72783SGaetan Rivet  * Default to all events except VF_MBOX
3163af72783SGaetan Rivet  */
3173af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3183af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3193af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3203af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
321badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3223af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3233af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
324e505d84cSAnatoly Burakov /*
325e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
326e505d84cSAnatoly Burakov  */
327e505d84cSAnatoly Burakov int do_mlockall = 0;
3283af72783SGaetan Rivet 
3293af72783SGaetan Rivet /*
3307b7e5ba7SIntel  * NIC bypass mode configuration options.
3317b7e5ba7SIntel  */
3327b7e5ba7SIntel 
33350c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3347b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
335e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3367b7e5ba7SIntel #endif
3377b7e5ba7SIntel 
338e261265eSRadu Nicolau 
33962d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
34062d3216dSReshma Pattan 
34162d3216dSReshma Pattan /*
34262d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
34362d3216dSReshma Pattan  */
34462d3216dSReshma Pattan uint8_t latencystats_enabled;
34562d3216dSReshma Pattan 
34662d3216dSReshma Pattan /*
 * Lcore ID to serve latency statistics.
34862d3216dSReshma Pattan  */
34962d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
35062d3216dSReshma Pattan 
35162d3216dSReshma Pattan #endif
35262d3216dSReshma Pattan 
3537b7e5ba7SIntel /*
354af75078fSIntel  * Ethernet device configuration.
355af75078fSIntel  */
356af75078fSIntel struct rte_eth_rxmode rx_mode = {
357af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
358af75078fSIntel };
359af75078fSIntel 
36007e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
36107e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
36207e5f7bdSShahaf Shuler };
363fd8c20aaSShahaf Shuler 
/*
 * Flow director configuration: disabled by default (RTE_FDIR_MODE_NONE),
 * with near-all-ones match masks and matched packets dropped to queue 127.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
386af75078fSIntel 
3872950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
388af75078fSIntel 
389ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
390ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
391ed30d9b6SIntel 
392ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
393ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
394ed30d9b6SIntel 
395ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
396ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
397ed30d9b6SIntel 
398a4fd5eeeSElza Mathew /*
399a4fd5eeeSElza Mathew  * Display zero values by default for xstats
400a4fd5eeeSElza Mathew  */
401a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
402a4fd5eeeSElza Mathew 
403c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
404c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4057acf894dSStephen Hurd 
406e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4077e4441c8SRemy Horton /* Bitrate statistics */
4087e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
409e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
410e25e6c70SRemy Horton uint8_t bitrate_enabled;
411e25e6c70SRemy Horton #endif
4127e4441c8SRemy Horton 
413b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
414b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
415b40f8d78SJiayu Hu 
/*
 * Default VXLAN encapsulation header configuration: IPv4 selected, VNI 0,
 * standard VXLAN UDP destination port 4789, loopback source address and
 * broadcast destination.
 */
struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
4321960be7dSNelio Laranjeiro 
/*
 * Default NVGRE encapsulation header configuration: IPv4 selected, TNI 0,
 * loopback source address and broadcast destination.
 */
struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};
447dcd962fcSNelio Laranjeiro 
448ed30d9b6SIntel /* Forward function declarations */
44928caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
45028caa76aSZhiyong Yang 						   struct rte_port *port);
451edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
452f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
45376ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
454d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
455fb73e096SJeff Guo static void eth_dev_event_callback(char *device_name,
456fb73e096SJeff Guo 				enum rte_dev_event_type type,
457fb73e096SJeff Guo 				void *param);
458fb73e096SJeff Guo static int eth_dev_event_callback_register(void);
459fb73e096SJeff Guo static int eth_dev_event_callback_unregister(void);
460fb73e096SJeff Guo 
461ce8d5614SIntel 
462ce8d5614SIntel /*
463ce8d5614SIntel  * Check if all the ports are started.
464ce8d5614SIntel  * If yes, return positive value. If not, return zero.
465ce8d5614SIntel  */
466ce8d5614SIntel static int all_ports_started(void);
467ed30d9b6SIntel 
46852f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
46952f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
47052f38a20SJiayu Hu 
471af75078fSIntel /*
 * Helper function to check if a socket is newly discovered.
 * Returns a positive value if the socket id has not been seen before,
 * zero if it is already known.
474c9cafcc8SShahaf Shuler  */
475c9cafcc8SShahaf Shuler int
476c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
477c9cafcc8SShahaf Shuler {
478c9cafcc8SShahaf Shuler 	unsigned int i;
479c9cafcc8SShahaf Shuler 
480c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
481c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
482c9cafcc8SShahaf Shuler 			return 0;
483c9cafcc8SShahaf Shuler 	}
484c9cafcc8SShahaf Shuler 	return 1;
485c9cafcc8SShahaf Shuler }
486c9cafcc8SShahaf Shuler 
487c9cafcc8SShahaf Shuler /*
488af75078fSIntel  * Setup default configuration.
489af75078fSIntel  */
490af75078fSIntel static void
491af75078fSIntel set_default_fwd_lcores_config(void)
492af75078fSIntel {
493af75078fSIntel 	unsigned int i;
494af75078fSIntel 	unsigned int nb_lc;
4957acf894dSStephen Hurd 	unsigned int sock_num;
496af75078fSIntel 
497af75078fSIntel 	nb_lc = 0;
498af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
499dbfb8ec7SPhil Yang 		if (!rte_lcore_is_enabled(i))
500dbfb8ec7SPhil Yang 			continue;
501c9cafcc8SShahaf Shuler 		sock_num = rte_lcore_to_socket_id(i);
502c9cafcc8SShahaf Shuler 		if (new_socket_id(sock_num)) {
503c9cafcc8SShahaf Shuler 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
504c9cafcc8SShahaf Shuler 				rte_exit(EXIT_FAILURE,
505c9cafcc8SShahaf Shuler 					 "Total sockets greater than %u\n",
506c9cafcc8SShahaf Shuler 					 RTE_MAX_NUMA_NODES);
507c9cafcc8SShahaf Shuler 			}
508c9cafcc8SShahaf Shuler 			socket_ids[num_sockets++] = sock_num;
5097acf894dSStephen Hurd 		}
510f54fe5eeSStephen Hurd 		if (i == rte_get_master_lcore())
511f54fe5eeSStephen Hurd 			continue;
512f54fe5eeSStephen Hurd 		fwd_lcores_cpuids[nb_lc++] = i;
513af75078fSIntel 	}
514af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
515af75078fSIntel 	nb_cfg_lcores = nb_lcores;
516af75078fSIntel 	nb_fwd_lcores = 1;
517af75078fSIntel }
518af75078fSIntel 
519af75078fSIntel static void
520af75078fSIntel set_def_peer_eth_addrs(void)
521af75078fSIntel {
522af75078fSIntel 	portid_t i;
523af75078fSIntel 
524af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
525af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
526af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
527af75078fSIntel 	}
528af75078fSIntel }
529af75078fSIntel 
530af75078fSIntel static void
531af75078fSIntel set_default_fwd_ports_config(void)
532af75078fSIntel {
533af75078fSIntel 	portid_t pt_id;
53465a7360cSMatan Azrad 	int i = 0;
535af75078fSIntel 
53665a7360cSMatan Azrad 	RTE_ETH_FOREACH_DEV(pt_id)
53765a7360cSMatan Azrad 		fwd_ports_ids[i++] = pt_id;
538af75078fSIntel 
539af75078fSIntel 	nb_cfg_ports = nb_ports;
540af75078fSIntel 	nb_fwd_ports = nb_ports;
541af75078fSIntel }
542af75078fSIntel 
/*
 * Set the default forwarding configuration: forwarding lcores,
 * peer MAC addresses and forwarding ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
550af75078fSIntel 
551*c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
552*c7f5dba7SAnatoly Burakov static int
553*c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
554*c7f5dba7SAnatoly Burakov {
555*c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
556*c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
557*c7f5dba7SAnatoly Burakov 
558*c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
559*c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
560*c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
561*c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
562*c7f5dba7SAnatoly Burakov 	 */
563*c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
564*c7f5dba7SAnatoly Burakov 
565*c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
566*c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
567*c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
568*c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
569*c7f5dba7SAnatoly Burakov 		return -1;
570*c7f5dba7SAnatoly Burakov 	}
571*c7f5dba7SAnatoly Burakov 
572*c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
573*c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
574*c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
575*c7f5dba7SAnatoly Burakov 
576*c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
577*c7f5dba7SAnatoly Burakov 
578*c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
579*c7f5dba7SAnatoly Burakov 
580*c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
581*c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
582*c7f5dba7SAnatoly Burakov 		return -1;
583*c7f5dba7SAnatoly Burakov 	}
584*c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
585*c7f5dba7SAnatoly Burakov 
586*c7f5dba7SAnatoly Burakov 	return 0;
587*c7f5dba7SAnatoly Burakov }
588*c7f5dba7SAnatoly Burakov 
/*
 * Bit scan forward: return the zero-based index of the least significant
 * set bit of v. v must be non-zero (__builtin_ctzll is undefined for 0).
 */
static inline uint32_t
bsf64(uint64_t v)
{
	uint32_t lsb_idx;

	lsb_idx = (uint32_t)__builtin_ctzll(v);
	return lsb_idx;
}
594*c7f5dba7SAnatoly Burakov 
/*
 * Return log2 of v rounded up to the next power of two, i.e.
 * ceil(log2(v)); returns 0 for v == 0 (same as the round-up-then-scan
 * formulation this replaces).
 */
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v <= 1)
		return 0;
	/* ceil(log2(v)) == number of bits needed to represent v - 1 */
	return (uint32_t)(64 - __builtin_clzll(v - 1));
}
603*c7f5dba7SAnatoly Burakov 
604*c7f5dba7SAnatoly Burakov static int
605*c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
606*c7f5dba7SAnatoly Burakov {
607*c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
608*c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
609*c7f5dba7SAnatoly Burakov 	 */
610*c7f5dba7SAnatoly Burakov 	int log2 = log2_u64(page_sz);
611*c7f5dba7SAnatoly Burakov 
612*c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
613*c7f5dba7SAnatoly Burakov }
614*c7f5dba7SAnatoly Burakov 
615*c7f5dba7SAnatoly Burakov static void *
616*c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
617*c7f5dba7SAnatoly Burakov {
618*c7f5dba7SAnatoly Burakov 	void *addr;
619*c7f5dba7SAnatoly Burakov 	int flags;
620*c7f5dba7SAnatoly Burakov 
621*c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
622*c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
623*c7f5dba7SAnatoly Burakov 	if (huge)
624*c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
625*c7f5dba7SAnatoly Burakov 
626*c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
627*c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
628*c7f5dba7SAnatoly Burakov 		return NULL;
629*c7f5dba7SAnatoly Burakov 
630*c7f5dba7SAnatoly Burakov 	return addr;
631*c7f5dba7SAnatoly Burakov }
632*c7f5dba7SAnatoly Burakov 
/* Description of an externally allocated memory area. */
struct extmem_param {
	void *addr; /* base virtual address of the area */
	size_t len; /* total length of the area, in bytes */
	size_t pgsz; /* page size backing the area */
	rte_iova_t *iova_table; /* per-page IOVA addresses */
	unsigned int iova_table_len; /* number of entries in iova_table */
};
640*c7f5dba7SAnatoly Burakov 
641*c7f5dba7SAnatoly Burakov static int
642*c7f5dba7SAnatoly Burakov create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
643*c7f5dba7SAnatoly Burakov 		bool huge)
644*c7f5dba7SAnatoly Burakov {
645*c7f5dba7SAnatoly Burakov 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
646*c7f5dba7SAnatoly Burakov 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
647*c7f5dba7SAnatoly Burakov 	unsigned int cur_page, n_pages, pgsz_idx;
648*c7f5dba7SAnatoly Burakov 	size_t mem_sz, cur_pgsz;
649*c7f5dba7SAnatoly Burakov 	rte_iova_t *iovas = NULL;
650*c7f5dba7SAnatoly Burakov 	void *addr;
651*c7f5dba7SAnatoly Burakov 	int ret;
652*c7f5dba7SAnatoly Burakov 
653*c7f5dba7SAnatoly Burakov 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
654*c7f5dba7SAnatoly Burakov 		/* skip anything that is too big */
655*c7f5dba7SAnatoly Burakov 		if (pgsizes[pgsz_idx] > SIZE_MAX)
656*c7f5dba7SAnatoly Burakov 			continue;
657*c7f5dba7SAnatoly Burakov 
658*c7f5dba7SAnatoly Burakov 		cur_pgsz = pgsizes[pgsz_idx];
659*c7f5dba7SAnatoly Burakov 
660*c7f5dba7SAnatoly Burakov 		/* if we were told not to allocate hugepages, override */
661*c7f5dba7SAnatoly Burakov 		if (!huge)
662*c7f5dba7SAnatoly Burakov 			cur_pgsz = sysconf(_SC_PAGESIZE);
663*c7f5dba7SAnatoly Burakov 
664*c7f5dba7SAnatoly Burakov 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
665*c7f5dba7SAnatoly Burakov 		if (ret < 0) {
666*c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
667*c7f5dba7SAnatoly Burakov 			return -1;
668*c7f5dba7SAnatoly Burakov 		}
669*c7f5dba7SAnatoly Burakov 
670*c7f5dba7SAnatoly Burakov 		/* allocate our memory */
671*c7f5dba7SAnatoly Burakov 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
672*c7f5dba7SAnatoly Burakov 
673*c7f5dba7SAnatoly Burakov 		/* if we couldn't allocate memory with a specified page size,
674*c7f5dba7SAnatoly Burakov 		 * that doesn't mean we can't do it with other page sizes, so
675*c7f5dba7SAnatoly Burakov 		 * try another one.
676*c7f5dba7SAnatoly Burakov 		 */
677*c7f5dba7SAnatoly Burakov 		if (addr == NULL)
678*c7f5dba7SAnatoly Burakov 			continue;
679*c7f5dba7SAnatoly Burakov 
680*c7f5dba7SAnatoly Burakov 		/* store IOVA addresses for every page in this memory area */
681*c7f5dba7SAnatoly Burakov 		n_pages = mem_sz / cur_pgsz;
682*c7f5dba7SAnatoly Burakov 
683*c7f5dba7SAnatoly Burakov 		iovas = malloc(sizeof(*iovas) * n_pages);
684*c7f5dba7SAnatoly Burakov 
685*c7f5dba7SAnatoly Burakov 		if (iovas == NULL) {
686*c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
687*c7f5dba7SAnatoly Burakov 			goto fail;
688*c7f5dba7SAnatoly Burakov 		}
689*c7f5dba7SAnatoly Burakov 		/* lock memory if it's not huge pages */
690*c7f5dba7SAnatoly Burakov 		if (!huge)
691*c7f5dba7SAnatoly Burakov 			mlock(addr, mem_sz);
692*c7f5dba7SAnatoly Burakov 
693*c7f5dba7SAnatoly Burakov 		/* populate IOVA addresses */
694*c7f5dba7SAnatoly Burakov 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
695*c7f5dba7SAnatoly Burakov 			rte_iova_t iova;
696*c7f5dba7SAnatoly Burakov 			size_t offset;
697*c7f5dba7SAnatoly Burakov 			void *cur;
698*c7f5dba7SAnatoly Burakov 
699*c7f5dba7SAnatoly Burakov 			offset = cur_pgsz * cur_page;
700*c7f5dba7SAnatoly Burakov 			cur = RTE_PTR_ADD(addr, offset);
701*c7f5dba7SAnatoly Burakov 
702*c7f5dba7SAnatoly Burakov 			/* touch the page before getting its IOVA */
703*c7f5dba7SAnatoly Burakov 			*(volatile char *)cur = 0;
704*c7f5dba7SAnatoly Burakov 
705*c7f5dba7SAnatoly Burakov 			iova = rte_mem_virt2iova(cur);
706*c7f5dba7SAnatoly Burakov 
707*c7f5dba7SAnatoly Burakov 			iovas[cur_page] = iova;
708*c7f5dba7SAnatoly Burakov 		}
709*c7f5dba7SAnatoly Burakov 
710*c7f5dba7SAnatoly Burakov 		break;
711*c7f5dba7SAnatoly Burakov 	}
712*c7f5dba7SAnatoly Burakov 	/* if we couldn't allocate anything */
713*c7f5dba7SAnatoly Burakov 	if (iovas == NULL)
714*c7f5dba7SAnatoly Burakov 		return -1;
715*c7f5dba7SAnatoly Burakov 
716*c7f5dba7SAnatoly Burakov 	param->addr = addr;
717*c7f5dba7SAnatoly Burakov 	param->len = mem_sz;
718*c7f5dba7SAnatoly Burakov 	param->pgsz = cur_pgsz;
719*c7f5dba7SAnatoly Burakov 	param->iova_table = iovas;
720*c7f5dba7SAnatoly Burakov 	param->iova_table_len = n_pages;
721*c7f5dba7SAnatoly Burakov 
722*c7f5dba7SAnatoly Burakov 	return 0;
723*c7f5dba7SAnatoly Burakov fail:
724*c7f5dba7SAnatoly Burakov 	if (iovas)
725*c7f5dba7SAnatoly Burakov 		free(iovas);
726*c7f5dba7SAnatoly Burakov 	if (addr)
727*c7f5dba7SAnatoly Burakov 		munmap(addr, mem_sz);
728*c7f5dba7SAnatoly Burakov 
729*c7f5dba7SAnatoly Burakov 	return -1;
730*c7f5dba7SAnatoly Burakov }
731*c7f5dba7SAnatoly Burakov 
732*c7f5dba7SAnatoly Burakov static int
733*c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
734*c7f5dba7SAnatoly Burakov {
735*c7f5dba7SAnatoly Burakov 	struct extmem_param param;
736*c7f5dba7SAnatoly Burakov 	int socket_id, ret;
737*c7f5dba7SAnatoly Burakov 
738*c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
739*c7f5dba7SAnatoly Burakov 
740*c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
741*c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
742*c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
743*c7f5dba7SAnatoly Burakov 		/* create our heap */
744*c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
745*c7f5dba7SAnatoly Burakov 		if (ret < 0) {
746*c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
747*c7f5dba7SAnatoly Burakov 			return -1;
748*c7f5dba7SAnatoly Burakov 		}
749*c7f5dba7SAnatoly Burakov 	}
750*c7f5dba7SAnatoly Burakov 
751*c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
752*c7f5dba7SAnatoly Burakov 	if (ret < 0) {
753*c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
754*c7f5dba7SAnatoly Burakov 		return -1;
755*c7f5dba7SAnatoly Burakov 	}
756*c7f5dba7SAnatoly Burakov 
757*c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
758*c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
759*c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
760*c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
761*c7f5dba7SAnatoly Burakov 
762*c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
763*c7f5dba7SAnatoly Burakov 
764*c7f5dba7SAnatoly Burakov 	/* not needed any more */
765*c7f5dba7SAnatoly Burakov 	free(param.iova_table);
766*c7f5dba7SAnatoly Burakov 
767*c7f5dba7SAnatoly Burakov 	if (ret < 0) {
768*c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
769*c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
770*c7f5dba7SAnatoly Burakov 		return -1;
771*c7f5dba7SAnatoly Burakov 	}
772*c7f5dba7SAnatoly Burakov 
773*c7f5dba7SAnatoly Burakov 	/* success */
774*c7f5dba7SAnatoly Burakov 
775*c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
776*c7f5dba7SAnatoly Burakov 			param.len >> 20);
777*c7f5dba7SAnatoly Burakov 
778*c7f5dba7SAnatoly Burakov 	return 0;
779*c7f5dba7SAnatoly Burakov }
780*c7f5dba7SAnatoly Burakov 
781af75078fSIntel /*
782af75078fSIntel  * Configuration initialisation done once at init time.
783af75078fSIntel  */
/*
 * Configuration initialisation done once at init time.
 *
 * Create the mbuf pool for a given socket, using the allocation mode
 * selected by the global mp_alloc_type:
 *  - MP_ALLOC_NATIVE: plain rte_pktmbuf_pool_create();
 *  - MP_ALLOC_ANON:   empty pool populated from anonymous memory;
 *  - MP_ALLOC_XMEM / MP_ALLOC_XMEM_HUGE: pool placed on the external
 *    memory heap set up by setup_extmem().
 * Exits the application on any failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* total per-object size: mbuf header plus data segment */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			/* populate from anonymous mappings; 0 objects
			 * populated means failure here
			 */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			/* the external heap is addressed by its pseudo
			 * socket id
			 */
			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* NOTE: success paths deliberately fall through the switch into this
	 * label; it is reached both on success (rte_mp != NULL) and on the
	 * MP_ALLOC_ANON failure gotos (rte_mp == NULL).
	 */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
863af75078fSIntel 
86420a0286fSLiu Xiaofeng /*
86520a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
86620a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
86720a0286fSLiu Xiaofeng  */
86820a0286fSLiu Xiaofeng static int
86920a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
87020a0286fSLiu Xiaofeng {
87120a0286fSLiu Xiaofeng 	static int warning_once = 0;
87220a0286fSLiu Xiaofeng 
873c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
87420a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
87520a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
87620a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
87720a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
87820a0286fSLiu Xiaofeng 			       " --numa.\n");
87920a0286fSLiu Xiaofeng 		warning_once = 1;
88020a0286fSLiu Xiaofeng 		return -1;
88120a0286fSLiu Xiaofeng 	}
88220a0286fSLiu Xiaofeng 	return 0;
88320a0286fSLiu Xiaofeng }
88420a0286fSLiu Xiaofeng 
8853f7311baSWei Dai /*
8863f7311baSWei Dai  * Get the allowed maximum number of RX queues.
8873f7311baSWei Dai  * *pid return the port id which has minimal value of
8883f7311baSWei Dai  * max_rx_queues in all ports.
8893f7311baSWei Dai  */
8903f7311baSWei Dai queueid_t
8913f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
8923f7311baSWei Dai {
8933f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
8943f7311baSWei Dai 	portid_t pi;
8953f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
8963f7311baSWei Dai 
8973f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
8983f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
8993f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9003f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9013f7311baSWei Dai 			*pid = pi;
9023f7311baSWei Dai 		}
9033f7311baSWei Dai 	}
9043f7311baSWei Dai 	return allowed_max_rxq;
9053f7311baSWei Dai }
9063f7311baSWei Dai 
9073f7311baSWei Dai /*
9083f7311baSWei Dai  * Check input rxq is valid or not.
9093f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9103f7311baSWei Dai  * of RX queues of all ports, it is valid.
9113f7311baSWei Dai  * if valid, return 0, else return -1
9123f7311baSWei Dai  */
9133f7311baSWei Dai int
9143f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9153f7311baSWei Dai {
9163f7311baSWei Dai 	queueid_t allowed_max_rxq;
9173f7311baSWei Dai 	portid_t pid = 0;
9183f7311baSWei Dai 
9193f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9203f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9213f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9223f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9233f7311baSWei Dai 		       rxq,
9243f7311baSWei Dai 		       allowed_max_rxq,
9253f7311baSWei Dai 		       pid);
9263f7311baSWei Dai 		return -1;
9273f7311baSWei Dai 	}
9283f7311baSWei Dai 	return 0;
9293f7311baSWei Dai }
9303f7311baSWei Dai 
93136db4f6cSWei Dai /*
93236db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
93336db4f6cSWei Dai  * *pid return the port id which has minimal value of
93436db4f6cSWei Dai  * max_tx_queues in all ports.
93536db4f6cSWei Dai  */
93636db4f6cSWei Dai queueid_t
93736db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
93836db4f6cSWei Dai {
93936db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
94036db4f6cSWei Dai 	portid_t pi;
94136db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
94236db4f6cSWei Dai 
94336db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
94436db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
94536db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
94636db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
94736db4f6cSWei Dai 			*pid = pi;
94836db4f6cSWei Dai 		}
94936db4f6cSWei Dai 	}
95036db4f6cSWei Dai 	return allowed_max_txq;
95136db4f6cSWei Dai }
95236db4f6cSWei Dai 
95336db4f6cSWei Dai /*
95436db4f6cSWei Dai  * Check input txq is valid or not.
95536db4f6cSWei Dai  * If input txq is not greater than any of maximum number
95636db4f6cSWei Dai  * of TX queues of all ports, it is valid.
95736db4f6cSWei Dai  * if valid, return 0, else return -1
95836db4f6cSWei Dai  */
95936db4f6cSWei Dai int
96036db4f6cSWei Dai check_nb_txq(queueid_t txq)
96136db4f6cSWei Dai {
96236db4f6cSWei Dai 	queueid_t allowed_max_txq;
96336db4f6cSWei Dai 	portid_t pid = 0;
96436db4f6cSWei Dai 
96536db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
96636db4f6cSWei Dai 	if (txq > allowed_max_txq) {
96736db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
96836db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
96936db4f6cSWei Dai 		       txq,
97036db4f6cSWei Dai 		       allowed_max_txq,
97136db4f6cSWei Dai 		       pid);
97236db4f6cSWei Dai 		return -1;
97336db4f6cSWei Dai 	}
97436db4f6cSWei Dai 	return 0;
97536db4f6cSWei Dai }
97636db4f6cSWei Dai 
/*
 * One-time global configuration: allocate the per-lcore forwarding
 * contexts, apply default port configuration, create the mbuf pools,
 * assign a pool and GSO/GRO context to each forwarding lcore, and set
 * up the forwarding streams.  Exits the application on any allocation
 * failure.  Must run before forwarding starts.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* with NUMA enabled, start from "no explicit placement" everywhere */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* drop FAST_FREE from the default config if the device
		 * does not advertise it
		 */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		/* count ports per socket (explicit config wins over the
		 * device's detected socket)
		 */
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool if none exists for this
		 * lcore's socket
		 */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	/* softnic ports need a pointer to the fwd lcore array */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1143ce8d5614SIntel 
11442950a769SDeclan Doherty 
11452950a769SDeclan Doherty void
1146a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
11472950a769SDeclan Doherty {
11482950a769SDeclan Doherty 	struct rte_port *port;
11492950a769SDeclan Doherty 
11502950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
11512950a769SDeclan Doherty 	port = &ports[new_port_id];
11522950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
11532950a769SDeclan Doherty 
11542950a769SDeclan Doherty 	/* set flag to initialize port/queue */
11552950a769SDeclan Doherty 	port->need_reconfig = 1;
11562950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1157a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
11582950a769SDeclan Doherty 
11592950a769SDeclan Doherty 	init_port_config();
11602950a769SDeclan Doherty }
11612950a769SDeclan Doherty 
11622950a769SDeclan Doherty 
/*
 * (Re)allocate the global forwarding-stream array.
 *
 * First validates nb_rxq/nb_txq against every port's limits and resolves
 * each port's socket id (NUMA config, detected socket, or socket_num).
 * Then resizes the fwd_streams array to nb_ports * max(nb_rxq, nb_txq)
 * entries, freeing the old array if the size changed.
 * Returns 0 on success, -1 on invalid queue counts; exits on allocation
 * failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* explicit per-port NUMA config takes precedence */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* one stream per queue; at least one queue direction must exist */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1248af75078fSIntel 
1249af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1250af75078fSIntel static void
1251af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1252af75078fSIntel {
1253af75078fSIntel 	unsigned int total_burst;
1254af75078fSIntel 	unsigned int nb_burst;
1255af75078fSIntel 	unsigned int burst_stats[3];
1256af75078fSIntel 	uint16_t pktnb_stats[3];
1257af75078fSIntel 	uint16_t nb_pkt;
1258af75078fSIntel 	int burst_percent[3];
1259af75078fSIntel 
1260af75078fSIntel 	/*
1261af75078fSIntel 	 * First compute the total number of packet bursts and the
1262af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
1263af75078fSIntel 	 */
1264af75078fSIntel 	total_burst = 0;
1265af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1266af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1267af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1268af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1269af75078fSIntel 		if (nb_burst == 0)
1270af75078fSIntel 			continue;
1271af75078fSIntel 		total_burst += nb_burst;
1272af75078fSIntel 		if (nb_burst > burst_stats[0]) {
1273af75078fSIntel 			burst_stats[1] = burst_stats[0];
1274af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
1275af75078fSIntel 			burst_stats[0] = nb_burst;
1276af75078fSIntel 			pktnb_stats[0] = nb_pkt;
1277fe613657SDaniel Shelepov 		} else if (nb_burst > burst_stats[1]) {
1278fe613657SDaniel Shelepov 			burst_stats[1] = nb_burst;
1279fe613657SDaniel Shelepov 			pktnb_stats[1] = nb_pkt;
1280af75078fSIntel 		}
1281af75078fSIntel 	}
1282af75078fSIntel 	if (total_burst == 0)
1283af75078fSIntel 		return;
1284af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1285af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1286af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
1287af75078fSIntel 	if (burst_stats[0] == total_burst) {
1288af75078fSIntel 		printf("]\n");
1289af75078fSIntel 		return;
1290af75078fSIntel 	}
1291af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
1292af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
1293af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
1294af75078fSIntel 		return;
1295af75078fSIntel 	}
1296af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1297af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1298af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1299af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1300af75078fSIntel 		return;
1301af75078fSIntel 	}
1302af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
1303af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1304af75078fSIntel }
1305af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1306af75078fSIntel 
/*
 * Print the forwarding statistics of one port: hardware counters read from
 * the PMD (*stats) combined with testpmd's software counters kept in
 * ports[port_id].  Two layouts exist: a compact one when no RX/TX
 * queue-stats mapping is configured on the port, and a wide column-aligned
 * one (followed by the per-register queue counters) when a mapping is
 * active.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: no queue-stats registers are mapped. */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* Only show error lines when something actually went wrong. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: columns aligned with the per-register dump below. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/*
	 * Burst-size histograms come from the streams attached to this port
	 * by stop_packet_forwarding(); they are only set when the number of
	 * streams does not exceed the number of ports.
	 */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Dump every mapped queue-stats register, RX first, then TX. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1389af75078fSIntel 
1390af75078fSIntel static void
1391af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1392af75078fSIntel {
1393af75078fSIntel 	struct fwd_stream *fs;
1394af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1395af75078fSIntel 
1396af75078fSIntel 	fs = fwd_streams[stream_id];
1397af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1398af75078fSIntel 	    (fs->fwd_dropped == 0))
1399af75078fSIntel 		return;
1400af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1401af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1402af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1403af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1404af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1405af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1406af75078fSIntel 
1407af75078fSIntel 	/* if checksum mode */
1408af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1409013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1410013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1411af75078fSIntel 	}
1412af75078fSIntel 
1413af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1414af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1415af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1416af75078fSIntel #endif
1417af75078fSIntel }
1418af75078fSIntel 
1419af75078fSIntel static void
14207741e4cfSIntel flush_fwd_rx_queues(void)
1421af75078fSIntel {
1422af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1423af75078fSIntel 	portid_t  rxp;
14247741e4cfSIntel 	portid_t port_id;
1425af75078fSIntel 	queueid_t rxq;
1426af75078fSIntel 	uint16_t  nb_rx;
1427af75078fSIntel 	uint16_t  i;
1428af75078fSIntel 	uint8_t   j;
1429f487715fSReshma Pattan 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1430594302c7SJames Poole 	uint64_t timer_period;
1431f487715fSReshma Pattan 
1432f487715fSReshma Pattan 	/* convert to number of cycles */
1433594302c7SJames Poole 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
1434af75078fSIntel 
1435af75078fSIntel 	for (j = 0; j < 2; j++) {
14367741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1437af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
14387741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
1439f487715fSReshma Pattan 				/**
1440f487715fSReshma Pattan 				* testpmd can stuck in the below do while loop
1441f487715fSReshma Pattan 				* if rte_eth_rx_burst() always returns nonzero
1442f487715fSReshma Pattan 				* packets. So timer is added to exit this loop
1443f487715fSReshma Pattan 				* after 1sec timer expiry.
1444f487715fSReshma Pattan 				*/
1445f487715fSReshma Pattan 				prev_tsc = rte_rdtsc();
1446af75078fSIntel 				do {
14477741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1448013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
1449af75078fSIntel 					for (i = 0; i < nb_rx; i++)
1450af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
1451f487715fSReshma Pattan 
1452f487715fSReshma Pattan 					cur_tsc = rte_rdtsc();
1453f487715fSReshma Pattan 					diff_tsc = cur_tsc - prev_tsc;
1454f487715fSReshma Pattan 					timer_tsc += diff_tsc;
1455f487715fSReshma Pattan 				} while ((nb_rx > 0) &&
1456f487715fSReshma Pattan 					(timer_tsc < timer_period));
1457f487715fSReshma Pattan 				timer_tsc = 0;
1458af75078fSIntel 			}
1459af75078fSIntel 		}
1460af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1461af75078fSIntel 	}
1462af75078fSIntel }
1463af75078fSIntel 
/*
 * Main forwarding loop of one logical core: repeatedly invoke the engine's
 * packet_fwd callback on every stream assigned to this lcore until
 * fc->stopped is set by stop_packet_forwarding().  Optionally drives the
 * periodic bitrate and latency statistics when those libraries are compiled
 * in and this lcore was chosen for them.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	/* Snapshot the port count once; used by the periodic bitrate pass. */
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore owns the contiguous slice of streams starting at
	 * stream_idx. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Recompute bitrates roughly once per second, on the one
		 * lcore designated for it. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats are likewise updated by a single lcore. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1506af75078fSIntel 
1507af75078fSIntel static int
1508af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1509af75078fSIntel {
1510af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1511af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1512af75078fSIntel 	return 0;
1513af75078fSIntel }
1514af75078fSIntel 
1515af75078fSIntel /*
1516af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1517af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1518af75078fSIntel  */
1519af75078fSIntel static int
1520af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1521af75078fSIntel {
1522af75078fSIntel 	struct fwd_lcore *fwd_lc;
1523af75078fSIntel 	struct fwd_lcore tmp_lcore;
1524af75078fSIntel 
1525af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1526af75078fSIntel 	tmp_lcore = *fwd_lc;
1527af75078fSIntel 	tmp_lcore.stopped = 1;
1528af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1529af75078fSIntel 	return 0;
1530af75078fSIntel }
1531af75078fSIntel 
1532af75078fSIntel /*
1533af75078fSIntel  * Launch packet forwarding:
1534af75078fSIntel  *     - Setup per-port forwarding context.
1535af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1536af75078fSIntel  */
1537af75078fSIntel static void
1538af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1539af75078fSIntel {
1540af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1541af75078fSIntel 	unsigned int i;
1542af75078fSIntel 	unsigned int lc_id;
1543af75078fSIntel 	int diag;
1544af75078fSIntel 
1545af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1546af75078fSIntel 	if (port_fwd_begin != NULL) {
1547af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1548af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1549af75078fSIntel 	}
1550af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1551af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1552af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1553af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1554af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1555af75078fSIntel 						     fwd_lcores[i], lc_id);
1556af75078fSIntel 			if (diag != 0)
1557af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1558af75078fSIntel 				       lc_id, diag);
1559af75078fSIntel 		}
1560af75078fSIntel 	}
1561af75078fSIntel }
1562af75078fSIntel 
1563af75078fSIntel /*
156403ce2c53SMatan Azrad  * Update the forward ports list.
156503ce2c53SMatan Azrad  */
156603ce2c53SMatan Azrad void
156703ce2c53SMatan Azrad update_fwd_ports(portid_t new_pid)
156803ce2c53SMatan Azrad {
156903ce2c53SMatan Azrad 	unsigned int i;
157003ce2c53SMatan Azrad 	unsigned int new_nb_fwd_ports = 0;
157103ce2c53SMatan Azrad 	int move = 0;
157203ce2c53SMatan Azrad 
157303ce2c53SMatan Azrad 	for (i = 0; i < nb_fwd_ports; ++i) {
157403ce2c53SMatan Azrad 		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
157503ce2c53SMatan Azrad 			move = 1;
157603ce2c53SMatan Azrad 		else if (move)
157703ce2c53SMatan Azrad 			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
157803ce2c53SMatan Azrad 		else
157903ce2c53SMatan Azrad 			new_nb_fwd_ports++;
158003ce2c53SMatan Azrad 	}
158103ce2c53SMatan Azrad 	if (new_pid < RTE_MAX_ETHPORTS)
158203ce2c53SMatan Azrad 		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
158303ce2c53SMatan Azrad 
158403ce2c53SMatan Azrad 	nb_fwd_ports = new_nb_fwd_ports;
158503ce2c53SMatan Azrad 	nb_cfg_ports = new_nb_fwd_ports;
158603ce2c53SMatan Azrad }
158703ce2c53SMatan Azrad 
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Sanity-check the queue configuration against the selected engine:
	 * rxonly needs RX queues, txonly needs TX queues, every other engine
	 * needs both. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must carry the DCB flag and more
	 * than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets from the RX queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot the current HW stats into port->stats so that
	 * stop_packet_forwarding() can report per-run deltas. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters for the new run. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally send with_tx_first initial TXONLY bursts to kick off
	 * traffic in loopback setups before the real engine is launched. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1694af75078fSIntel 
/*
 * Stop the current forwarding run: signal every forwarding lcore to stop,
 * wait for them, fold the per-stream software counters back into the ports,
 * and print per-port plus accumulated statistics.  Port stats are reported
 * as deltas against the snapshot taken by start_packet_forwarding().
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	/* test_done set means no run is active — nothing to stop. */
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Give the engine a chance to clean up each forwarding port. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Merge each stream's software counters into its ports.  When there
	 * are more streams than ports, stats are shown per stream and the
	 * ports' stream pointers are cleared; otherwise the stream is
	 * recorded on its ports so fwd_port_stats_display() can show its
	 * burst statistics. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Convert absolute HW counters to per-run deltas by
		 * subtracting the snapshot taken at start, then clear the
		 * snapshot fields. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1832af75078fSIntel 
1833cfae07fdSOuyang Changchun void
1834cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1835cfae07fdSOuyang Changchun {
1836492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1837cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1838cfae07fdSOuyang Changchun }
1839cfae07fdSOuyang Changchun 
1840cfae07fdSOuyang Changchun void
1841cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1842cfae07fdSOuyang Changchun {
1843492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1844cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1845cfae07fdSOuyang Changchun }
1846cfae07fdSOuyang Changchun 
1847ce8d5614SIntel static int
1848ce8d5614SIntel all_ports_started(void)
1849ce8d5614SIntel {
1850ce8d5614SIntel 	portid_t pi;
1851ce8d5614SIntel 	struct rte_port *port;
1852ce8d5614SIntel 
18537d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1854ce8d5614SIntel 		port = &ports[pi];
1855ce8d5614SIntel 		/* Check if there is a port which is not started */
185641b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
185741b05095SBernard Iremonger 			(port->slave_flag == 0))
1858ce8d5614SIntel 			return 0;
1859ce8d5614SIntel 	}
1860ce8d5614SIntel 
1861ce8d5614SIntel 	/* No port is not started */
1862ce8d5614SIntel 	return 1;
1863ce8d5614SIntel }
1864ce8d5614SIntel 
1865148f963fSBruce Richardson int
18666018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
18676018eb8cSShahaf Shuler {
18686018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
18696018eb8cSShahaf Shuler 
18706018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
18716018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
18726018eb8cSShahaf Shuler 		return 0;
18736018eb8cSShahaf Shuler 	return 1;
18746018eb8cSShahaf Shuler }
18756018eb8cSShahaf Shuler 
18766018eb8cSShahaf Shuler int
1877edab33b1STetsuya Mukawa all_ports_stopped(void)
1878edab33b1STetsuya Mukawa {
1879edab33b1STetsuya Mukawa 	portid_t pi;
1880edab33b1STetsuya Mukawa 
18817d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
18826018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1883edab33b1STetsuya Mukawa 			return 0;
1884edab33b1STetsuya Mukawa 	}
1885edab33b1STetsuya Mukawa 
1886edab33b1STetsuya Mukawa 	return 1;
1887edab33b1STetsuya Mukawa }
1888edab33b1STetsuya Mukawa 
1889edab33b1STetsuya Mukawa int
1890edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1891edab33b1STetsuya Mukawa {
1892edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1893edab33b1STetsuya Mukawa 		return 0;
1894edab33b1STetsuya Mukawa 
1895edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1896edab33b1STetsuya Mukawa 		return 0;
1897edab33b1STetsuya Mukawa 
1898edab33b1STetsuya Mukawa 	return 1;
1899edab33b1STetsuya Mukawa }
1900edab33b1STetsuya Mukawa 
1901edab33b1STetsuya Mukawa static int
1902edab33b1STetsuya Mukawa port_is_closed(portid_t port_id)
1903edab33b1STetsuya Mukawa {
1904edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1905edab33b1STetsuya Mukawa 		return 0;
1906edab33b1STetsuya Mukawa 
1907edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1908edab33b1STetsuya Mukawa 		return 0;
1909edab33b1STetsuya Mukawa 
1910edab33b1STetsuya Mukawa 	return 1;
1911edab33b1STetsuya Mukawa }
1912edab33b1STetsuya Mukawa 
1913edab33b1STetsuya Mukawa int
1914ce8d5614SIntel start_port(portid_t pid)
1915ce8d5614SIntel {
191692d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1917ce8d5614SIntel 	portid_t pi;
1918ce8d5614SIntel 	queueid_t qi;
1919ce8d5614SIntel 	struct rte_port *port;
19202950a769SDeclan Doherty 	struct ether_addr mac_addr;
192176ad4a2dSGaetan Rivet 	enum rte_eth_event_type event_type;
1922ce8d5614SIntel 
19234468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19244468635fSMichael Qiu 		return 0;
19254468635fSMichael Qiu 
1926ce8d5614SIntel 	if(dcb_config)
1927ce8d5614SIntel 		dcb_test = 1;
19287d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1929edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1930ce8d5614SIntel 			continue;
1931ce8d5614SIntel 
193292d2703eSMichael Qiu 		need_check_link_status = 0;
1933ce8d5614SIntel 		port = &ports[pi];
1934ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1935ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1936ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1937ce8d5614SIntel 			continue;
1938ce8d5614SIntel 		}
1939ce8d5614SIntel 
1940ce8d5614SIntel 		if (port->need_reconfig > 0) {
1941ce8d5614SIntel 			port->need_reconfig = 0;
1942ce8d5614SIntel 
19437ee3e944SVasily Philipov 			if (flow_isolate_all) {
19447ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19457ee3e944SVasily Philipov 				if (ret) {
19467ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19477ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19487ee3e944SVasily Philipov 					return -1;
19497ee3e944SVasily Philipov 				}
19507ee3e944SVasily Philipov 			}
19517ee3e944SVasily Philipov 
19525706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
195320a0286fSLiu Xiaofeng 					port->socket_id);
1954ce8d5614SIntel 			/* configure port */
1955ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1956ce8d5614SIntel 						&(port->dev_conf));
1957ce8d5614SIntel 			if (diag != 0) {
1958ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1959ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1960ce8d5614SIntel 					printf("Port %d can not be set back "
1961ce8d5614SIntel 							"to stopped\n", pi);
1962ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1963ce8d5614SIntel 				/* try to reconfigure port next time */
1964ce8d5614SIntel 				port->need_reconfig = 1;
1965148f963fSBruce Richardson 				return -1;
1966ce8d5614SIntel 			}
1967ce8d5614SIntel 		}
1968ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1969ce8d5614SIntel 			port->need_reconfig_queues = 0;
1970ce8d5614SIntel 			/* setup tx queues */
1971ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1972b6ea6408SIntel 				if ((numa_support) &&
1973b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1974b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1975d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1976d44f8a48SQi Zhang 						txring_numa[pi],
1977d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1978b6ea6408SIntel 				else
1979b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1980d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
1981d44f8a48SQi Zhang 						port->socket_id,
1982d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
1983b6ea6408SIntel 
1984ce8d5614SIntel 				if (diag == 0)
1985ce8d5614SIntel 					continue;
1986ce8d5614SIntel 
1987ce8d5614SIntel 				/* Fail to setup tx queue, return */
1988ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1989ce8d5614SIntel 							RTE_PORT_HANDLING,
1990ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1991ce8d5614SIntel 					printf("Port %d can not be set back "
1992ce8d5614SIntel 							"to stopped\n", pi);
1993d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
1994d44f8a48SQi Zhang 				       pi);
1995ce8d5614SIntel 				/* try to reconfigure queues next time */
1996ce8d5614SIntel 				port->need_reconfig_queues = 1;
1997148f963fSBruce Richardson 				return -1;
1998ce8d5614SIntel 			}
1999ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2000d44f8a48SQi Zhang 				/* setup rx queues */
2001b6ea6408SIntel 				if ((numa_support) &&
2002b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2003b6ea6408SIntel 					struct rte_mempool * mp =
2004b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2005b6ea6408SIntel 					if (mp == NULL) {
2006b6ea6408SIntel 						printf("Failed to setup RX queue:"
2007b6ea6408SIntel 							"No mempool allocation"
2008b6ea6408SIntel 							" on the socket %d\n",
2009b6ea6408SIntel 							rxring_numa[pi]);
2010148f963fSBruce Richardson 						return -1;
2011b6ea6408SIntel 					}
2012b6ea6408SIntel 
2013b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2014d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2015d44f8a48SQi Zhang 					     rxring_numa[pi],
2016d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2017d44f8a48SQi Zhang 					     mp);
20181e1d6bddSBernard Iremonger 				} else {
20191e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20201e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20211e1d6bddSBernard Iremonger 					if (mp == NULL) {
20221e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20231e1d6bddSBernard Iremonger 							"No mempool allocation"
20241e1d6bddSBernard Iremonger 							" on the socket %d\n",
20251e1d6bddSBernard Iremonger 							port->socket_id);
20261e1d6bddSBernard Iremonger 						return -1;
2027b6ea6408SIntel 					}
2028b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2029d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2030d44f8a48SQi Zhang 					     port->socket_id,
2031d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2032d44f8a48SQi Zhang 					     mp);
20331e1d6bddSBernard Iremonger 				}
2034ce8d5614SIntel 				if (diag == 0)
2035ce8d5614SIntel 					continue;
2036ce8d5614SIntel 
2037ce8d5614SIntel 				/* Fail to setup rx queue, return */
2038ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2039ce8d5614SIntel 							RTE_PORT_HANDLING,
2040ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2041ce8d5614SIntel 					printf("Port %d can not be set back "
2042ce8d5614SIntel 							"to stopped\n", pi);
2043d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2044d44f8a48SQi Zhang 				       pi);
2045ce8d5614SIntel 				/* try to reconfigure queues next time */
2046ce8d5614SIntel 				port->need_reconfig_queues = 1;
2047148f963fSBruce Richardson 				return -1;
2048ce8d5614SIntel 			}
2049ce8d5614SIntel 		}
205076ad4a2dSGaetan Rivet 
2051ce8d5614SIntel 		/* start port */
2052ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2053ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2054ce8d5614SIntel 
2055ce8d5614SIntel 			/* Fail to setup rx queue, return */
2056ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2057ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2058ce8d5614SIntel 				printf("Port %d can not be set back to "
2059ce8d5614SIntel 							"stopped\n", pi);
2060ce8d5614SIntel 			continue;
2061ce8d5614SIntel 		}
2062ce8d5614SIntel 
2063ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2064ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2065ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2066ce8d5614SIntel 
20672950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2068d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
20692950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
20702950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
20712950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2072d8c89163SZijie Pan 
2073ce8d5614SIntel 		/* at least one port started, need checking link status */
2074ce8d5614SIntel 		need_check_link_status = 1;
2075ce8d5614SIntel 	}
2076ce8d5614SIntel 
20774fb82244SMatan Azrad 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
20784fb82244SMatan Azrad 	     event_type < RTE_ETH_EVENT_MAX;
20794fb82244SMatan Azrad 	     event_type++) {
20804fb82244SMatan Azrad 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
20814fb82244SMatan Azrad 						event_type,
20824fb82244SMatan Azrad 						eth_event_callback,
20834fb82244SMatan Azrad 						NULL);
20844fb82244SMatan Azrad 		if (diag) {
20854fb82244SMatan Azrad 			printf("Failed to setup even callback for event %d\n",
20864fb82244SMatan Azrad 				event_type);
20874fb82244SMatan Azrad 			return -1;
20884fb82244SMatan Azrad 		}
20894fb82244SMatan Azrad 	}
20904fb82244SMatan Azrad 
209192d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2092edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
209392d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2094ce8d5614SIntel 		printf("Please stop the ports first\n");
2095ce8d5614SIntel 
2096ce8d5614SIntel 	printf("Done\n");
2097148f963fSBruce Richardson 	return 0;
2098ce8d5614SIntel }
2099ce8d5614SIntel 
2100ce8d5614SIntel void
2101ce8d5614SIntel stop_port(portid_t pid)
2102ce8d5614SIntel {
2103ce8d5614SIntel 	portid_t pi;
2104ce8d5614SIntel 	struct rte_port *port;
2105ce8d5614SIntel 	int need_check_link_status = 0;
2106ce8d5614SIntel 
2107ce8d5614SIntel 	if (dcb_test) {
2108ce8d5614SIntel 		dcb_test = 0;
2109ce8d5614SIntel 		dcb_config = 0;
2110ce8d5614SIntel 	}
21114468635fSMichael Qiu 
21124468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21134468635fSMichael Qiu 		return;
21144468635fSMichael Qiu 
2115ce8d5614SIntel 	printf("Stopping ports...\n");
2116ce8d5614SIntel 
21177d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21184468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2119ce8d5614SIntel 			continue;
2120ce8d5614SIntel 
2121a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2122a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2123a8ef3e3aSBernard Iremonger 			continue;
2124a8ef3e3aSBernard Iremonger 		}
2125a8ef3e3aSBernard Iremonger 
21260e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21270e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21280e545d30SBernard Iremonger 			continue;
21290e545d30SBernard Iremonger 		}
21300e545d30SBernard Iremonger 
2131ce8d5614SIntel 		port = &ports[pi];
2132ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2133ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
2134ce8d5614SIntel 			continue;
2135ce8d5614SIntel 
2136ce8d5614SIntel 		rte_eth_dev_stop(pi);
2137ce8d5614SIntel 
2138ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2139ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2140ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
2141ce8d5614SIntel 		need_check_link_status = 1;
2142ce8d5614SIntel 	}
2143bc202406SDavid Marchand 	if (need_check_link_status && !no_link_check)
2144edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
2145ce8d5614SIntel 
2146ce8d5614SIntel 	printf("Done\n");
2147ce8d5614SIntel }
2148ce8d5614SIntel 
2149ce8d5614SIntel void
2150ce8d5614SIntel close_port(portid_t pid)
2151ce8d5614SIntel {
2152ce8d5614SIntel 	portid_t pi;
2153ce8d5614SIntel 	struct rte_port *port;
2154ce8d5614SIntel 
21554468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
21564468635fSMichael Qiu 		return;
21574468635fSMichael Qiu 
2158ce8d5614SIntel 	printf("Closing ports...\n");
2159ce8d5614SIntel 
21607d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
21614468635fSMichael Qiu 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2162ce8d5614SIntel 			continue;
2163ce8d5614SIntel 
2164a8ef3e3aSBernard Iremonger 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2165a8ef3e3aSBernard Iremonger 			printf("Please remove port %d from forwarding configuration.\n", pi);
2166a8ef3e3aSBernard Iremonger 			continue;
2167a8ef3e3aSBernard Iremonger 		}
2168a8ef3e3aSBernard Iremonger 
21690e545d30SBernard Iremonger 		if (port_is_bonding_slave(pi)) {
21700e545d30SBernard Iremonger 			printf("Please remove port %d from bonded device.\n", pi);
21710e545d30SBernard Iremonger 			continue;
21720e545d30SBernard Iremonger 		}
21730e545d30SBernard Iremonger 
2174ce8d5614SIntel 		port = &ports[pi];
2175ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2176d4e8ad64SMichael Qiu 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2177d4e8ad64SMichael Qiu 			printf("Port %d is already closed\n", pi);
2178d4e8ad64SMichael Qiu 			continue;
2179d4e8ad64SMichael Qiu 		}
2180d4e8ad64SMichael Qiu 
2181d4e8ad64SMichael Qiu 		if (rte_atomic16_cmpset(&(port->port_status),
2182ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2183ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
2184ce8d5614SIntel 			continue;
2185ce8d5614SIntel 		}
2186ce8d5614SIntel 
2187938a184aSAdrien Mazarguil 		if (port->flow_list)
2188938a184aSAdrien Mazarguil 			port_flow_flush(pi);
2189ce8d5614SIntel 		rte_eth_dev_close(pi);
2190ce8d5614SIntel 
2191ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2192ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2193b38bb262SPablo de Lara 			printf("Port %d cannot be set to closed\n", pi);
2194ce8d5614SIntel 	}
2195ce8d5614SIntel 
2196ce8d5614SIntel 	printf("Done\n");
2197ce8d5614SIntel }
2198ce8d5614SIntel 
2199edab33b1STetsuya Mukawa void
220097f1e196SWei Dai reset_port(portid_t pid)
220197f1e196SWei Dai {
220297f1e196SWei Dai 	int diag;
220397f1e196SWei Dai 	portid_t pi;
220497f1e196SWei Dai 	struct rte_port *port;
220597f1e196SWei Dai 
220697f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
220797f1e196SWei Dai 		return;
220897f1e196SWei Dai 
220997f1e196SWei Dai 	printf("Resetting ports...\n");
221097f1e196SWei Dai 
221197f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
221297f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
221397f1e196SWei Dai 			continue;
221497f1e196SWei Dai 
221597f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
221697f1e196SWei Dai 			printf("Please remove port %d from forwarding "
221797f1e196SWei Dai 			       "configuration.\n", pi);
221897f1e196SWei Dai 			continue;
221997f1e196SWei Dai 		}
222097f1e196SWei Dai 
222197f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
222297f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
222397f1e196SWei Dai 			       pi);
222497f1e196SWei Dai 			continue;
222597f1e196SWei Dai 		}
222697f1e196SWei Dai 
222797f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
222897f1e196SWei Dai 		if (diag == 0) {
222997f1e196SWei Dai 			port = &ports[pi];
223097f1e196SWei Dai 			port->need_reconfig = 1;
223197f1e196SWei Dai 			port->need_reconfig_queues = 1;
223297f1e196SWei Dai 		} else {
223397f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
223497f1e196SWei Dai 		}
223597f1e196SWei Dai 	}
223697f1e196SWei Dai 
223797f1e196SWei Dai 	printf("Done\n");
223897f1e196SWei Dai }
223997f1e196SWei Dai 
2240fb73e096SJeff Guo static int
2241fb73e096SJeff Guo eth_dev_event_callback_register(void)
2242fb73e096SJeff Guo {
2243fb73e096SJeff Guo 	int ret;
2244fb73e096SJeff Guo 
2245fb73e096SJeff Guo 	/* register the device event callback */
2246fb73e096SJeff Guo 	ret = rte_dev_event_callback_register(NULL,
2247fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
2248fb73e096SJeff Guo 	if (ret) {
2249fb73e096SJeff Guo 		printf("Failed to register device event callback\n");
2250fb73e096SJeff Guo 		return -1;
2251fb73e096SJeff Guo 	}
2252fb73e096SJeff Guo 
2253fb73e096SJeff Guo 	return 0;
2254fb73e096SJeff Guo }
2255fb73e096SJeff Guo 
2256fb73e096SJeff Guo 
2257fb73e096SJeff Guo static int
2258fb73e096SJeff Guo eth_dev_event_callback_unregister(void)
2259fb73e096SJeff Guo {
2260fb73e096SJeff Guo 	int ret;
2261fb73e096SJeff Guo 
2262fb73e096SJeff Guo 	/* unregister the device event callback */
2263fb73e096SJeff Guo 	ret = rte_dev_event_callback_unregister(NULL,
2264fb73e096SJeff Guo 		eth_dev_event_callback, NULL);
2265fb73e096SJeff Guo 	if (ret < 0) {
2266fb73e096SJeff Guo 		printf("Failed to unregister device event callback\n");
2267fb73e096SJeff Guo 		return -1;
2268fb73e096SJeff Guo 	}
2269fb73e096SJeff Guo 
2270fb73e096SJeff Guo 	return 0;
2271fb73e096SJeff Guo }
2272fb73e096SJeff Guo 
227397f1e196SWei Dai void
2274edab33b1STetsuya Mukawa attach_port(char *identifier)
2275ce8d5614SIntel {
2276ebf5e9b7SBernard Iremonger 	portid_t pi = 0;
2277931126baSBernard Iremonger 	unsigned int socket_id;
2278ce8d5614SIntel 
2279edab33b1STetsuya Mukawa 	printf("Attaching a new port...\n");
2280edab33b1STetsuya Mukawa 
2281edab33b1STetsuya Mukawa 	if (identifier == NULL) {
2282edab33b1STetsuya Mukawa 		printf("Invalid parameters are specified\n");
2283edab33b1STetsuya Mukawa 		return;
2284ce8d5614SIntel 	}
2285ce8d5614SIntel 
2286edab33b1STetsuya Mukawa 	if (rte_eth_dev_attach(identifier, &pi))
2287edab33b1STetsuya Mukawa 		return;
2288edab33b1STetsuya Mukawa 
2289931126baSBernard Iremonger 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2290931126baSBernard Iremonger 	/* if socket_id is invalid, set to 0 */
2291931126baSBernard Iremonger 	if (check_socket_id(socket_id) < 0)
2292931126baSBernard Iremonger 		socket_id = 0;
2293931126baSBernard Iremonger 	reconfig(pi, socket_id);
2294edab33b1STetsuya Mukawa 	rte_eth_promiscuous_enable(pi);
2295edab33b1STetsuya Mukawa 
22964918a357SXiaoyun Li 	ports_ids[nb_ports] = pi;
2297d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2298edab33b1STetsuya Mukawa 
2299edab33b1STetsuya Mukawa 	ports[pi].port_status = RTE_PORT_STOPPED;
2300edab33b1STetsuya Mukawa 
230103ce2c53SMatan Azrad 	update_fwd_ports(pi);
230203ce2c53SMatan Azrad 
2303edab33b1STetsuya Mukawa 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2304edab33b1STetsuya Mukawa 	printf("Done\n");
2305edab33b1STetsuya Mukawa }
2306edab33b1STetsuya Mukawa 
2307edab33b1STetsuya Mukawa void
230828caa76aSZhiyong Yang detach_port(portid_t port_id)
23095f4ec54fSChen Jing D(Mark) {
2310edab33b1STetsuya Mukawa 	char name[RTE_ETH_NAME_MAX_LEN];
23114918a357SXiaoyun Li 	uint16_t i;
23125f4ec54fSChen Jing D(Mark) 
2313edab33b1STetsuya Mukawa 	printf("Detaching a port...\n");
23145f4ec54fSChen Jing D(Mark) 
2315edab33b1STetsuya Mukawa 	if (!port_is_closed(port_id)) {
2316edab33b1STetsuya Mukawa 		printf("Please close port first\n");
2317edab33b1STetsuya Mukawa 		return;
2318edab33b1STetsuya Mukawa 	}
2319edab33b1STetsuya Mukawa 
2320938a184aSAdrien Mazarguil 	if (ports[port_id].flow_list)
2321938a184aSAdrien Mazarguil 		port_flow_flush(port_id);
2322938a184aSAdrien Mazarguil 
23233070419eSGaetan Rivet 	if (rte_eth_dev_detach(port_id, name)) {
2324adea04c4SZhiyong Yang 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2325edab33b1STetsuya Mukawa 		return;
23263070419eSGaetan Rivet 	}
2327edab33b1STetsuya Mukawa 
23284918a357SXiaoyun Li 	for (i = 0; i < nb_ports; i++) {
23294918a357SXiaoyun Li 		if (ports_ids[i] == port_id) {
23304918a357SXiaoyun Li 			ports_ids[i] = ports_ids[nb_ports-1];
23314918a357SXiaoyun Li 			ports_ids[nb_ports-1] = 0;
23324918a357SXiaoyun Li 			break;
23334918a357SXiaoyun Li 		}
23344918a357SXiaoyun Li 	}
2335d9a42a69SThomas Monjalon 	nb_ports = rte_eth_dev_count_avail();
2336edab33b1STetsuya Mukawa 
233703ce2c53SMatan Azrad 	update_fwd_ports(RTE_MAX_ETHPORTS);
233803ce2c53SMatan Azrad 
2339adea04c4SZhiyong Yang 	printf("Port %u is detached. Now total ports is %d\n",
2340adea04c4SZhiyong Yang 			port_id, nb_ports);
2341edab33b1STetsuya Mukawa 	printf("Done\n");
2342edab33b1STetsuya Mukawa 	return;
23435f4ec54fSChen Jing D(Mark) }
23445f4ec54fSChen Jing D(Mark) 
/*
 * Final cleanup before testpmd exits: stop packet forwarding, shut
 * down and close every port, and tear down hot-plug event monitoring.
 */
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	/* Make sure no forwarding lcore is still using the ports. */
	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		/* Skip link-state polling while tearing the ports down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires to call clean-up routine to remove existing
			 * socket.
			 * This workaround valid only for testpmd, needs a fix
			 * valid for all applications.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port(pt_id);
		}
	}

	if (hot_plug) {
		/* Stop the monitor first, then drop our callbacks. */
		ret = rte_dev_event_monitor_stop();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");

		ret = eth_dev_event_callback_unregister();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to unregister all event callbacks.");
	}

	printf("\nBye...\n");
}
2391af75078fSIntel 
/* Signature of a parameter-less test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* Maps a command name to the handler invoked when it is entered. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* handler to run for this command */
};

/* Number of entries in the pmd_test_menu[] command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2399af75078fSIntel 
2400ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2401af75078fSIntel static void
2402edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2403af75078fSIntel {
2404ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2405ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2406f8244c63SZhiyong Yang 	portid_t portid;
2407f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2408ce8d5614SIntel 	struct rte_eth_link link;
2409ce8d5614SIntel 
2410ce8d5614SIntel 	printf("Checking link statuses...\n");
2411ce8d5614SIntel 	fflush(stdout);
2412ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2413ce8d5614SIntel 		all_ports_up = 1;
24147d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2415ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2416ce8d5614SIntel 				continue;
2417ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2418ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2419ce8d5614SIntel 			/* print link status if flag set */
2420ce8d5614SIntel 			if (print_flag == 1) {
2421ce8d5614SIntel 				if (link.link_status)
2422f8244c63SZhiyong Yang 					printf(
2423f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2424f8244c63SZhiyong Yang 					portid, link.link_speed,
2425ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2426ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2427ce8d5614SIntel 				else
2428f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2429ce8d5614SIntel 				continue;
2430ce8d5614SIntel 			}
2431ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
243209419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2433ce8d5614SIntel 				all_ports_up = 0;
2434ce8d5614SIntel 				break;
2435ce8d5614SIntel 			}
2436ce8d5614SIntel 		}
2437ce8d5614SIntel 		/* after finally printing all link status, get out */
2438ce8d5614SIntel 		if (print_flag == 1)
2439ce8d5614SIntel 			break;
2440ce8d5614SIntel 
2441ce8d5614SIntel 		if (all_ports_up == 0) {
2442ce8d5614SIntel 			fflush(stdout);
2443ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2444ce8d5614SIntel 		}
2445ce8d5614SIntel 
2446ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2447ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2448ce8d5614SIntel 			print_flag = 1;
2449ce8d5614SIntel 		}
24508ea656f8SGaetan Rivet 
24518ea656f8SGaetan Rivet 		if (lsc_interrupt)
24528ea656f8SGaetan Rivet 			break;
2453ce8d5614SIntel 	}
2454af75078fSIntel }
2455af75078fSIntel 
/*
 * Deferred handler (scheduled via rte_eal_alarm_set) for a device
 * removal interrupt: stop forwarding if the removed port was in use,
 * then stop, close and detach the port, restarting forwarding
 * afterwards when it was interrupted here.
 */
static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	/* saved so the forced value below can be restored afterwards */
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* the device was removed: skip link polling while stopping it */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2477284c908cSGaetan Rivet 
247876ad4a2dSGaetan Rivet /* This function is used by the interrupt thread */
2479d6af1a13SBernard Iremonger static int
2480f8244c63SZhiyong Yang eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2481d6af1a13SBernard Iremonger 		  void *ret_param)
248276ad4a2dSGaetan Rivet {
248376ad4a2dSGaetan Rivet 	static const char * const event_desc[] = {
248476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
248576ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
248676ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
248776ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
248876ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2489badb87c1SAnoob Joseph 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
249076ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
249176ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
24924fb82244SMatan Azrad 		[RTE_ETH_EVENT_NEW] = "device probed",
24934fb82244SMatan Azrad 		[RTE_ETH_EVENT_DESTROY] = "device released",
249476ad4a2dSGaetan Rivet 		[RTE_ETH_EVENT_MAX] = NULL,
249576ad4a2dSGaetan Rivet 	};
249676ad4a2dSGaetan Rivet 
249776ad4a2dSGaetan Rivet 	RTE_SET_USED(param);
2498d6af1a13SBernard Iremonger 	RTE_SET_USED(ret_param);
249976ad4a2dSGaetan Rivet 
250076ad4a2dSGaetan Rivet 	if (type >= RTE_ETH_EVENT_MAX) {
250176ad4a2dSGaetan Rivet 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
250276ad4a2dSGaetan Rivet 			port_id, __func__, type);
250376ad4a2dSGaetan Rivet 		fflush(stderr);
25043af72783SGaetan Rivet 	} else if (event_print_mask & (UINT32_C(1) << type)) {
250576ad4a2dSGaetan Rivet 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
250676ad4a2dSGaetan Rivet 			event_desc[type]);
250776ad4a2dSGaetan Rivet 		fflush(stdout);
250876ad4a2dSGaetan Rivet 	}
2509284c908cSGaetan Rivet 
25100e45c64dSMatan Azrad 	if (port_id_is_invalid(port_id, DISABLED_WARN))
25110e45c64dSMatan Azrad 		return 0;
25120e45c64dSMatan Azrad 
2513284c908cSGaetan Rivet 	switch (type) {
2514284c908cSGaetan Rivet 	case RTE_ETH_EVENT_INTR_RMV:
2515284c908cSGaetan Rivet 		if (rte_eal_alarm_set(100000,
2516284c908cSGaetan Rivet 				rmv_event_callback, (void *)(intptr_t)port_id))
2517284c908cSGaetan Rivet 			fprintf(stderr, "Could not set up deferred device removal\n");
2518284c908cSGaetan Rivet 		break;
2519284c908cSGaetan Rivet 	default:
2520284c908cSGaetan Rivet 		break;
2521284c908cSGaetan Rivet 	}
2522d6af1a13SBernard Iremonger 	return 0;
252376ad4a2dSGaetan Rivet }
252476ad4a2dSGaetan Rivet 
2525fb73e096SJeff Guo /* This function is used by the interrupt thread */
2526fb73e096SJeff Guo static void
2527fb73e096SJeff Guo eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2528fb73e096SJeff Guo 			     __rte_unused void *arg)
2529fb73e096SJeff Guo {
2530fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2531fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2532fb73e096SJeff Guo 			__func__, type);
2533fb73e096SJeff Guo 		fflush(stderr);
2534fb73e096SJeff Guo 	}
2535fb73e096SJeff Guo 
2536fb73e096SJeff Guo 	switch (type) {
2537fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2538fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2539fb73e096SJeff Guo 			device_name);
2540fb73e096SJeff Guo 		/* TODO: After finish failure handle, begin to stop
2541fb73e096SJeff Guo 		 * packet forward, stop port, close port, detach port.
2542fb73e096SJeff Guo 		 */
2543fb73e096SJeff Guo 		break;
2544fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2545fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2546fb73e096SJeff Guo 			device_name);
2547fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2548fb73e096SJeff Guo 		 * begin to attach port.
2549fb73e096SJeff Guo 		 */
2550fb73e096SJeff Guo 		break;
2551fb73e096SJeff Guo 	default:
2552fb73e096SJeff Guo 		break;
2553fb73e096SJeff Guo 	}
2554fb73e096SJeff Guo }
2555fb73e096SJeff Guo 
2556013af9b6SIntel static int
255728caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2558af75078fSIntel {
2559013af9b6SIntel 	uint16_t i;
2560af75078fSIntel 	int diag;
2561013af9b6SIntel 	uint8_t mapping_found = 0;
2562af75078fSIntel 
2563013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2564013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2565013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2566013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2567013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2568013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2569013af9b6SIntel 			if (diag != 0)
2570013af9b6SIntel 				return diag;
2571013af9b6SIntel 			mapping_found = 1;
2572af75078fSIntel 		}
2573013af9b6SIntel 	}
2574013af9b6SIntel 	if (mapping_found)
2575013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2576013af9b6SIntel 	return 0;
2577013af9b6SIntel }
2578013af9b6SIntel 
2579013af9b6SIntel static int
258028caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2581013af9b6SIntel {
2582013af9b6SIntel 	uint16_t i;
2583013af9b6SIntel 	int diag;
2584013af9b6SIntel 	uint8_t mapping_found = 0;
2585013af9b6SIntel 
2586013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2587013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2588013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2589013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2590013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2591013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2592013af9b6SIntel 			if (diag != 0)
2593013af9b6SIntel 				return diag;
2594013af9b6SIntel 			mapping_found = 1;
2595013af9b6SIntel 		}
2596013af9b6SIntel 	}
2597013af9b6SIntel 	if (mapping_found)
2598013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2599013af9b6SIntel 	return 0;
2600013af9b6SIntel }
2601013af9b6SIntel 
2602013af9b6SIntel static void
260328caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2604013af9b6SIntel {
2605013af9b6SIntel 	int diag = 0;
2606013af9b6SIntel 
2607013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2608af75078fSIntel 	if (diag != 0) {
2609013af9b6SIntel 		if (diag == -ENOTSUP) {
2610013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2611013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2612013af9b6SIntel 		}
2613013af9b6SIntel 		else
2614013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2615013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2616013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2617af75078fSIntel 					pi, diag);
2618af75078fSIntel 	}
2619013af9b6SIntel 
2620013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2621af75078fSIntel 	if (diag != 0) {
2622013af9b6SIntel 		if (diag == -ENOTSUP) {
2623013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2624013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2625013af9b6SIntel 		}
2626013af9b6SIntel 		else
2627013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2628013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2629013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2630af75078fSIntel 					pi, diag);
2631af75078fSIntel 	}
2632af75078fSIntel }
2633af75078fSIntel 
2634f2c5125aSPablo de Lara static void
2635f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2636f2c5125aSPablo de Lara {
2637d44f8a48SQi Zhang 	uint16_t qid;
2638f2c5125aSPablo de Lara 
2639d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2640d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2641d44f8a48SQi Zhang 
2642d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2643f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2644d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2645f2c5125aSPablo de Lara 
2646f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2647d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2648f2c5125aSPablo de Lara 
2649f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2650d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2651f2c5125aSPablo de Lara 
2652f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2653d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2654f2c5125aSPablo de Lara 
2655f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2656d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2657f2c5125aSPablo de Lara 
2658d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2659d44f8a48SQi Zhang 	}
2660d44f8a48SQi Zhang 
2661d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2662d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2663d44f8a48SQi Zhang 
2664d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2665f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2666d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2667f2c5125aSPablo de Lara 
2668f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2669d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2670f2c5125aSPablo de Lara 
2671f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2672d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2673f2c5125aSPablo de Lara 
2674f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2675d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2676f2c5125aSPablo de Lara 
2677f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2678d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2679d44f8a48SQi Zhang 
2680d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2681d44f8a48SQi Zhang 	}
2682f2c5125aSPablo de Lara }
2683f2c5125aSPablo de Lara 
2684013af9b6SIntel void
2685013af9b6SIntel init_port_config(void)
2686013af9b6SIntel {
2687013af9b6SIntel 	portid_t pid;
2688013af9b6SIntel 	struct rte_port *port;
2689013af9b6SIntel 
26907d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2691013af9b6SIntel 		port = &ports[pid];
2692013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2693422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
26943ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2695013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
269690892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2697422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2698af75078fSIntel 		} else {
2699013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2700013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2701af75078fSIntel 		}
27023ce690d3SBruce Richardson 
27035f592039SJingjing Wu 		if (port->dcb_flag == 0) {
27043ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
27053ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
27063ce690d3SBruce Richardson 			else
27073ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
27083ce690d3SBruce Richardson 		}
27093ce690d3SBruce Richardson 
2710f2c5125aSPablo de Lara 		rxtx_port_config(port);
2711013af9b6SIntel 
2712013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2713013af9b6SIntel 
2714013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
271550c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2716e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
27177b7e5ba7SIntel #endif
27188ea656f8SGaetan Rivet 
27198ea656f8SGaetan Rivet 		if (lsc_interrupt &&
27208ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
27218ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
27228ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2723284c908cSGaetan Rivet 		if (rmv_interrupt &&
2724284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2725284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2726284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2727013af9b6SIntel 	}
2728013af9b6SIntel }
2729013af9b6SIntel 
273041b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
273141b05095SBernard Iremonger {
273241b05095SBernard Iremonger 	struct rte_port *port;
273341b05095SBernard Iremonger 
273441b05095SBernard Iremonger 	port = &ports[slave_pid];
273541b05095SBernard Iremonger 	port->slave_flag = 1;
273641b05095SBernard Iremonger }
273741b05095SBernard Iremonger 
273841b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
273941b05095SBernard Iremonger {
274041b05095SBernard Iremonger 	struct rte_port *port;
274141b05095SBernard Iremonger 
274241b05095SBernard Iremonger 	port = &ports[slave_pid];
274341b05095SBernard Iremonger 	port->slave_flag = 0;
274441b05095SBernard Iremonger }
274541b05095SBernard Iremonger 
27460e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
27470e545d30SBernard Iremonger {
27480e545d30SBernard Iremonger 	struct rte_port *port;
27490e545d30SBernard Iremonger 
27500e545d30SBernard Iremonger 	port = &ports[slave_pid];
2751b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2752b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2753b8b8b344SMatan Azrad 		return 1;
2754b8b8b344SMatan Azrad 	return 0;
27550e545d30SBernard Iremonger }
27560e545d30SBernard Iremonger 
/* VLAN IDs used to build the per-pool VLAN mapping for DCB+VT (VMDq)
 * configuration: one tag per pool, enough entries for up to 32 pools.
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2763013af9b6SIntel 
2764013af9b6SIntel static  int
2765ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
27661a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
27671a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
27681a572499SJingjing Wu 		 uint8_t pfc_en)
2769013af9b6SIntel {
2770013af9b6SIntel 	uint8_t i;
2771ac7c491cSKonstantin Ananyev 	int32_t rc;
2772ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
2773af75078fSIntel 
2774af75078fSIntel 	/*
2775013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2776013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2777af75078fSIntel 	 */
27781a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
27791a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
27801a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
27811a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
27821a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2783013af9b6SIntel 
2784547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
27851a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
27861a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
27871a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
27881a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
27891a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
27901a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2791013af9b6SIntel 
27921a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
27931a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
27941a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
27951a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
27961a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2797af75078fSIntel 		}
2798013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2799f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2800f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2801013af9b6SIntel 		}
2802013af9b6SIntel 
2803013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
280432e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
280532e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
28061a572499SJingjing Wu 	} else {
28071a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
28081a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
28091a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
28101a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2811013af9b6SIntel 
2812ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2813ac7c491cSKonstantin Ananyev 		if (rc != 0)
2814ac7c491cSKonstantin Ananyev 			return rc;
2815ac7c491cSKonstantin Ananyev 
28161a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
28171a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
28181a572499SJingjing Wu 
2819bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2820bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2821bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2822013af9b6SIntel 		}
2823ac7c491cSKonstantin Ananyev 
28241a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2825ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
282632e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
28271a572499SJingjing Wu 	}
28281a572499SJingjing Wu 
28291a572499SJingjing Wu 	if (pfc_en)
28301a572499SJingjing Wu 		eth_conf->dcb_capability_en =
28311a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2832013af9b6SIntel 	else
2833013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2834013af9b6SIntel 
2835013af9b6SIntel 	return 0;
2836013af9b6SIntel }
2837013af9b6SIntel 
2838013af9b6SIntel int
28391a572499SJingjing Wu init_port_dcb_config(portid_t pid,
28401a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
28411a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
28421a572499SJingjing Wu 		     uint8_t pfc_en)
2843013af9b6SIntel {
2844013af9b6SIntel 	struct rte_eth_conf port_conf;
2845013af9b6SIntel 	struct rte_port *rte_port;
2846013af9b6SIntel 	int retval;
2847013af9b6SIntel 	uint16_t i;
2848013af9b6SIntel 
28492a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2850013af9b6SIntel 
2851013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2852013af9b6SIntel 	/* Enter DCB configuration status */
2853013af9b6SIntel 	dcb_config = 1;
2854013af9b6SIntel 
2855d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2856d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2857d5354e89SYanglong Wu 
2858013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2859ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2860013af9b6SIntel 	if (retval < 0)
2861013af9b6SIntel 		return retval;
28620074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2863013af9b6SIntel 
28642f203d44SQi Zhang 	/* re-configure the device . */
28652f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
28662a977b89SWenzhuo Lu 
28672a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
28682a977b89SWenzhuo Lu 
28692a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
28702a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
28712a977b89SWenzhuo Lu 	 */
28722a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
28732a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
28742a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
28752a977b89SWenzhuo Lu 			" for port %d.", pid);
28762a977b89SWenzhuo Lu 		return -1;
28772a977b89SWenzhuo Lu 	}
28782a977b89SWenzhuo Lu 
28792a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
28802a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
28812a977b89SWenzhuo Lu 	 */
28822a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
288386ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
288486ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
288586ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
288686ef65eeSBernard Iremonger 		} else {
28872a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
28882a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
288986ef65eeSBernard Iremonger 		}
28902a977b89SWenzhuo Lu 	} else {
28912a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
28922a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
28932a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
28942a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
28952a977b89SWenzhuo Lu 		} else {
28962a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
28972a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
28982a977b89SWenzhuo Lu 
28992a977b89SWenzhuo Lu 		}
29002a977b89SWenzhuo Lu 	}
29012a977b89SWenzhuo Lu 	rx_free_thresh = 64;
29022a977b89SWenzhuo Lu 
2903013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2904013af9b6SIntel 
2905f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
2906013af9b6SIntel 	/* VLAN filter */
29070074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
29081a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2909013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
2910013af9b6SIntel 
2911013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2912013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
2913013af9b6SIntel 
29147741e4cfSIntel 	rte_port->dcb_flag = 1;
29157741e4cfSIntel 
2916013af9b6SIntel 	return 0;
2917af75078fSIntel }
2918af75078fSIntel 
2919ffc468ffSTetsuya Mukawa static void
2920ffc468ffSTetsuya Mukawa init_port(void)
2921ffc468ffSTetsuya Mukawa {
2922ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
2923ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
2924ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2925ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
2926ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
2927ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
2928ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
2929ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
2930ffc468ffSTetsuya Mukawa 	}
2931ffc468ffSTetsuya Mukawa }
2932ffc468ffSTetsuya Mukawa 
/*
 * Forced-termination teardown: release all ports first, then tell the
 * interactive prompt (if any) to exit.  Called from the signal handler.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2939d3a274ceSZhihong Wang 
2940d3a274ceSZhihong Wang static void
2941cfea1f30SPablo de Lara print_stats(void)
2942cfea1f30SPablo de Lara {
2943cfea1f30SPablo de Lara 	uint8_t i;
2944cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2945cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2946cfea1f30SPablo de Lara 
2947cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
2948cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
2949cfea1f30SPablo de Lara 
2950cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
2951cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2952cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
2953cfea1f30SPablo de Lara }
2954cfea1f30SPablo de Lara 
2955cfea1f30SPablo de Lara static void
2956d3a274ceSZhihong Wang signal_handler(int signum)
2957d3a274ceSZhihong Wang {
2958d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
2959d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
2960d3a274ceSZhihong Wang 				signum);
2961102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
2962102b7329SReshma Pattan 		/* uninitialize packet capture framework */
2963102b7329SReshma Pattan 		rte_pdump_uninit();
2964102b7329SReshma Pattan #endif
296562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
296662d3216dSReshma Pattan 		rte_latencystats_uninit();
296762d3216dSReshma Pattan #endif
2968d3a274ceSZhihong Wang 		force_quit();
2969d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
2970d9a191a0SPhil Yang 		f_quit = 1;
2971d3a274ceSZhihong Wang 		/* exit with the expected status */
2972d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
2973d3a274ceSZhihong Wang 		kill(getpid(), signum);
2974d3a274ceSZhihong Wang 	}
2975d3a274ceSZhihong Wang }
2976d3a274ceSZhihong Wang 
/*
 * Application entry point: initialise the EAL, discover the probed
 * ports, parse the testpmd command line, start all ports, and then
 * either enter the interactive prompt or run non-interactive packet
 * forwarding until a signal (or stdin EOF) stops it.
 *
 * Returns 0 on normal termination, non-zero on start-up failure.
 */
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	/* Install handlers early so a Ctrl-C during init still cleans up. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	/* Record the ids of all ports probed by the EAL. */
	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	/* Skip the EAL arguments consumed by rte_eal_init() above. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* Pin all pages to avoid page faults in the datapath (best effort). */
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();

	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until a signal arrives. */
		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* f_quit is set by signal_handler() on SIGINT/SIGTERM */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
3159