xref: /dpdk/app/test-pmd/testpmd.c (revision 82010ef55e7cb4948b4bf4ff229716a3a142e95c)
1174a1631SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2174a1631SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
3af75078fSIntel  */
4af75078fSIntel 
5af75078fSIntel #include <stdarg.h>
6af75078fSIntel #include <stdio.h>
7af75078fSIntel #include <stdlib.h>
8af75078fSIntel #include <signal.h>
9af75078fSIntel #include <string.h>
10af75078fSIntel #include <time.h>
11af75078fSIntel #include <fcntl.h>
121c036b16SEelco Chaudron #include <sys/mman.h>
13af75078fSIntel #include <sys/types.h>
14af75078fSIntel #include <errno.h>
15fb73e096SJeff Guo #include <stdbool.h>
16af75078fSIntel 
17af75078fSIntel #include <sys/queue.h>
18af75078fSIntel #include <sys/stat.h>
19af75078fSIntel 
20af75078fSIntel #include <stdint.h>
21af75078fSIntel #include <unistd.h>
22af75078fSIntel #include <inttypes.h>
23af75078fSIntel 
24af75078fSIntel #include <rte_common.h>
25d1eb542eSOlivier Matz #include <rte_errno.h>
26af75078fSIntel #include <rte_byteorder.h>
27af75078fSIntel #include <rte_log.h>
28af75078fSIntel #include <rte_debug.h>
29af75078fSIntel #include <rte_cycles.h>
30c7f5dba7SAnatoly Burakov #include <rte_malloc_heap.h>
31af75078fSIntel #include <rte_memory.h>
32af75078fSIntel #include <rte_memcpy.h>
33af75078fSIntel #include <rte_launch.h>
34af75078fSIntel #include <rte_eal.h>
35284c908cSGaetan Rivet #include <rte_alarm.h>
36af75078fSIntel #include <rte_per_lcore.h>
37af75078fSIntel #include <rte_lcore.h>
38af75078fSIntel #include <rte_atomic.h>
39af75078fSIntel #include <rte_branch_prediction.h>
40af75078fSIntel #include <rte_mempool.h>
41af75078fSIntel #include <rte_malloc.h>
42af75078fSIntel #include <rte_mbuf.h>
430e798567SPavan Nikhilesh #include <rte_mbuf_pool_ops.h>
44af75078fSIntel #include <rte_interrupts.h>
45af75078fSIntel #include <rte_pci.h>
46af75078fSIntel #include <rte_ether.h>
47af75078fSIntel #include <rte_ethdev.h>
48edab33b1STetsuya Mukawa #include <rte_dev.h>
49af75078fSIntel #include <rte_string_fns.h>
50e261265eSRadu Nicolau #ifdef RTE_LIBRTE_IXGBE_PMD
51e261265eSRadu Nicolau #include <rte_pmd_ixgbe.h>
52e261265eSRadu Nicolau #endif
53102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
54102b7329SReshma Pattan #include <rte_pdump.h>
55102b7329SReshma Pattan #endif
56938a184aSAdrien Mazarguil #include <rte_flow.h>
577e4441c8SRemy Horton #include <rte_metrics.h>
587e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
597e4441c8SRemy Horton #include <rte_bitrate.h>
607e4441c8SRemy Horton #endif
6162d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
6262d3216dSReshma Pattan #include <rte_latencystats.h>
6362d3216dSReshma Pattan #endif
64af75078fSIntel 
65af75078fSIntel #include "testpmd.h"
66af75078fSIntel 
67c7f5dba7SAnatoly Burakov #ifndef MAP_HUGETLB
68c7f5dba7SAnatoly Burakov /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69c7f5dba7SAnatoly Burakov #define HUGE_FLAG (0x40000)
70c7f5dba7SAnatoly Burakov #else
71c7f5dba7SAnatoly Burakov #define HUGE_FLAG MAP_HUGETLB
72c7f5dba7SAnatoly Burakov #endif
73c7f5dba7SAnatoly Burakov 
74c7f5dba7SAnatoly Burakov #ifndef MAP_HUGE_SHIFT
75c7f5dba7SAnatoly Burakov /* older kernels (or FreeBSD) will not have this define */
76c7f5dba7SAnatoly Burakov #define HUGE_SHIFT (26)
77c7f5dba7SAnatoly Burakov #else
78c7f5dba7SAnatoly Burakov #define HUGE_SHIFT MAP_HUGE_SHIFT
79c7f5dba7SAnatoly Burakov #endif
80c7f5dba7SAnatoly Burakov 
81c7f5dba7SAnatoly Burakov #define EXTMEM_HEAP_NAME "extmem"
82c7f5dba7SAnatoly Burakov 
83af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
84285fd101SOlivier Matz int testpmd_logtype; /**< Log type for testpmd logs */
85af75078fSIntel 
/* Use the master core for running the command line? */
87af75078fSIntel uint8_t interactive = 0;
88ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
8999cabef0SPablo de Lara uint8_t tx_first;
9081ef862bSAllain Legacy char cmdline_filename[PATH_MAX] = {0};
91af75078fSIntel 
92af75078fSIntel /*
93af75078fSIntel  * NUMA support configuration.
94af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
95af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
97af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
98af75078fSIntel  */
99999b2ee0SBruce Richardson uint8_t numa_support = 1; /**< numa enabled by default */
100af75078fSIntel 
101af75078fSIntel /*
102b6ea6408SIntel  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
103b6ea6408SIntel  * not configured.
104b6ea6408SIntel  */
105b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
106b6ea6408SIntel 
107b6ea6408SIntel /*
108c7f5dba7SAnatoly Burakov  * Select mempool allocation type:
109c7f5dba7SAnatoly Burakov  * - native: use regular DPDK memory
110c7f5dba7SAnatoly Burakov  * - anon: use regular DPDK memory to create mempool, but populate using
111c7f5dba7SAnatoly Burakov  *         anonymous memory (may not be IOVA-contiguous)
112c7f5dba7SAnatoly Burakov  * - xmem: use externally allocated hugepage memory
113148f963fSBruce Richardson  */
114c7f5dba7SAnatoly Burakov uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
115148f963fSBruce Richardson 
116148f963fSBruce Richardson /*
 * Store the specified socket on which the memory pool used by each port
 * is allocated.
11963531389SGeorgios Katsikas  */
12063531389SGeorgios Katsikas uint8_t port_numa[RTE_MAX_ETHPORTS];
12163531389SGeorgios Katsikas 
12263531389SGeorgios Katsikas /*
12363531389SGeorgios Katsikas  * Store specified sockets on which RX ring to be used by ports
12463531389SGeorgios Katsikas  * is allocated.
12563531389SGeorgios Katsikas  */
12663531389SGeorgios Katsikas uint8_t rxring_numa[RTE_MAX_ETHPORTS];
12763531389SGeorgios Katsikas 
12863531389SGeorgios Katsikas /*
12963531389SGeorgios Katsikas  * Store specified sockets on which TX ring to be used by ports
13063531389SGeorgios Katsikas  * is allocated.
13163531389SGeorgios Katsikas  */
13263531389SGeorgios Katsikas uint8_t txring_numa[RTE_MAX_ETHPORTS];
13363531389SGeorgios Katsikas 
13463531389SGeorgios Katsikas /*
135af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
136af75078fSIntel  * forwarded.
137547d946cSNirmoy Das  * Must be instantiated with the ethernet addresses of peer traffic generator
138af75078fSIntel  * ports.
139af75078fSIntel  */
140af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141af75078fSIntel portid_t nb_peer_eth_addrs = 0;
142af75078fSIntel 
143af75078fSIntel /*
144af75078fSIntel  * Probed Target Environment.
145af75078fSIntel  */
146af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
147af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
148af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
150af75078fSIntel 
1514918a357SXiaoyun Li portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
1524918a357SXiaoyun Li 
153af75078fSIntel /*
154af75078fSIntel  * Test Forwarding Configuration.
155af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
157af75078fSIntel  */
158af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
161af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
162af75078fSIntel 
163af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
165af75078fSIntel 
166af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
168af75078fSIntel 
/*
 * Forwarding engines.
 */
/* NULL-terminated table of every available forwarding engine; the active
 * engine is selected through cur_fwd_eng (IO mode by default, see below).
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
190af75078fSIntel 
191af75078fSIntel struct fwd_config cur_fwd_config;
192af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193bf56fce1SZhihong Wang uint32_t retry_enabled;
194bf56fce1SZhihong Wang uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195bf56fce1SZhihong Wang uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196af75078fSIntel 
197af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
199c8798818SIntel                                       * specified on command-line. */
200cfea1f30SPablo de Lara uint16_t stats_period; /**< Period to show statistics (disabled by default) */
201d9a191a0SPhil Yang 
202d9a191a0SPhil Yang /*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated normally. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
205d9a191a0SPhil Yang  */
206d9a191a0SPhil Yang uint8_t f_quit;
207d9a191a0SPhil Yang 
208af75078fSIntel /*
209af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
210af75078fSIntel  */
211af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
214af75078fSIntel };
215af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216af75078fSIntel 
21779bec05bSKonstantin Ananyev enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
21879bec05bSKonstantin Ananyev /**< Split policy for packets to TX. */
21979bec05bSKonstantin Ananyev 
220*82010ef5SYongseok Koh uint8_t txonly_multi_flow;
221*82010ef5SYongseok Koh /**< Whether multiple flows are generated in TXONLY mode. */
222*82010ef5SYongseok Koh 
223af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
224e9378bbcSCunming Liang uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
225af75078fSIntel 
226900550deSIntel /* current configuration is in DCB or not,0 means it is not in DCB mode */
227900550deSIntel uint8_t dcb_config = 0;
228900550deSIntel 
229900550deSIntel /* Whether the dcb is in testing status */
230900550deSIntel uint8_t dcb_test = 0;
231900550deSIntel 
232af75078fSIntel /*
233af75078fSIntel  * Configurable number of RX/TX queues.
234af75078fSIntel  */
235af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
236af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
237af75078fSIntel 
238af75078fSIntel /*
239af75078fSIntel  * Configurable number of RX/TX ring descriptors.
2408599ed31SRemy Horton  * Defaults are supplied by drivers via ethdev.
241af75078fSIntel  */
2428599ed31SRemy Horton #define RTE_TEST_RX_DESC_DEFAULT 0
2438599ed31SRemy Horton #define RTE_TEST_TX_DESC_DEFAULT 0
244af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
245af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
246af75078fSIntel 
247f2c5125aSPablo de Lara #define RTE_PMD_PARAM_UNSET -1
248af75078fSIntel /*
249af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
250af75078fSIntel  */
251af75078fSIntel 
252f2c5125aSPablo de Lara int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
253f2c5125aSPablo de Lara int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
254f2c5125aSPablo de Lara int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
255af75078fSIntel 
256f2c5125aSPablo de Lara int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
257f2c5125aSPablo de Lara int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
258f2c5125aSPablo de Lara int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
259af75078fSIntel 
260af75078fSIntel /*
261af75078fSIntel  * Configurable value of RX free threshold.
262af75078fSIntel  */
263f2c5125aSPablo de Lara int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
264af75078fSIntel 
265af75078fSIntel /*
266ce8d5614SIntel  * Configurable value of RX drop enable.
267ce8d5614SIntel  */
268f2c5125aSPablo de Lara int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
269ce8d5614SIntel 
270ce8d5614SIntel /*
271af75078fSIntel  * Configurable value of TX free threshold.
272af75078fSIntel  */
273f2c5125aSPablo de Lara int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
274af75078fSIntel 
275af75078fSIntel /*
276af75078fSIntel  * Configurable value of TX RS bit threshold.
277af75078fSIntel  */
278f2c5125aSPablo de Lara int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
279af75078fSIntel 
280af75078fSIntel /*
2813c156061SJens Freimann  * Configurable value of buffered packets before sending.
2823c156061SJens Freimann  */
2833c156061SJens Freimann uint16_t noisy_tx_sw_bufsz;
2843c156061SJens Freimann 
2853c156061SJens Freimann /*
2863c156061SJens Freimann  * Configurable value of packet buffer timeout.
2873c156061SJens Freimann  */
2883c156061SJens Freimann uint16_t noisy_tx_sw_buf_flush_time;
2893c156061SJens Freimann 
2903c156061SJens Freimann /*
2913c156061SJens Freimann  * Configurable value for size of VNF internal memory area
2923c156061SJens Freimann  * used for simulating noisy neighbour behaviour
2933c156061SJens Freimann  */
2943c156061SJens Freimann uint64_t noisy_lkup_mem_sz;
2953c156061SJens Freimann 
2963c156061SJens Freimann /*
2973c156061SJens Freimann  * Configurable value of number of random writes done in
2983c156061SJens Freimann  * VNF simulation memory area.
2993c156061SJens Freimann  */
3003c156061SJens Freimann uint64_t noisy_lkup_num_writes;
3013c156061SJens Freimann 
3023c156061SJens Freimann /*
3033c156061SJens Freimann  * Configurable value of number of random reads done in
3043c156061SJens Freimann  * VNF simulation memory area.
3053c156061SJens Freimann  */
3063c156061SJens Freimann uint64_t noisy_lkup_num_reads;
3073c156061SJens Freimann 
3083c156061SJens Freimann /*
3093c156061SJens Freimann  * Configurable value of number of random reads/writes done in
3103c156061SJens Freimann  * VNF simulation memory area.
3113c156061SJens Freimann  */
3123c156061SJens Freimann uint64_t noisy_lkup_num_reads_writes;
3133c156061SJens Freimann 
3143c156061SJens Freimann /*
315af75078fSIntel  * Receive Side Scaling (RSS) configuration.
316af75078fSIntel  */
3178a387fa8SHelin Zhang uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
318af75078fSIntel 
319af75078fSIntel /*
320af75078fSIntel  * Port topology configuration
321af75078fSIntel  */
322af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
323af75078fSIntel 
3247741e4cfSIntel /*
3257741e4cfSIntel  * Avoids to flush all the RX streams before starts forwarding.
3267741e4cfSIntel  */
3277741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
3287741e4cfSIntel 
329af75078fSIntel /*
3307ee3e944SVasily Philipov  * Flow API isolated mode.
3317ee3e944SVasily Philipov  */
3327ee3e944SVasily Philipov uint8_t flow_isolate_all;
3337ee3e944SVasily Philipov 
3347ee3e944SVasily Philipov /*
335bc202406SDavid Marchand  * Avoids to check link status when starting/stopping a port.
336bc202406SDavid Marchand  */
337bc202406SDavid Marchand uint8_t no_link_check = 0; /* check by default */
338bc202406SDavid Marchand 
339bc202406SDavid Marchand /*
3408ea656f8SGaetan Rivet  * Enable link status change notification
3418ea656f8SGaetan Rivet  */
3428ea656f8SGaetan Rivet uint8_t lsc_interrupt = 1; /* enabled by default */
3438ea656f8SGaetan Rivet 
3448ea656f8SGaetan Rivet /*
345284c908cSGaetan Rivet  * Enable device removal notification.
346284c908cSGaetan Rivet  */
347284c908cSGaetan Rivet uint8_t rmv_interrupt = 1; /* enabled by default */
348284c908cSGaetan Rivet 
349fb73e096SJeff Guo uint8_t hot_plug = 0; /**< hotplug disabled by default. */
350fb73e096SJeff Guo 
3514f1ed78eSThomas Monjalon /* After attach, port setup is called on event or by iterator */
3524f1ed78eSThomas Monjalon bool setup_on_probe_event = true;
3534f1ed78eSThomas Monjalon 
35497b5d8b5SThomas Monjalon /* Pretty printing of ethdev events */
35597b5d8b5SThomas Monjalon static const char * const eth_event_desc[] = {
35697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
35797b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
35897b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
35997b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
36097b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
36197b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
36297b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
36397b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
36497b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_NEW] = "device probed",
36597b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_DESTROY] = "device released",
36697b5d8b5SThomas Monjalon 	[RTE_ETH_EVENT_MAX] = NULL,
36797b5d8b5SThomas Monjalon };
36897b5d8b5SThomas Monjalon 
369284c908cSGaetan Rivet /*
3703af72783SGaetan Rivet  * Display or mask ether events
3713af72783SGaetan Rivet  * Default to all events except VF_MBOX
3723af72783SGaetan Rivet  */
3733af72783SGaetan Rivet uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
3743af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
3753af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
3763af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
377badb87c1SAnoob Joseph 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
3783af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
3793af72783SGaetan Rivet 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
380e505d84cSAnatoly Burakov /*
381e505d84cSAnatoly Burakov  * Decide if all memory are locked for performance.
382e505d84cSAnatoly Burakov  */
383e505d84cSAnatoly Burakov int do_mlockall = 0;
3843af72783SGaetan Rivet 
3853af72783SGaetan Rivet /*
3867b7e5ba7SIntel  * NIC bypass mode configuration options.
3877b7e5ba7SIntel  */
3887b7e5ba7SIntel 
38950c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3907b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
391e261265eSRadu Nicolau uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
3927b7e5ba7SIntel #endif
3937b7e5ba7SIntel 
394e261265eSRadu Nicolau 
39562d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
39662d3216dSReshma Pattan 
39762d3216dSReshma Pattan /*
39862d3216dSReshma Pattan  * Set when latency stats is enabled in the commandline
39962d3216dSReshma Pattan  */
40062d3216dSReshma Pattan uint8_t latencystats_enabled;
40162d3216dSReshma Pattan 
40262d3216dSReshma Pattan /*
 * Lcore ID to serve latency statistics.
40462d3216dSReshma Pattan  */
40562d3216dSReshma Pattan lcoreid_t latencystats_lcore_id = -1;
40662d3216dSReshma Pattan 
40762d3216dSReshma Pattan #endif
40862d3216dSReshma Pattan 
4097b7e5ba7SIntel /*
410af75078fSIntel  * Ethernet device configuration.
411af75078fSIntel  */
412af75078fSIntel struct rte_eth_rxmode rx_mode = {
413af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
414af75078fSIntel };
415af75078fSIntel 
41607e5f7bdSShahaf Shuler struct rte_eth_txmode tx_mode = {
41707e5f7bdSShahaf Shuler 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
41807e5f7bdSShahaf Shuler };
419fd8c20aaSShahaf Shuler 
/* Flow Director configuration: disabled by default; when enabled from the
 * command line, match masks default to exact match on L2/L3/L4 fields.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		/* NOTE(review): 0xFFEF leaves bit 4 of the VLAN TCI
		 * unmatched — confirm this is intentional (vs. 0xFFFF).
		 */
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	/* queue index used for the flow director drop action */
	.drop_queue = 127,
};
442af75078fSIntel 
4432950a769SDeclan Doherty volatile int test_done = 1; /* stop packet forwarding when set to 1. */
444af75078fSIntel 
445ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
446ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
447ed30d9b6SIntel 
448ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
449ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
450ed30d9b6SIntel 
451ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
452ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
453ed30d9b6SIntel 
454a4fd5eeeSElza Mathew /*
455a4fd5eeeSElza Mathew  * Display zero values by default for xstats
456a4fd5eeeSElza Mathew  */
457a4fd5eeeSElza Mathew uint8_t xstats_hide_zero;
458a4fd5eeeSElza Mathew 
459c9cafcc8SShahaf Shuler unsigned int num_sockets = 0;
460c9cafcc8SShahaf Shuler unsigned int socket_ids[RTE_MAX_NUMA_NODES];
4617acf894dSStephen Hurd 
462e25e6c70SRemy Horton #ifdef RTE_LIBRTE_BITRATE
4637e4441c8SRemy Horton /* Bitrate statistics */
4647e4441c8SRemy Horton struct rte_stats_bitrates *bitrate_data;
465e25e6c70SRemy Horton lcoreid_t bitrate_lcore_id;
466e25e6c70SRemy Horton uint8_t bitrate_enabled;
467e25e6c70SRemy Horton #endif
4687e4441c8SRemy Horton 
469b40f8d78SJiayu Hu struct gro_status gro_ports[RTE_MAX_ETHPORTS];
470b7091f1dSJiayu Hu uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
471b40f8d78SJiayu Hu 
4721960be7dSNelio Laranjeiro struct vxlan_encap_conf vxlan_encap_conf = {
4731960be7dSNelio Laranjeiro 	.select_ipv4 = 1,
4741960be7dSNelio Laranjeiro 	.select_vlan = 0,
47562e8a5a8SViacheslav Ovsiienko 	.select_tos_ttl = 0,
4761960be7dSNelio Laranjeiro 	.vni = "\x00\x00\x00",
4771960be7dSNelio Laranjeiro 	.udp_src = 0,
4781960be7dSNelio Laranjeiro 	.udp_dst = RTE_BE16(4789),
4791960be7dSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
4801960be7dSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
4811960be7dSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
4821960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
4831960be7dSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
4841960be7dSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
4851960be7dSNelio Laranjeiro 	.vlan_tci = 0,
48662e8a5a8SViacheslav Ovsiienko 	.ip_tos = 0,
48762e8a5a8SViacheslav Ovsiienko 	.ip_ttl = 255,
4881960be7dSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
4891960be7dSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
4901960be7dSNelio Laranjeiro };
4911960be7dSNelio Laranjeiro 
492dcd962fcSNelio Laranjeiro struct nvgre_encap_conf nvgre_encap_conf = {
493dcd962fcSNelio Laranjeiro 	.select_ipv4 = 1,
494dcd962fcSNelio Laranjeiro 	.select_vlan = 0,
495dcd962fcSNelio Laranjeiro 	.tni = "\x00\x00\x00",
496dcd962fcSNelio Laranjeiro 	.ipv4_src = IPv4(127, 0, 0, 1),
497dcd962fcSNelio Laranjeiro 	.ipv4_dst = IPv4(255, 255, 255, 255),
498dcd962fcSNelio Laranjeiro 	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
499dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x00\x01",
500dcd962fcSNelio Laranjeiro 	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
501dcd962fcSNelio Laranjeiro 		"\x00\x00\x00\x00\x00\x00\x11\x11",
502dcd962fcSNelio Laranjeiro 	.vlan_tci = 0,
503dcd962fcSNelio Laranjeiro 	.eth_src = "\x00\x00\x00\x00\x00\x00",
504dcd962fcSNelio Laranjeiro 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
505dcd962fcSNelio Laranjeiro };
506dcd962fcSNelio Laranjeiro 
507ed30d9b6SIntel /* Forward function declarations */
508c9cce428SThomas Monjalon static void setup_attached_port(portid_t pi);
50928caa76aSZhiyong Yang static void map_port_queue_stats_mapping_registers(portid_t pi,
51028caa76aSZhiyong Yang 						   struct rte_port *port);
511edab33b1STetsuya Mukawa static void check_all_ports_link_status(uint32_t port_mask);
512f8244c63SZhiyong Yang static int eth_event_callback(portid_t port_id,
51376ad4a2dSGaetan Rivet 			      enum rte_eth_event_type type,
514d6af1a13SBernard Iremonger 			      void *param, void *ret_param);
515cc1bf307SJeff Guo static void dev_event_callback(const char *device_name,
516fb73e096SJeff Guo 				enum rte_dev_event_type type,
517fb73e096SJeff Guo 				void *param);
518ce8d5614SIntel 
519ce8d5614SIntel /*
520ce8d5614SIntel  * Check if all the ports are started.
521ce8d5614SIntel  * If yes, return positive value. If not, return zero.
522ce8d5614SIntel  */
523ce8d5614SIntel static int all_ports_started(void);
524ed30d9b6SIntel 
52552f38a20SJiayu Hu struct gso_status gso_ports[RTE_MAX_ETHPORTS];
52652f38a20SJiayu Hu uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
52752f38a20SJiayu Hu 
528af75078fSIntel /*
52998a7ea33SJerin Jacob  * Helper function to check if socket is already discovered.
530c9cafcc8SShahaf Shuler  * If yes, return positive value. If not, return zero.
531c9cafcc8SShahaf Shuler  */
532c9cafcc8SShahaf Shuler int
533c9cafcc8SShahaf Shuler new_socket_id(unsigned int socket_id)
534c9cafcc8SShahaf Shuler {
535c9cafcc8SShahaf Shuler 	unsigned int i;
536c9cafcc8SShahaf Shuler 
537c9cafcc8SShahaf Shuler 	for (i = 0; i < num_sockets; i++) {
538c9cafcc8SShahaf Shuler 		if (socket_ids[i] == socket_id)
539c9cafcc8SShahaf Shuler 			return 0;
540c9cafcc8SShahaf Shuler 	}
541c9cafcc8SShahaf Shuler 	return 1;
542c9cafcc8SShahaf Shuler }
543c9cafcc8SShahaf Shuler 
/*
 * Setup default configuration.
 */
/* Build the default forwarding lcore list: every enabled lcore except the
 * master, recording each lcore's NUMA socket in socket_ids[] on the way.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		/* remember every distinct NUMA socket we encounter */
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		/* the master lcore is kept out of the forwarding set */
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	/* forward with a single lcore by default */
	nb_fwd_lcores = 1;
}
575af75078fSIntel 
576af75078fSIntel static void
577af75078fSIntel set_def_peer_eth_addrs(void)
578af75078fSIntel {
579af75078fSIntel 	portid_t i;
580af75078fSIntel 
581af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
582af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
583af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
584af75078fSIntel 	}
585af75078fSIntel }
586af75078fSIntel 
/* Default forwarding ports: every probed ethdev, in probe order; also
 * record each attached device's NUMA socket in socket_ids[].
 */
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		/* socket_id < 0 means the device's socket is unknown */
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	/* by default, configure and forward on all probed ports */
	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
611af75078fSIntel 
/*
 * Install the default forwarding configuration: lcore set, peer MAC
 * addresses, then port set. Both the lcore and port steps append newly
 * discovered NUMA sockets to the shared socket_ids[] table.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
619af75078fSIntel 
620c7f5dba7SAnatoly Burakov /* extremely pessimistic estimation of memory required to create a mempool */
621c7f5dba7SAnatoly Burakov static int
622c7f5dba7SAnatoly Burakov calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
623c7f5dba7SAnatoly Burakov {
624c7f5dba7SAnatoly Burakov 	unsigned int n_pages, mbuf_per_pg, leftover;
625c7f5dba7SAnatoly Burakov 	uint64_t total_mem, mbuf_mem, obj_sz;
626c7f5dba7SAnatoly Burakov 
627c7f5dba7SAnatoly Burakov 	/* there is no good way to predict how much space the mempool will
628c7f5dba7SAnatoly Burakov 	 * occupy because it will allocate chunks on the fly, and some of those
629c7f5dba7SAnatoly Burakov 	 * will come from default DPDK memory while some will come from our
630c7f5dba7SAnatoly Burakov 	 * external memory, so just assume 128MB will be enough for everyone.
631c7f5dba7SAnatoly Burakov 	 */
632c7f5dba7SAnatoly Burakov 	uint64_t hdr_mem = 128 << 20;
633c7f5dba7SAnatoly Burakov 
634c7f5dba7SAnatoly Burakov 	/* account for possible non-contiguousness */
635c7f5dba7SAnatoly Burakov 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
636c7f5dba7SAnatoly Burakov 	if (obj_sz > pgsz) {
637c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
638c7f5dba7SAnatoly Burakov 		return -1;
639c7f5dba7SAnatoly Burakov 	}
640c7f5dba7SAnatoly Burakov 
641c7f5dba7SAnatoly Burakov 	mbuf_per_pg = pgsz / obj_sz;
642c7f5dba7SAnatoly Burakov 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
643c7f5dba7SAnatoly Burakov 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
644c7f5dba7SAnatoly Burakov 
645c7f5dba7SAnatoly Burakov 	mbuf_mem = n_pages * pgsz;
646c7f5dba7SAnatoly Burakov 
647c7f5dba7SAnatoly Burakov 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
648c7f5dba7SAnatoly Burakov 
649c7f5dba7SAnatoly Burakov 	if (total_mem > SIZE_MAX) {
650c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Memory size too big\n");
651c7f5dba7SAnatoly Burakov 		return -1;
652c7f5dba7SAnatoly Burakov 	}
653c7f5dba7SAnatoly Burakov 	*out = (size_t)total_mem;
654c7f5dba7SAnatoly Burakov 
655c7f5dba7SAnatoly Burakov 	return 0;
656c7f5dba7SAnatoly Burakov }
657c7f5dba7SAnatoly Burakov 
658c7f5dba7SAnatoly Burakov static int
659c7f5dba7SAnatoly Burakov pagesz_flags(uint64_t page_sz)
660c7f5dba7SAnatoly Burakov {
661c7f5dba7SAnatoly Burakov 	/* as per mmap() manpage, all page sizes are log2 of page size
662c7f5dba7SAnatoly Burakov 	 * shifted by MAP_HUGE_SHIFT
663c7f5dba7SAnatoly Burakov 	 */
6649d650537SAnatoly Burakov 	int log2 = rte_log2_u64(page_sz);
665c7f5dba7SAnatoly Burakov 
666c7f5dba7SAnatoly Burakov 	return (log2 << HUGE_SHIFT);
667c7f5dba7SAnatoly Burakov }
668c7f5dba7SAnatoly Burakov 
669c7f5dba7SAnatoly Burakov static void *
670c7f5dba7SAnatoly Burakov alloc_mem(size_t memsz, size_t pgsz, bool huge)
671c7f5dba7SAnatoly Burakov {
672c7f5dba7SAnatoly Burakov 	void *addr;
673c7f5dba7SAnatoly Burakov 	int flags;
674c7f5dba7SAnatoly Burakov 
675c7f5dba7SAnatoly Burakov 	/* allocate anonymous hugepages */
676c7f5dba7SAnatoly Burakov 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
677c7f5dba7SAnatoly Burakov 	if (huge)
678c7f5dba7SAnatoly Burakov 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
679c7f5dba7SAnatoly Burakov 
680c7f5dba7SAnatoly Burakov 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
681c7f5dba7SAnatoly Burakov 	if (addr == MAP_FAILED)
682c7f5dba7SAnatoly Burakov 		return NULL;
683c7f5dba7SAnatoly Burakov 
684c7f5dba7SAnatoly Burakov 	return addr;
685c7f5dba7SAnatoly Burakov }
686c7f5dba7SAnatoly Burakov 
/* description of an externally allocated memory area, as produced by
 * create_extmem() and consumed by setup_extmem().
 */
struct extmem_param {
	void *addr;	/* base virtual address of the area */
	size_t len;	/* total length of the area, in bytes */
	size_t pgsz;	/* page size backing the area */
	rte_iova_t *iova_table;	/* per-page IOVA addresses (malloc'd) */
	unsigned int iova_table_len;	/* number of entries in iova_table */
};
694c7f5dba7SAnatoly Burakov 
/*
 * Allocate an anonymous memory area large enough to hold @nb_mbufs mbufs
 * of @mbuf_sz bytes each, trying each supported page size in turn, and
 * fill @param with the area's address, length, page size and per-page
 * IOVA table. On success the malloc'd IOVA table is owned by the caller.
 * Returns 0 on success, -1 on failure.
 */
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big (only possible on 32-bit) */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		/* NOTE(review): mlock() failure is ignored — best-effort */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		/* success for this page size — stop probing */
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	/* hand the area and the IOVA table over to the caller */
	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	/* only reached after addr was successfully mapped */
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
785c7f5dba7SAnatoly Burakov 
786c7f5dba7SAnatoly Burakov static int
787c7f5dba7SAnatoly Burakov setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
788c7f5dba7SAnatoly Burakov {
789c7f5dba7SAnatoly Burakov 	struct extmem_param param;
790c7f5dba7SAnatoly Burakov 	int socket_id, ret;
791c7f5dba7SAnatoly Burakov 
792c7f5dba7SAnatoly Burakov 	memset(&param, 0, sizeof(param));
793c7f5dba7SAnatoly Burakov 
794c7f5dba7SAnatoly Burakov 	/* check if our heap exists */
795c7f5dba7SAnatoly Burakov 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
796c7f5dba7SAnatoly Burakov 	if (socket_id < 0) {
797c7f5dba7SAnatoly Burakov 		/* create our heap */
798c7f5dba7SAnatoly Burakov 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
799c7f5dba7SAnatoly Burakov 		if (ret < 0) {
800c7f5dba7SAnatoly Burakov 			TESTPMD_LOG(ERR, "Cannot create heap\n");
801c7f5dba7SAnatoly Burakov 			return -1;
802c7f5dba7SAnatoly Burakov 		}
803c7f5dba7SAnatoly Burakov 	}
804c7f5dba7SAnatoly Burakov 
805c7f5dba7SAnatoly Burakov 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
806c7f5dba7SAnatoly Burakov 	if (ret < 0) {
807c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
808c7f5dba7SAnatoly Burakov 		return -1;
809c7f5dba7SAnatoly Burakov 	}
810c7f5dba7SAnatoly Burakov 
811c7f5dba7SAnatoly Burakov 	/* we now have a valid memory area, so add it to heap */
812c7f5dba7SAnatoly Burakov 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
813c7f5dba7SAnatoly Burakov 			param.addr, param.len, param.iova_table,
814c7f5dba7SAnatoly Burakov 			param.iova_table_len, param.pgsz);
815c7f5dba7SAnatoly Burakov 
816c7f5dba7SAnatoly Burakov 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
817c7f5dba7SAnatoly Burakov 
818c7f5dba7SAnatoly Burakov 	/* not needed any more */
819c7f5dba7SAnatoly Burakov 	free(param.iova_table);
820c7f5dba7SAnatoly Burakov 
821c7f5dba7SAnatoly Burakov 	if (ret < 0) {
822c7f5dba7SAnatoly Burakov 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
823c7f5dba7SAnatoly Burakov 		munmap(param.addr, param.len);
824c7f5dba7SAnatoly Burakov 		return -1;
825c7f5dba7SAnatoly Burakov 	}
826c7f5dba7SAnatoly Burakov 
827c7f5dba7SAnatoly Burakov 	/* success */
828c7f5dba7SAnatoly Burakov 
829c7f5dba7SAnatoly Burakov 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
830c7f5dba7SAnatoly Burakov 			param.len >> 20);
831c7f5dba7SAnatoly Burakov 
832c7f5dba7SAnatoly Burakov 	return 0;
833c7f5dba7SAnatoly Burakov }
834c7f5dba7SAnatoly Burakov 
/*
 * Create the mbuf pool for a given socket, honouring the selected
 * allocation mode (mp_alloc_type). Exits the application on failure;
 * dumps the pool when verbose_level > 0.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			/* build the pool manually, backed by anonymous memory */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			/* populate_anon() returns the number of objects
			 * added; 0 means the populate failed.
			 */
			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

	/* successful cases deliberately fall through to the check below */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
917af75078fSIntel 
91820a0286fSLiu Xiaofeng /*
91920a0286fSLiu Xiaofeng  * Check given socket id is valid or not with NUMA mode,
92020a0286fSLiu Xiaofeng  * if valid, return 0, else return -1
92120a0286fSLiu Xiaofeng  */
92220a0286fSLiu Xiaofeng static int
92320a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
92420a0286fSLiu Xiaofeng {
92520a0286fSLiu Xiaofeng 	static int warning_once = 0;
92620a0286fSLiu Xiaofeng 
927c9cafcc8SShahaf Shuler 	if (new_socket_id(socket_id)) {
92820a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
92920a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
93020a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
93120a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
93220a0286fSLiu Xiaofeng 			       " --numa.\n");
93320a0286fSLiu Xiaofeng 		warning_once = 1;
93420a0286fSLiu Xiaofeng 		return -1;
93520a0286fSLiu Xiaofeng 	}
93620a0286fSLiu Xiaofeng 	return 0;
93720a0286fSLiu Xiaofeng }
93820a0286fSLiu Xiaofeng 
9393f7311baSWei Dai /*
9403f7311baSWei Dai  * Get the allowed maximum number of RX queues.
9413f7311baSWei Dai  * *pid return the port id which has minimal value of
9423f7311baSWei Dai  * max_rx_queues in all ports.
9433f7311baSWei Dai  */
9443f7311baSWei Dai queueid_t
9453f7311baSWei Dai get_allowed_max_nb_rxq(portid_t *pid)
9463f7311baSWei Dai {
9473f7311baSWei Dai 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
9483f7311baSWei Dai 	portid_t pi;
9493f7311baSWei Dai 	struct rte_eth_dev_info dev_info;
9503f7311baSWei Dai 
9513f7311baSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
9523f7311baSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
9533f7311baSWei Dai 		if (dev_info.max_rx_queues < allowed_max_rxq) {
9543f7311baSWei Dai 			allowed_max_rxq = dev_info.max_rx_queues;
9553f7311baSWei Dai 			*pid = pi;
9563f7311baSWei Dai 		}
9573f7311baSWei Dai 	}
9583f7311baSWei Dai 	return allowed_max_rxq;
9593f7311baSWei Dai }
9603f7311baSWei Dai 
9613f7311baSWei Dai /*
9623f7311baSWei Dai  * Check input rxq is valid or not.
9633f7311baSWei Dai  * If input rxq is not greater than any of maximum number
9643f7311baSWei Dai  * of RX queues of all ports, it is valid.
9653f7311baSWei Dai  * if valid, return 0, else return -1
9663f7311baSWei Dai  */
9673f7311baSWei Dai int
9683f7311baSWei Dai check_nb_rxq(queueid_t rxq)
9693f7311baSWei Dai {
9703f7311baSWei Dai 	queueid_t allowed_max_rxq;
9713f7311baSWei Dai 	portid_t pid = 0;
9723f7311baSWei Dai 
9733f7311baSWei Dai 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
9743f7311baSWei Dai 	if (rxq > allowed_max_rxq) {
9753f7311baSWei Dai 		printf("Fail: input rxq (%u) can't be greater "
9763f7311baSWei Dai 		       "than max_rx_queues (%u) of port %u\n",
9773f7311baSWei Dai 		       rxq,
9783f7311baSWei Dai 		       allowed_max_rxq,
9793f7311baSWei Dai 		       pid);
9803f7311baSWei Dai 		return -1;
9813f7311baSWei Dai 	}
9823f7311baSWei Dai 	return 0;
9833f7311baSWei Dai }
9843f7311baSWei Dai 
98536db4f6cSWei Dai /*
98636db4f6cSWei Dai  * Get the allowed maximum number of TX queues.
98736db4f6cSWei Dai  * *pid return the port id which has minimal value of
98836db4f6cSWei Dai  * max_tx_queues in all ports.
98936db4f6cSWei Dai  */
99036db4f6cSWei Dai queueid_t
99136db4f6cSWei Dai get_allowed_max_nb_txq(portid_t *pid)
99236db4f6cSWei Dai {
99336db4f6cSWei Dai 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
99436db4f6cSWei Dai 	portid_t pi;
99536db4f6cSWei Dai 	struct rte_eth_dev_info dev_info;
99636db4f6cSWei Dai 
99736db4f6cSWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
99836db4f6cSWei Dai 		rte_eth_dev_info_get(pi, &dev_info);
99936db4f6cSWei Dai 		if (dev_info.max_tx_queues < allowed_max_txq) {
100036db4f6cSWei Dai 			allowed_max_txq = dev_info.max_tx_queues;
100136db4f6cSWei Dai 			*pid = pi;
100236db4f6cSWei Dai 		}
100336db4f6cSWei Dai 	}
100436db4f6cSWei Dai 	return allowed_max_txq;
100536db4f6cSWei Dai }
100636db4f6cSWei Dai 
100736db4f6cSWei Dai /*
100836db4f6cSWei Dai  * Check input txq is valid or not.
100936db4f6cSWei Dai  * If input txq is not greater than any of maximum number
101036db4f6cSWei Dai  * of TX queues of all ports, it is valid.
101136db4f6cSWei Dai  * if valid, return 0, else return -1
101236db4f6cSWei Dai  */
101336db4f6cSWei Dai int
101436db4f6cSWei Dai check_nb_txq(queueid_t txq)
101536db4f6cSWei Dai {
101636db4f6cSWei Dai 	queueid_t allowed_max_txq;
101736db4f6cSWei Dai 	portid_t pid = 0;
101836db4f6cSWei Dai 
101936db4f6cSWei Dai 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
102036db4f6cSWei Dai 	if (txq > allowed_max_txq) {
102136db4f6cSWei Dai 		printf("Fail: input txq (%u) can't be greater "
102236db4f6cSWei Dai 		       "than max_tx_queues (%u) of port %u\n",
102336db4f6cSWei Dai 		       txq,
102436db4f6cSWei Dai 		       allowed_max_txq,
102536db4f6cSWei Dai 		       pid);
102636db4f6cSWei Dai 		return -1;
102736db4f6cSWei Dai 	}
102836db4f6cSWei Dai 	return 0;
102936db4f6cSWei Dai }
103036db4f6cSWei Dai 
/*
 * Global configuration done once at startup: allocate per-lcore contexts,
 * apply default per-port Rx/Tx configuration, create the mbuf pools
 * (per-socket when NUMA is enabled), set up GSO/GRO contexts and the
 * forwarding streams. Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* strip offloads the device does not advertise */
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
			DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (numa_support) {
			/* count ports per socket for pool sizing decisions */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket 0 pool if this socket has none */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	/* let softnic ports drive the forwarding lcores directly */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}
1199ce8d5614SIntel 
12002950a769SDeclan Doherty 
12012950a769SDeclan Doherty void
1202a21d5a4bSDeclan Doherty reconfig(portid_t new_port_id, unsigned socket_id)
12032950a769SDeclan Doherty {
12042950a769SDeclan Doherty 	struct rte_port *port;
12052950a769SDeclan Doherty 
12062950a769SDeclan Doherty 	/* Reconfiguration of Ethernet ports. */
12072950a769SDeclan Doherty 	port = &ports[new_port_id];
12082950a769SDeclan Doherty 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
12092950a769SDeclan Doherty 
12102950a769SDeclan Doherty 	/* set flag to initialize port/queue */
12112950a769SDeclan Doherty 	port->need_reconfig = 1;
12122950a769SDeclan Doherty 	port->need_reconfig_queues = 1;
1213a21d5a4bSDeclan Doherty 	port->socket_id = socket_id;
12142950a769SDeclan Doherty 
12152950a769SDeclan Doherty 	init_port_config();
12162950a769SDeclan Doherty }
12172950a769SDeclan Doherty 
12182950a769SDeclan Doherty 
/*
 * (Re)allocate the global array of forwarding streams, sized as
 * nb_ports * max(nb_rxq, nb_txq), and assign each port its socket id.
 * Returns 0 on success, -1 when the configured queue counts exceed a
 * port's limits or no queues are configured; exits on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* nothing to do when the stream count is unchanged */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
1307af75078fSIntel 
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the distribution of burst sizes recorded for one direction of a
 * stream: the two most frequent burst sizes (in packets per burst) and the
 * percentage of bursts each accounts for, folding everything else into an
 * "others" bucket.
 *
 * rx_tx: direction label printed in front of the line ("RX" or "TX").
 * pbs:   burst histogram; pkt_burst_spread[n] counts bursts of n packets.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;	/* total number of bursts recorded */
	unsigned int nb_burst;
	unsigned int burst_stats[3];	/* [0], [1]: two highest burst counts */
	uint16_t pktnb_stats[3];	/* packets/burst matching burst_stats[] */
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* new maximum: previous maximum becomes second best */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;		/* nothing recorded: print nothing */
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		/* one burst size accounts for every burst seen */
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		/* the two tracked burst sizes cover everything */
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		/* a share rounds down to 0%: lump it into "others" */
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1365af75078fSIntel 
1366af75078fSIntel static void
1367af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
1368af75078fSIntel {
1369af75078fSIntel 	struct fwd_stream *fs;
1370af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
1371af75078fSIntel 
1372af75078fSIntel 	fs = fwd_streams[stream_id];
1373af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1374af75078fSIntel 	    (fs->fwd_dropped == 0))
1375af75078fSIntel 		return;
1376af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1377af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
1378af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1379af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1380c185d42cSDavid Marchand 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1381c185d42cSDavid Marchand 	       " TX-dropped: %-14"PRIu64,
1382af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1383af75078fSIntel 
1384af75078fSIntel 	/* if checksum mode */
1385af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
1386c185d42cSDavid Marchand 		printf("  RX- bad IP checksum: %-14"PRIu64
1387c185d42cSDavid Marchand 		       "  Rx- bad L4 checksum: %-14"PRIu64
1388c185d42cSDavid Marchand 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
138958d475b7SJerin Jacob 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
139058d475b7SJerin Jacob 			fs->rx_bad_outer_l4_csum);
139194d65546SDavid Marchand 	} else {
139294d65546SDavid Marchand 		printf("\n");
1393af75078fSIntel 	}
1394af75078fSIntel 
1395af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1396af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1397af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1398af75078fSIntel #endif
1399af75078fSIntel }
1400af75078fSIntel 
/*
 * Display accumulated forwarding statistics.  Per-stream stats are shown
 * when there are more streams than forwarding ports; per-port stats report
 * the software counters plus the delta of the hardware counters since the
 * last fwd_stats_reset(); a final summary is accumulated over all ports.
 */
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* per-port aggregation of the per-stream software counters */
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	/* First pass: fold every stream's counters into its RX/TX ports. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			/* one stream per port: keep it for burst stats below */
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	/* Second pass: per-port display and accumulation of the totals. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		/* report only the delta since the last fwd_stats_reset() */
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf  += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		/* compact layout when no queue-stats mapping is configured */
		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			/* wider layout used with queue-stats mapping enabled */
			printf("  RX-packets:             %14"PRIu64
			       "    RX-dropped:%14"PRIu64
			       "    RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       "    Bad-l4csum:%14"PRIu64
				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs:             %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets:             %14"PRIu64
			       "    TX-dropped:%14"PRIu64
			       "    TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		/* per-queue HW counters, shown only when mapping is enabled */
		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "                                 TX-bytes:%14"
				       PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	/* Finally, the totals accumulated over all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
}
158753324971SDavid Marchand 
158853324971SDavid Marchand void
158953324971SDavid Marchand fwd_stats_reset(void)
159053324971SDavid Marchand {
159153324971SDavid Marchand 	streamid_t sm_id;
159253324971SDavid Marchand 	portid_t pt_id;
159353324971SDavid Marchand 	int i;
159453324971SDavid Marchand 
159553324971SDavid Marchand 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
159653324971SDavid Marchand 		pt_id = fwd_ports_ids[i];
159753324971SDavid Marchand 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
159853324971SDavid Marchand 	}
159953324971SDavid Marchand 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
160053324971SDavid Marchand 		struct fwd_stream *fs = fwd_streams[sm_id];
160153324971SDavid Marchand 
160253324971SDavid Marchand 		fs->rx_packets = 0;
160353324971SDavid Marchand 		fs->tx_packets = 0;
160453324971SDavid Marchand 		fs->fwd_dropped = 0;
160553324971SDavid Marchand 		fs->rx_bad_ip_csum = 0;
160653324971SDavid Marchand 		fs->rx_bad_l4_csum = 0;
160753324971SDavid Marchand 		fs->rx_bad_outer_l4_csum = 0;
160853324971SDavid Marchand 
160953324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
161053324971SDavid Marchand 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
161153324971SDavid Marchand 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
161253324971SDavid Marchand #endif
161353324971SDavid Marchand #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
161453324971SDavid Marchand 		fs->core_cycles = 0;
161553324971SDavid Marchand #endif
161653324971SDavid Marchand 	}
161753324971SDavid Marchand }
161853324971SDavid Marchand 
/*
 * Drain and free any packets still pending on the RX queues of every
 * forwarding port so a new forwarding run starts from empty queues.
 * The full sweep over all ports/queues is performed twice, with a short
 * pause in between.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * nonzero packets, so a timer is used to exit
				 * the loop after a 1 second expiry.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
1663af75078fSIntel 
/*
 * Per-lcore forwarding loop: repeatedly apply the given packet forwarding
 * function to every stream assigned to this lcore, until fc->stopped is
 * set by stop_packet_forwarding().  When the bitrate / latency-stats
 * libraries are compiled in, the designated lcore also updates those
 * statistics from within this loop.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;	/* first stream handled by this lcore */
	streamid_t nb_fs;		/* number of streams for this lcore */
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* only the dedicated bitrate lcore does the calculation */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* only the dedicated latency-stats lcore updates them */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1706af75078fSIntel 
1707af75078fSIntel static int
1708af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
1709af75078fSIntel {
1710af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1711af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
1712af75078fSIntel 	return 0;
1713af75078fSIntel }
1714af75078fSIntel 
1715af75078fSIntel /*
1716af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1717af75078fSIntel  * Used to start communication flows in network loopback test configurations.
1718af75078fSIntel  */
1719af75078fSIntel static int
1720af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
1721af75078fSIntel {
1722af75078fSIntel 	struct fwd_lcore *fwd_lc;
1723af75078fSIntel 	struct fwd_lcore tmp_lcore;
1724af75078fSIntel 
1725af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1726af75078fSIntel 	tmp_lcore = *fwd_lc;
1727af75078fSIntel 	tmp_lcore.stopped = 1;
1728af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1729af75078fSIntel 	return 0;
1730af75078fSIntel }
1731af75078fSIntel 
1732af75078fSIntel /*
1733af75078fSIntel  * Launch packet forwarding:
1734af75078fSIntel  *     - Setup per-port forwarding context.
1735af75078fSIntel  *     - launch logical cores with their forwarding configuration.
1736af75078fSIntel  */
1737af75078fSIntel static void
1738af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1739af75078fSIntel {
1740af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
1741af75078fSIntel 	unsigned int i;
1742af75078fSIntel 	unsigned int lc_id;
1743af75078fSIntel 	int diag;
1744af75078fSIntel 
1745af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1746af75078fSIntel 	if (port_fwd_begin != NULL) {
1747af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1748af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
1749af75078fSIntel 	}
1750af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1751af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
1752af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1753af75078fSIntel 			fwd_lcores[i]->stopped = 0;
1754af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1755af75078fSIntel 						     fwd_lcores[i], lc_id);
1756af75078fSIntel 			if (diag != 0)
1757af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
1758af75078fSIntel 				       lc_id, diag);
1759af75078fSIntel 		}
1760af75078fSIntel 	}
1761af75078fSIntel }
1762af75078fSIntel 
/*
 * Launch packet forwarding configuration.
 *
 * with_tx_first: when non-zero, first run that many single TXONLY bursts
 * on every forwarding port (to kick-start traffic in loopback setups)
 * before launching the configured forwarding engine on all lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;

	/* The selected engine must have usable RX and/or TX queues. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if(dcb_test) {
		/* DCB mode requires every forwarding port to be configured
		 * for DCB and more than one forwarding core.
		 */
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* drain stale packets from the RX queues unless disabled */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		/* run the TXONLY engine for one burst per requested round */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1849af75078fSIntel 
1850af75078fSIntel void
1851af75078fSIntel stop_packet_forwarding(void)
1852af75078fSIntel {
1853af75078fSIntel 	port_fwd_end_t port_fwd_end;
1854af75078fSIntel 	lcoreid_t lc_id;
185553324971SDavid Marchand 	portid_t pt_id;
185653324971SDavid Marchand 	int i;
1857af75078fSIntel 
1858af75078fSIntel 	if (test_done) {
1859af75078fSIntel 		printf("Packet forwarding not started\n");
1860af75078fSIntel 		return;
1861af75078fSIntel 	}
1862af75078fSIntel 	printf("Telling cores to stop...");
1863af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1864af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1865af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1866af75078fSIntel 	rte_eal_mp_wait_lcore();
1867af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1868af75078fSIntel 	if (port_fwd_end != NULL) {
1869af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1870af75078fSIntel 			pt_id = fwd_ports_ids[i];
1871af75078fSIntel 			(*port_fwd_end)(pt_id);
1872af75078fSIntel 		}
1873af75078fSIntel 	}
1874c185d42cSDavid Marchand 
187553324971SDavid Marchand 	fwd_stats_display();
187658d475b7SJerin Jacob 
1877af75078fSIntel 	printf("\nDone.\n");
1878af75078fSIntel 	test_done = 1;
1879af75078fSIntel }
1880af75078fSIntel 
1881cfae07fdSOuyang Changchun void
1882cfae07fdSOuyang Changchun dev_set_link_up(portid_t pid)
1883cfae07fdSOuyang Changchun {
1884492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_up(pid) < 0)
1885cfae07fdSOuyang Changchun 		printf("\nSet link up fail.\n");
1886cfae07fdSOuyang Changchun }
1887cfae07fdSOuyang Changchun 
1888cfae07fdSOuyang Changchun void
1889cfae07fdSOuyang Changchun dev_set_link_down(portid_t pid)
1890cfae07fdSOuyang Changchun {
1891492ab604SZhiyong Yang 	if (rte_eth_dev_set_link_down(pid) < 0)
1892cfae07fdSOuyang Changchun 		printf("\nSet link down fail.\n");
1893cfae07fdSOuyang Changchun }
1894cfae07fdSOuyang Changchun 
1895ce8d5614SIntel static int
1896ce8d5614SIntel all_ports_started(void)
1897ce8d5614SIntel {
1898ce8d5614SIntel 	portid_t pi;
1899ce8d5614SIntel 	struct rte_port *port;
1900ce8d5614SIntel 
19017d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1902ce8d5614SIntel 		port = &ports[pi];
1903ce8d5614SIntel 		/* Check if there is a port which is not started */
190441b05095SBernard Iremonger 		if ((port->port_status != RTE_PORT_STARTED) &&
190541b05095SBernard Iremonger 			(port->slave_flag == 0))
1906ce8d5614SIntel 			return 0;
1907ce8d5614SIntel 	}
1908ce8d5614SIntel 
1909ce8d5614SIntel 	/* No port is not started */
1910ce8d5614SIntel 	return 1;
1911ce8d5614SIntel }
1912ce8d5614SIntel 
1913148f963fSBruce Richardson int
19146018eb8cSShahaf Shuler port_is_stopped(portid_t port_id)
19156018eb8cSShahaf Shuler {
19166018eb8cSShahaf Shuler 	struct rte_port *port = &ports[port_id];
19176018eb8cSShahaf Shuler 
19186018eb8cSShahaf Shuler 	if ((port->port_status != RTE_PORT_STOPPED) &&
19196018eb8cSShahaf Shuler 	    (port->slave_flag == 0))
19206018eb8cSShahaf Shuler 		return 0;
19216018eb8cSShahaf Shuler 	return 1;
19226018eb8cSShahaf Shuler }
19236018eb8cSShahaf Shuler 
19246018eb8cSShahaf Shuler int
1925edab33b1STetsuya Mukawa all_ports_stopped(void)
1926edab33b1STetsuya Mukawa {
1927edab33b1STetsuya Mukawa 	portid_t pi;
1928edab33b1STetsuya Mukawa 
19297d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
19306018eb8cSShahaf Shuler 		if (!port_is_stopped(pi))
1931edab33b1STetsuya Mukawa 			return 0;
1932edab33b1STetsuya Mukawa 	}
1933edab33b1STetsuya Mukawa 
1934edab33b1STetsuya Mukawa 	return 1;
1935edab33b1STetsuya Mukawa }
1936edab33b1STetsuya Mukawa 
1937edab33b1STetsuya Mukawa int
1938edab33b1STetsuya Mukawa port_is_started(portid_t port_id)
1939edab33b1STetsuya Mukawa {
1940edab33b1STetsuya Mukawa 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1941edab33b1STetsuya Mukawa 		return 0;
1942edab33b1STetsuya Mukawa 
1943edab33b1STetsuya Mukawa 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1944edab33b1STetsuya Mukawa 		return 0;
1945edab33b1STetsuya Mukawa 
1946edab33b1STetsuya Mukawa 	return 1;
1947edab33b1STetsuya Mukawa }
1948edab33b1STetsuya Mukawa 
1949edab33b1STetsuya Mukawa int
1950ce8d5614SIntel start_port(portid_t pid)
1951ce8d5614SIntel {
195292d2703eSMichael Qiu 	int diag, need_check_link_status = -1;
1953ce8d5614SIntel 	portid_t pi;
1954ce8d5614SIntel 	queueid_t qi;
1955ce8d5614SIntel 	struct rte_port *port;
19562950a769SDeclan Doherty 	struct ether_addr mac_addr;
1957ce8d5614SIntel 
19584468635fSMichael Qiu 	if (port_id_is_invalid(pid, ENABLED_WARN))
19594468635fSMichael Qiu 		return 0;
19604468635fSMichael Qiu 
1961ce8d5614SIntel 	if(dcb_config)
1962ce8d5614SIntel 		dcb_test = 1;
19637d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pi) {
1964edab33b1STetsuya Mukawa 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1965ce8d5614SIntel 			continue;
1966ce8d5614SIntel 
196792d2703eSMichael Qiu 		need_check_link_status = 0;
1968ce8d5614SIntel 		port = &ports[pi];
1969ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1970ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1971ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1972ce8d5614SIntel 			continue;
1973ce8d5614SIntel 		}
1974ce8d5614SIntel 
1975ce8d5614SIntel 		if (port->need_reconfig > 0) {
1976ce8d5614SIntel 			port->need_reconfig = 0;
1977ce8d5614SIntel 
19787ee3e944SVasily Philipov 			if (flow_isolate_all) {
19797ee3e944SVasily Philipov 				int ret = port_flow_isolate(pi, 1);
19807ee3e944SVasily Philipov 				if (ret) {
19817ee3e944SVasily Philipov 					printf("Failed to apply isolated"
19827ee3e944SVasily Philipov 					       " mode on port %d\n", pi);
19837ee3e944SVasily Philipov 					return -1;
19847ee3e944SVasily Philipov 				}
19857ee3e944SVasily Philipov 			}
1986b5b38ed8SRaslan Darawsheh 			configure_rxtx_dump_callbacks(0);
19875706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
198820a0286fSLiu Xiaofeng 					port->socket_id);
1989ce8d5614SIntel 			/* configure port */
1990ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1991ce8d5614SIntel 						&(port->dev_conf));
1992ce8d5614SIntel 			if (diag != 0) {
1993ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1994ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1995ce8d5614SIntel 					printf("Port %d can not be set back "
1996ce8d5614SIntel 							"to stopped\n", pi);
1997ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1998ce8d5614SIntel 				/* try to reconfigure port next time */
1999ce8d5614SIntel 				port->need_reconfig = 1;
2000148f963fSBruce Richardson 				return -1;
2001ce8d5614SIntel 			}
2002ce8d5614SIntel 		}
2003ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
2004ce8d5614SIntel 			port->need_reconfig_queues = 0;
2005ce8d5614SIntel 			/* setup tx queues */
2006ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
2007b6ea6408SIntel 				if ((numa_support) &&
2008b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
2009b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2010d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2011d44f8a48SQi Zhang 						txring_numa[pi],
2012d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2013b6ea6408SIntel 				else
2014b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
2015d44f8a48SQi Zhang 						port->nb_tx_desc[qi],
2016d44f8a48SQi Zhang 						port->socket_id,
2017d44f8a48SQi Zhang 						&(port->tx_conf[qi]));
2018b6ea6408SIntel 
2019ce8d5614SIntel 				if (diag == 0)
2020ce8d5614SIntel 					continue;
2021ce8d5614SIntel 
2022ce8d5614SIntel 				/* Fail to setup tx queue, return */
2023ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2024ce8d5614SIntel 							RTE_PORT_HANDLING,
2025ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2026ce8d5614SIntel 					printf("Port %d can not be set back "
2027ce8d5614SIntel 							"to stopped\n", pi);
2028d44f8a48SQi Zhang 				printf("Fail to configure port %d tx queues\n",
2029d44f8a48SQi Zhang 				       pi);
2030ce8d5614SIntel 				/* try to reconfigure queues next time */
2031ce8d5614SIntel 				port->need_reconfig_queues = 1;
2032148f963fSBruce Richardson 				return -1;
2033ce8d5614SIntel 			}
2034ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
2035d44f8a48SQi Zhang 				/* setup rx queues */
2036b6ea6408SIntel 				if ((numa_support) &&
2037b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2038b6ea6408SIntel 					struct rte_mempool * mp =
2039b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
2040b6ea6408SIntel 					if (mp == NULL) {
2041b6ea6408SIntel 						printf("Failed to setup RX queue:"
2042b6ea6408SIntel 							"No mempool allocation"
2043b6ea6408SIntel 							" on the socket %d\n",
2044b6ea6408SIntel 							rxring_numa[pi]);
2045148f963fSBruce Richardson 						return -1;
2046b6ea6408SIntel 					}
2047b6ea6408SIntel 
2048b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2049d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2050d44f8a48SQi Zhang 					     rxring_numa[pi],
2051d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2052d44f8a48SQi Zhang 					     mp);
20531e1d6bddSBernard Iremonger 				} else {
20541e1d6bddSBernard Iremonger 					struct rte_mempool *mp =
20551e1d6bddSBernard Iremonger 						mbuf_pool_find(port->socket_id);
20561e1d6bddSBernard Iremonger 					if (mp == NULL) {
20571e1d6bddSBernard Iremonger 						printf("Failed to setup RX queue:"
20581e1d6bddSBernard Iremonger 							"No mempool allocation"
20591e1d6bddSBernard Iremonger 							" on the socket %d\n",
20601e1d6bddSBernard Iremonger 							port->socket_id);
20611e1d6bddSBernard Iremonger 						return -1;
2062b6ea6408SIntel 					}
2063b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
2064d4930794SFerruh Yigit 					     port->nb_rx_desc[qi],
2065d44f8a48SQi Zhang 					     port->socket_id,
2066d44f8a48SQi Zhang 					     &(port->rx_conf[qi]),
2067d44f8a48SQi Zhang 					     mp);
20681e1d6bddSBernard Iremonger 				}
2069ce8d5614SIntel 				if (diag == 0)
2070ce8d5614SIntel 					continue;
2071ce8d5614SIntel 
2072ce8d5614SIntel 				/* Fail to setup rx queue, return */
2073ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
2074ce8d5614SIntel 							RTE_PORT_HANDLING,
2075ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
2076ce8d5614SIntel 					printf("Port %d can not be set back "
2077ce8d5614SIntel 							"to stopped\n", pi);
2078d44f8a48SQi Zhang 				printf("Fail to configure port %d rx queues\n",
2079d44f8a48SQi Zhang 				       pi);
2080ce8d5614SIntel 				/* try to reconfigure queues next time */
2081ce8d5614SIntel 				port->need_reconfig_queues = 1;
2082148f963fSBruce Richardson 				return -1;
2083ce8d5614SIntel 			}
2084ce8d5614SIntel 		}
2085b5b38ed8SRaslan Darawsheh 		configure_rxtx_dump_callbacks(verbose_level);
2086ce8d5614SIntel 		/* start port */
2087ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
2088ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
2089ce8d5614SIntel 
2090ce8d5614SIntel 			/* Fail to setup rx queue, return */
2091ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
2092ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2093ce8d5614SIntel 				printf("Port %d can not be set back to "
2094ce8d5614SIntel 							"stopped\n", pi);
2095ce8d5614SIntel 			continue;
2096ce8d5614SIntel 		}
2097ce8d5614SIntel 
2098ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
2099ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2100ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
2101ce8d5614SIntel 
21022950a769SDeclan Doherty 		rte_eth_macaddr_get(pi, &mac_addr);
2103d8c89163SZijie Pan 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
21042950a769SDeclan Doherty 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
21052950a769SDeclan Doherty 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
21062950a769SDeclan Doherty 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2107d8c89163SZijie Pan 
2108ce8d5614SIntel 		/* at least one port started, need checking link status */
2109ce8d5614SIntel 		need_check_link_status = 1;
2110ce8d5614SIntel 	}
2111ce8d5614SIntel 
211292d2703eSMichael Qiu 	if (need_check_link_status == 1 && !no_link_check)
2113edab33b1STetsuya Mukawa 		check_all_ports_link_status(RTE_PORT_ALL);
211492d2703eSMichael Qiu 	else if (need_check_link_status == 0)
2115ce8d5614SIntel 		printf("Please stop the ports first\n");
2116ce8d5614SIntel 
2117ce8d5614SIntel 	printf("Done\n");
2118148f963fSBruce Richardson 	return 0;
2119ce8d5614SIntel }
2120ce8d5614SIntel 
/*
 * Stop port @pid, or every port when @pid is RTE_PORT_ALL.
 *
 * Ports that are still forwarding, or that are bonding slaves, are
 * skipped with a hint to the user.  The per-port state is advanced
 * STARTED -> HANDLING -> STOPPED with rte_atomic16_cmpset.  Invoking a
 * stop also clears the DCB test configuration.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only a STARTED port can be stopped; skip others silently. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
2169ce8d5614SIntel 
2170ce6959bfSWisam Jaddo static void
21714f1de450SThomas Monjalon remove_invalid_ports_in(portid_t *array, portid_t *total)
2172ce6959bfSWisam Jaddo {
21734f1de450SThomas Monjalon 	portid_t i;
21744f1de450SThomas Monjalon 	portid_t new_total = 0;
2175ce6959bfSWisam Jaddo 
21764f1de450SThomas Monjalon 	for (i = 0; i < *total; i++)
21774f1de450SThomas Monjalon 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
21784f1de450SThomas Monjalon 			array[new_total] = array[i];
21794f1de450SThomas Monjalon 			new_total++;
2180ce6959bfSWisam Jaddo 		}
21814f1de450SThomas Monjalon 	*total = new_total;
21824f1de450SThomas Monjalon }
21834f1de450SThomas Monjalon 
/*
 * Purge detached/invalid port ids from the global and forwarding port
 * lists, and keep the configured port count in sync with the
 * forwarding list.
 */
static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
2191ce6959bfSWisam Jaddo 
/*
 * Close port @pid, or every port when @pid is RTE_PORT_ALL.
 *
 * A port must be STOPPED before it can be closed; forwarding ports and
 * bonding slaves are skipped with a hint.  Flow rules are flushed
 * before the device is closed, and invalid ids are purged from the
 * port lists afterwards.  State moves STOPPED -> HANDLING -> CLOSED.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset CLOSED->CLOSED is a pure atomic read of the state. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
2243ce8d5614SIntel 
2244edab33b1STetsuya Mukawa void
224597f1e196SWei Dai reset_port(portid_t pid)
224697f1e196SWei Dai {
224797f1e196SWei Dai 	int diag;
224897f1e196SWei Dai 	portid_t pi;
224997f1e196SWei Dai 	struct rte_port *port;
225097f1e196SWei Dai 
225197f1e196SWei Dai 	if (port_id_is_invalid(pid, ENABLED_WARN))
225297f1e196SWei Dai 		return;
225397f1e196SWei Dai 
225497f1e196SWei Dai 	printf("Resetting ports...\n");
225597f1e196SWei Dai 
225697f1e196SWei Dai 	RTE_ETH_FOREACH_DEV(pi) {
225797f1e196SWei Dai 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
225897f1e196SWei Dai 			continue;
225997f1e196SWei Dai 
226097f1e196SWei Dai 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
226197f1e196SWei Dai 			printf("Please remove port %d from forwarding "
226297f1e196SWei Dai 			       "configuration.\n", pi);
226397f1e196SWei Dai 			continue;
226497f1e196SWei Dai 		}
226597f1e196SWei Dai 
226697f1e196SWei Dai 		if (port_is_bonding_slave(pi)) {
226797f1e196SWei Dai 			printf("Please remove port %d from bonded device.\n",
226897f1e196SWei Dai 			       pi);
226997f1e196SWei Dai 			continue;
227097f1e196SWei Dai 		}
227197f1e196SWei Dai 
227297f1e196SWei Dai 		diag = rte_eth_dev_reset(pi);
227397f1e196SWei Dai 		if (diag == 0) {
227497f1e196SWei Dai 			port = &ports[pi];
227597f1e196SWei Dai 			port->need_reconfig = 1;
227697f1e196SWei Dai 			port->need_reconfig_queues = 1;
227797f1e196SWei Dai 		} else {
227897f1e196SWei Dai 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
227997f1e196SWei Dai 		}
228097f1e196SWei Dai 	}
228197f1e196SWei Dai 
228297f1e196SWei Dai 	printf("Done\n");
228397f1e196SWei Dai }
228497f1e196SWei Dai 
/*
 * Probe and set up the device(s) described by @identifier (a devargs
 * string).  After probing succeeds, newly created ports are finished
 * either by scanning for ports flagged from the RTE_ETH_EVENT_NEW
 * handler (when setup_on_probe_event is set) or by iterating over the
 * ports matching the devargs string.
 */
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) != 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
2321c9cce428SThomas Monjalon 
/*
 * Finish bringing up a freshly probed port: pick a valid NUMA socket,
 * reconfigure testpmd for the port, enable promiscuous mode and
 * register the port in the global and forwarding port lists, leaving
 * it in the STOPPED state.
 */
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	/* Clear the flag set by the RTE_ETH_EVENT_NEW handler. */
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
2343edab33b1STetsuya Mukawa 
/*
 * Detach the rte_device backing @port_id and close all of its sibling
 * ports.  The port must at least be stopped; if it was stopped but not
 * closed, its flow rules are flushed before removal.  After a
 * successful removal, every port that shared the same device is forced
 * to CLOSED and invalid ids are purged from the port lists.
 */
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}

	for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
		if (rte_eth_devices[sibling].device != dev)
			continue;
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	return;
}
23925f4ec54fSChen Jing D(Mark) 
/*
 * Tear down testpmd on exit: stop packet forwarding if it is still
 * running, stop and then close every port, and disable hot-plug
 * monitoring when it was enabled.
 */
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		/* Skip link-status polling while tearing everything down. */
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue that
			 * requires to call clean-up routine to remove existing
			 * socket.
			 * This workaround valid only for testpmd, needs a fix
			 * valid for all applications.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port_device(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}

	printf("\nBye...\n");
}
2455af75078fSIntel 
/* Handler type for an interactive testpmd command. */
typedef void (*cmd_func_t)(void);
/* Association of a command name with its handler function. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2463af75078fSIntel 
2464ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print them finally */
2465af75078fSIntel static void
2466edab33b1STetsuya Mukawa check_all_ports_link_status(uint32_t port_mask)
2467af75078fSIntel {
2468ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
2469ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2470f8244c63SZhiyong Yang 	portid_t portid;
2471f8244c63SZhiyong Yang 	uint8_t count, all_ports_up, print_flag = 0;
2472ce8d5614SIntel 	struct rte_eth_link link;
2473ce8d5614SIntel 
2474ce8d5614SIntel 	printf("Checking link statuses...\n");
2475ce8d5614SIntel 	fflush(stdout);
2476ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2477ce8d5614SIntel 		all_ports_up = 1;
24787d89b261SGaetan Rivet 		RTE_ETH_FOREACH_DEV(portid) {
2479ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
2480ce8d5614SIntel 				continue;
2481ce8d5614SIntel 			memset(&link, 0, sizeof(link));
2482ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
2483ce8d5614SIntel 			/* print link status if flag set */
2484ce8d5614SIntel 			if (print_flag == 1) {
2485ce8d5614SIntel 				if (link.link_status)
2486f8244c63SZhiyong Yang 					printf(
2487f8244c63SZhiyong Yang 					"Port%d Link Up. speed %u Mbps- %s\n",
2488f8244c63SZhiyong Yang 					portid, link.link_speed,
2489ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2490ce8d5614SIntel 					("full-duplex") : ("half-duplex\n"));
2491ce8d5614SIntel 				else
2492f8244c63SZhiyong Yang 					printf("Port %d Link Down\n", portid);
2493ce8d5614SIntel 				continue;
2494ce8d5614SIntel 			}
2495ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
249609419f23SThomas Monjalon 			if (link.link_status == ETH_LINK_DOWN) {
2497ce8d5614SIntel 				all_ports_up = 0;
2498ce8d5614SIntel 				break;
2499ce8d5614SIntel 			}
2500ce8d5614SIntel 		}
2501ce8d5614SIntel 		/* after finally printing all link status, get out */
2502ce8d5614SIntel 		if (print_flag == 1)
2503ce8d5614SIntel 			break;
2504ce8d5614SIntel 
2505ce8d5614SIntel 		if (all_ports_up == 0) {
2506ce8d5614SIntel 			fflush(stdout);
2507ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
2508ce8d5614SIntel 		}
2509ce8d5614SIntel 
2510ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
2511ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2512ce8d5614SIntel 			print_flag = 1;
2513ce8d5614SIntel 		}
25148ea656f8SGaetan Rivet 
25158ea656f8SGaetan Rivet 		if (lsc_interrupt)
25168ea656f8SGaetan Rivet 			break;
2517ce8d5614SIntel 	}
2518af75078fSIntel }
2519af75078fSIntel 
/*
 * Deferred handler (scheduled via rte_eal_alarm_set) that removes one
 * port of a device.  Limitation: it does not handle removing multiple
 * ports of the same device.
 * TODO: the device detach invoke will plan to be removed from user side to
 * eal. And convert all PMDs to free port resources on ether device closing.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	/* Pause forwarding if the vanishing port was in use; resume after. */
	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	/* Link checks would only time out on a device being unplugged. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
2547284c908cSGaetan Rivet 
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	/* Log the event when it is invalid or selected by event_print_mask. */
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		/* Flag the port; full setup happens in setup_attached_port. */
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		/* Defer removal: it must not run in interrupt context. */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
258376ad4a2dSGaetan Rivet 
258497b5d8b5SThomas Monjalon static int
258597b5d8b5SThomas Monjalon register_eth_event_callback(void)
258697b5d8b5SThomas Monjalon {
258797b5d8b5SThomas Monjalon 	int ret;
258897b5d8b5SThomas Monjalon 	enum rte_eth_event_type event;
258997b5d8b5SThomas Monjalon 
259097b5d8b5SThomas Monjalon 	for (event = RTE_ETH_EVENT_UNKNOWN;
259197b5d8b5SThomas Monjalon 			event < RTE_ETH_EVENT_MAX; event++) {
259297b5d8b5SThomas Monjalon 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
259397b5d8b5SThomas Monjalon 				event,
259497b5d8b5SThomas Monjalon 				eth_event_callback,
259597b5d8b5SThomas Monjalon 				NULL);
259697b5d8b5SThomas Monjalon 		if (ret != 0) {
259797b5d8b5SThomas Monjalon 			TESTPMD_LOG(ERR, "Failed to register callback for "
259897b5d8b5SThomas Monjalon 					"%s event\n", eth_event_desc[event]);
259997b5d8b5SThomas Monjalon 			return -1;
260097b5d8b5SThomas Monjalon 		}
260197b5d8b5SThomas Monjalon 	}
260297b5d8b5SThomas Monjalon 
260397b5d8b5SThomas Monjalon 	return 0;
260497b5d8b5SThomas Monjalon }
260597b5d8b5SThomas Monjalon 
2606fb73e096SJeff Guo /* This function is used by the interrupt thread */
2607fb73e096SJeff Guo static void
2608cc1bf307SJeff Guo dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2609fb73e096SJeff Guo 			     __rte_unused void *arg)
2610fb73e096SJeff Guo {
26112049c511SJeff Guo 	uint16_t port_id;
26122049c511SJeff Guo 	int ret;
26132049c511SJeff Guo 
2614fb73e096SJeff Guo 	if (type >= RTE_DEV_EVENT_MAX) {
2615fb73e096SJeff Guo 		fprintf(stderr, "%s called upon invalid event %d\n",
2616fb73e096SJeff Guo 			__func__, type);
2617fb73e096SJeff Guo 		fflush(stderr);
2618fb73e096SJeff Guo 	}
2619fb73e096SJeff Guo 
2620fb73e096SJeff Guo 	switch (type) {
2621fb73e096SJeff Guo 	case RTE_DEV_EVENT_REMOVE:
2622cc1bf307SJeff Guo 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2623fb73e096SJeff Guo 			device_name);
26242049c511SJeff Guo 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
26252049c511SJeff Guo 		if (ret) {
26262049c511SJeff Guo 			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
26272049c511SJeff Guo 				device_name);
26282049c511SJeff Guo 			return;
26292049c511SJeff Guo 		}
2630cc1bf307SJeff Guo 		/*
2631cc1bf307SJeff Guo 		 * Because the user's callback is invoked in eal interrupt
2632cc1bf307SJeff Guo 		 * callback, the interrupt callback need to be finished before
2633cc1bf307SJeff Guo 		 * it can be unregistered when detaching device. So finish
2634cc1bf307SJeff Guo 		 * callback soon and use a deferred removal to detach device
2635cc1bf307SJeff Guo 		 * is need. It is a workaround, once the device detaching be
2636cc1bf307SJeff Guo 		 * moved into the eal in the future, the deferred removal could
2637cc1bf307SJeff Guo 		 * be deleted.
2638cc1bf307SJeff Guo 		 */
2639cc1bf307SJeff Guo 		if (rte_eal_alarm_set(100000,
2640cc1bf307SJeff Guo 				rmv_port_callback, (void *)(intptr_t)port_id))
2641cc1bf307SJeff Guo 			RTE_LOG(ERR, EAL,
2642cc1bf307SJeff Guo 				"Could not set up deferred device removal\n");
2643fb73e096SJeff Guo 		break;
2644fb73e096SJeff Guo 	case RTE_DEV_EVENT_ADD:
2645fb73e096SJeff Guo 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2646fb73e096SJeff Guo 			device_name);
2647fb73e096SJeff Guo 		/* TODO: After finish kernel driver binding,
2648fb73e096SJeff Guo 		 * begin to attach port.
2649fb73e096SJeff Guo 		 */
2650fb73e096SJeff Guo 		break;
2651fb73e096SJeff Guo 	default:
2652fb73e096SJeff Guo 		break;
2653fb73e096SJeff Guo 	}
2654fb73e096SJeff Guo }
2655fb73e096SJeff Guo 
2656013af9b6SIntel static int
265728caa76aSZhiyong Yang set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2658af75078fSIntel {
2659013af9b6SIntel 	uint16_t i;
2660af75078fSIntel 	int diag;
2661013af9b6SIntel 	uint8_t mapping_found = 0;
2662af75078fSIntel 
2663013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2664013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2665013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2666013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2667013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
2668013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
2669013af9b6SIntel 			if (diag != 0)
2670013af9b6SIntel 				return diag;
2671013af9b6SIntel 			mapping_found = 1;
2672af75078fSIntel 		}
2673013af9b6SIntel 	}
2674013af9b6SIntel 	if (mapping_found)
2675013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
2676013af9b6SIntel 	return 0;
2677013af9b6SIntel }
2678013af9b6SIntel 
2679013af9b6SIntel static int
268028caa76aSZhiyong Yang set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2681013af9b6SIntel {
2682013af9b6SIntel 	uint16_t i;
2683013af9b6SIntel 	int diag;
2684013af9b6SIntel 	uint8_t mapping_found = 0;
2685013af9b6SIntel 
2686013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2687013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2688013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2689013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2690013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
2691013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
2692013af9b6SIntel 			if (diag != 0)
2693013af9b6SIntel 				return diag;
2694013af9b6SIntel 			mapping_found = 1;
2695013af9b6SIntel 		}
2696013af9b6SIntel 	}
2697013af9b6SIntel 	if (mapping_found)
2698013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
2699013af9b6SIntel 	return 0;
2700013af9b6SIntel }
2701013af9b6SIntel 
2702013af9b6SIntel static void
270328caa76aSZhiyong Yang map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2704013af9b6SIntel {
2705013af9b6SIntel 	int diag = 0;
2706013af9b6SIntel 
2707013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2708af75078fSIntel 	if (diag != 0) {
2709013af9b6SIntel 		if (diag == -ENOTSUP) {
2710013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
2711013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2712013af9b6SIntel 		}
2713013af9b6SIntel 		else
2714013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2715013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
2716013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2717af75078fSIntel 					pi, diag);
2718af75078fSIntel 	}
2719013af9b6SIntel 
2720013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2721af75078fSIntel 	if (diag != 0) {
2722013af9b6SIntel 		if (diag == -ENOTSUP) {
2723013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
2724013af9b6SIntel 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2725013af9b6SIntel 		}
2726013af9b6SIntel 		else
2727013af9b6SIntel 			rte_exit(EXIT_FAILURE,
2728013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
2729013af9b6SIntel 					"failed for port id=%d diag=%d\n",
2730af75078fSIntel 					pi, diag);
2731af75078fSIntel 	}
2732af75078fSIntel }
2733af75078fSIntel 
2734f2c5125aSPablo de Lara static void
2735f2c5125aSPablo de Lara rxtx_port_config(struct rte_port *port)
2736f2c5125aSPablo de Lara {
2737d44f8a48SQi Zhang 	uint16_t qid;
2738f2c5125aSPablo de Lara 
2739d44f8a48SQi Zhang 	for (qid = 0; qid < nb_rxq; qid++) {
2740d44f8a48SQi Zhang 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2741d44f8a48SQi Zhang 
2742d44f8a48SQi Zhang 		/* Check if any Rx parameters have been passed */
2743f2c5125aSPablo de Lara 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2744d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2745f2c5125aSPablo de Lara 
2746f2c5125aSPablo de Lara 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2747d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2748f2c5125aSPablo de Lara 
2749f2c5125aSPablo de Lara 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2750d44f8a48SQi Zhang 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2751f2c5125aSPablo de Lara 
2752f2c5125aSPablo de Lara 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2753d44f8a48SQi Zhang 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2754f2c5125aSPablo de Lara 
2755f2c5125aSPablo de Lara 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2756d44f8a48SQi Zhang 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2757f2c5125aSPablo de Lara 
2758d44f8a48SQi Zhang 		port->nb_rx_desc[qid] = nb_rxd;
2759d44f8a48SQi Zhang 	}
2760d44f8a48SQi Zhang 
2761d44f8a48SQi Zhang 	for (qid = 0; qid < nb_txq; qid++) {
2762d44f8a48SQi Zhang 		port->tx_conf[qid] = port->dev_info.default_txconf;
2763d44f8a48SQi Zhang 
2764d44f8a48SQi Zhang 		/* Check if any Tx parameters have been passed */
2765f2c5125aSPablo de Lara 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2766d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2767f2c5125aSPablo de Lara 
2768f2c5125aSPablo de Lara 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2769d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2770f2c5125aSPablo de Lara 
2771f2c5125aSPablo de Lara 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2772d44f8a48SQi Zhang 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2773f2c5125aSPablo de Lara 
2774f2c5125aSPablo de Lara 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2775d44f8a48SQi Zhang 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2776f2c5125aSPablo de Lara 
2777f2c5125aSPablo de Lara 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2778d44f8a48SQi Zhang 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2779d44f8a48SQi Zhang 
2780d44f8a48SQi Zhang 		port->nb_tx_desc[qid] = nb_txd;
2781d44f8a48SQi Zhang 	}
2782f2c5125aSPablo de Lara }
2783f2c5125aSPablo de Lara 
2784013af9b6SIntel void
2785013af9b6SIntel init_port_config(void)
2786013af9b6SIntel {
2787013af9b6SIntel 	portid_t pid;
2788013af9b6SIntel 	struct rte_port *port;
2789013af9b6SIntel 
27907d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(pid) {
2791013af9b6SIntel 		port = &ports[pid];
2792013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
2793422515b9SAdrien Mazarguil 		rte_eth_dev_info_get(pid, &port->dev_info);
27943ce690d3SBruce Richardson 		if (nb_rxq > 1) {
2795013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
279690892962SQi Zhang 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2797422515b9SAdrien Mazarguil 				rss_hf & port->dev_info.flow_type_rss_offloads;
2798af75078fSIntel 		} else {
2799013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2800013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2801af75078fSIntel 		}
28023ce690d3SBruce Richardson 
28035f592039SJingjing Wu 		if (port->dcb_flag == 0) {
28043ce690d3SBruce Richardson 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
28053ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
28063ce690d3SBruce Richardson 			else
28073ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
28083ce690d3SBruce Richardson 		}
28093ce690d3SBruce Richardson 
2810f2c5125aSPablo de Lara 		rxtx_port_config(port);
2811013af9b6SIntel 
2812013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
2813013af9b6SIntel 
2814013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
281550c4440eSThomas Monjalon #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2816e261265eSRadu Nicolau 		rte_pmd_ixgbe_bypass_init(pid);
28177b7e5ba7SIntel #endif
28188ea656f8SGaetan Rivet 
28198ea656f8SGaetan Rivet 		if (lsc_interrupt &&
28208ea656f8SGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
28218ea656f8SGaetan Rivet 		     RTE_ETH_DEV_INTR_LSC))
28228ea656f8SGaetan Rivet 			port->dev_conf.intr_conf.lsc = 1;
2823284c908cSGaetan Rivet 		if (rmv_interrupt &&
2824284c908cSGaetan Rivet 		    (rte_eth_devices[pid].data->dev_flags &
2825284c908cSGaetan Rivet 		     RTE_ETH_DEV_INTR_RMV))
2826284c908cSGaetan Rivet 			port->dev_conf.intr_conf.rmv = 1;
2827013af9b6SIntel 	}
2828013af9b6SIntel }
2829013af9b6SIntel 
283041b05095SBernard Iremonger void set_port_slave_flag(portid_t slave_pid)
283141b05095SBernard Iremonger {
283241b05095SBernard Iremonger 	struct rte_port *port;
283341b05095SBernard Iremonger 
283441b05095SBernard Iremonger 	port = &ports[slave_pid];
283541b05095SBernard Iremonger 	port->slave_flag = 1;
283641b05095SBernard Iremonger }
283741b05095SBernard Iremonger 
283841b05095SBernard Iremonger void clear_port_slave_flag(portid_t slave_pid)
283941b05095SBernard Iremonger {
284041b05095SBernard Iremonger 	struct rte_port *port;
284141b05095SBernard Iremonger 
284241b05095SBernard Iremonger 	port = &ports[slave_pid];
284341b05095SBernard Iremonger 	port->slave_flag = 0;
284441b05095SBernard Iremonger }
284541b05095SBernard Iremonger 
28460e545d30SBernard Iremonger uint8_t port_is_bonding_slave(portid_t slave_pid)
28470e545d30SBernard Iremonger {
28480e545d30SBernard Iremonger 	struct rte_port *port;
28490e545d30SBernard Iremonger 
28500e545d30SBernard Iremonger 	port = &ports[slave_pid];
2851b8b8b344SMatan Azrad 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2852b8b8b344SMatan Azrad 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2853b8b8b344SMatan Azrad 		return 1;
2854b8b8b344SMatan Azrad 	return 0;
28550e545d30SBernard Iremonger }
28560e545d30SBernard Iremonger 
2857013af9b6SIntel const uint16_t vlan_tags[] = {
2858013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
2859013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
2860013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
2861013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
2862013af9b6SIntel };
2863013af9b6SIntel 
2864013af9b6SIntel static  int
2865ac7c491cSKonstantin Ananyev get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
28661a572499SJingjing Wu 		 enum dcb_mode_enable dcb_mode,
28671a572499SJingjing Wu 		 enum rte_eth_nb_tcs num_tcs,
28681a572499SJingjing Wu 		 uint8_t pfc_en)
2869013af9b6SIntel {
2870013af9b6SIntel 	uint8_t i;
2871ac7c491cSKonstantin Ananyev 	int32_t rc;
2872ac7c491cSKonstantin Ananyev 	struct rte_eth_rss_conf rss_conf;
2873af75078fSIntel 
2874af75078fSIntel 	/*
2875013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2876013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
2877af75078fSIntel 	 */
28781a572499SJingjing Wu 	if (dcb_mode == DCB_VT_ENABLED) {
28791a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
28801a572499SJingjing Wu 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
28811a572499SJingjing Wu 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
28821a572499SJingjing Wu 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2883013af9b6SIntel 
2884547d946cSNirmoy Das 		/* VMDQ+DCB RX and TX configurations */
28851a572499SJingjing Wu 		vmdq_rx_conf->enable_default_pool = 0;
28861a572499SJingjing Wu 		vmdq_rx_conf->default_pool = 0;
28871a572499SJingjing Wu 		vmdq_rx_conf->nb_queue_pools =
28881a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
28891a572499SJingjing Wu 		vmdq_tx_conf->nb_queue_pools =
28901a572499SJingjing Wu 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2891013af9b6SIntel 
28921a572499SJingjing Wu 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
28931a572499SJingjing Wu 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
28941a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
28951a572499SJingjing Wu 			vmdq_rx_conf->pool_map[i].pools =
28961a572499SJingjing Wu 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2897af75078fSIntel 		}
2898013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2899f59908feSWei Dai 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2900f59908feSWei Dai 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2901013af9b6SIntel 		}
2902013af9b6SIntel 
2903013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
290432e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
290532e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
29061a572499SJingjing Wu 	} else {
29071a572499SJingjing Wu 		struct rte_eth_dcb_rx_conf *rx_conf =
29081a572499SJingjing Wu 				&eth_conf->rx_adv_conf.dcb_rx_conf;
29091a572499SJingjing Wu 		struct rte_eth_dcb_tx_conf *tx_conf =
29101a572499SJingjing Wu 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2911013af9b6SIntel 
2912ac7c491cSKonstantin Ananyev 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2913ac7c491cSKonstantin Ananyev 		if (rc != 0)
2914ac7c491cSKonstantin Ananyev 			return rc;
2915ac7c491cSKonstantin Ananyev 
29161a572499SJingjing Wu 		rx_conf->nb_tcs = num_tcs;
29171a572499SJingjing Wu 		tx_conf->nb_tcs = num_tcs;
29181a572499SJingjing Wu 
2919bcd0e432SJingjing Wu 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2920bcd0e432SJingjing Wu 			rx_conf->dcb_tc[i] = i % num_tcs;
2921bcd0e432SJingjing Wu 			tx_conf->dcb_tc[i] = i % num_tcs;
2922013af9b6SIntel 		}
2923ac7c491cSKonstantin Ananyev 
29241a572499SJingjing Wu 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2925ac7c491cSKonstantin Ananyev 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
292632e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
29271a572499SJingjing Wu 	}
29281a572499SJingjing Wu 
29291a572499SJingjing Wu 	if (pfc_en)
29301a572499SJingjing Wu 		eth_conf->dcb_capability_en =
29311a572499SJingjing Wu 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2932013af9b6SIntel 	else
2933013af9b6SIntel 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2934013af9b6SIntel 
2935013af9b6SIntel 	return 0;
2936013af9b6SIntel }
2937013af9b6SIntel 
2938013af9b6SIntel int
29391a572499SJingjing Wu init_port_dcb_config(portid_t pid,
29401a572499SJingjing Wu 		     enum dcb_mode_enable dcb_mode,
29411a572499SJingjing Wu 		     enum rte_eth_nb_tcs num_tcs,
29421a572499SJingjing Wu 		     uint8_t pfc_en)
2943013af9b6SIntel {
2944013af9b6SIntel 	struct rte_eth_conf port_conf;
2945013af9b6SIntel 	struct rte_port *rte_port;
2946013af9b6SIntel 	int retval;
2947013af9b6SIntel 	uint16_t i;
2948013af9b6SIntel 
29492a977b89SWenzhuo Lu 	rte_port = &ports[pid];
2950013af9b6SIntel 
2951013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2952013af9b6SIntel 	/* Enter DCB configuration status */
2953013af9b6SIntel 	dcb_config = 1;
2954013af9b6SIntel 
2955d5354e89SYanglong Wu 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2956d5354e89SYanglong Wu 	port_conf.txmode = rte_port->dev_conf.txmode;
2957d5354e89SYanglong Wu 
2958013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2959ac7c491cSKonstantin Ananyev 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2960013af9b6SIntel 	if (retval < 0)
2961013af9b6SIntel 		return retval;
29620074d02fSShahaf Shuler 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2963013af9b6SIntel 
29642f203d44SQi Zhang 	/* re-configure the device . */
29652f203d44SQi Zhang 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
29662a977b89SWenzhuo Lu 
29672a977b89SWenzhuo Lu 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
29682a977b89SWenzhuo Lu 
29692a977b89SWenzhuo Lu 	/* If dev_info.vmdq_pool_base is greater than 0,
29702a977b89SWenzhuo Lu 	 * the queue id of vmdq pools is started after pf queues.
29712a977b89SWenzhuo Lu 	 */
29722a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED &&
29732a977b89SWenzhuo Lu 	    rte_port->dev_info.vmdq_pool_base > 0) {
29742a977b89SWenzhuo Lu 		printf("VMDQ_DCB multi-queue mode is nonsensical"
29752a977b89SWenzhuo Lu 			" for port %d.", pid);
29762a977b89SWenzhuo Lu 		return -1;
29772a977b89SWenzhuo Lu 	}
29782a977b89SWenzhuo Lu 
29792a977b89SWenzhuo Lu 	/* Assume the ports in testpmd have the same dcb capability
29802a977b89SWenzhuo Lu 	 * and has the same number of rxq and txq in dcb mode
29812a977b89SWenzhuo Lu 	 */
29822a977b89SWenzhuo Lu 	if (dcb_mode == DCB_VT_ENABLED) {
298386ef65eeSBernard Iremonger 		if (rte_port->dev_info.max_vfs > 0) {
298486ef65eeSBernard Iremonger 			nb_rxq = rte_port->dev_info.nb_rx_queues;
298586ef65eeSBernard Iremonger 			nb_txq = rte_port->dev_info.nb_tx_queues;
298686ef65eeSBernard Iremonger 		} else {
29872a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29882a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
298986ef65eeSBernard Iremonger 		}
29902a977b89SWenzhuo Lu 	} else {
29912a977b89SWenzhuo Lu 		/*if vt is disabled, use all pf queues */
29922a977b89SWenzhuo Lu 		if (rte_port->dev_info.vmdq_pool_base == 0) {
29932a977b89SWenzhuo Lu 			nb_rxq = rte_port->dev_info.max_rx_queues;
29942a977b89SWenzhuo Lu 			nb_txq = rte_port->dev_info.max_tx_queues;
29952a977b89SWenzhuo Lu 		} else {
29962a977b89SWenzhuo Lu 			nb_rxq = (queueid_t)num_tcs;
29972a977b89SWenzhuo Lu 			nb_txq = (queueid_t)num_tcs;
29982a977b89SWenzhuo Lu 
29992a977b89SWenzhuo Lu 		}
30002a977b89SWenzhuo Lu 	}
30012a977b89SWenzhuo Lu 	rx_free_thresh = 64;
30022a977b89SWenzhuo Lu 
3003013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3004013af9b6SIntel 
3005f2c5125aSPablo de Lara 	rxtx_port_config(rte_port);
3006013af9b6SIntel 	/* VLAN filter */
30070074d02fSShahaf Shuler 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
30081a572499SJingjing Wu 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3009013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
3010013af9b6SIntel 
3011013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3012013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
3013013af9b6SIntel 
30147741e4cfSIntel 	rte_port->dcb_flag = 1;
30157741e4cfSIntel 
3016013af9b6SIntel 	return 0;
3017af75078fSIntel }
3018af75078fSIntel 
3019ffc468ffSTetsuya Mukawa static void
3020ffc468ffSTetsuya Mukawa init_port(void)
3021ffc468ffSTetsuya Mukawa {
3022ffc468ffSTetsuya Mukawa 	/* Configuration of Ethernet ports. */
3023ffc468ffSTetsuya Mukawa 	ports = rte_zmalloc("testpmd: ports",
3024ffc468ffSTetsuya Mukawa 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3025ffc468ffSTetsuya Mukawa 			    RTE_CACHE_LINE_SIZE);
3026ffc468ffSTetsuya Mukawa 	if (ports == NULL) {
3027ffc468ffSTetsuya Mukawa 		rte_exit(EXIT_FAILURE,
3028ffc468ffSTetsuya Mukawa 				"rte_zmalloc(%d struct rte_port) failed\n",
3029ffc468ffSTetsuya Mukawa 				RTE_MAX_ETHPORTS);
3030ffc468ffSTetsuya Mukawa 	}
303129841336SPhil Yang 
303229841336SPhil Yang 	/* Initialize ports NUMA structures */
303329841336SPhil Yang 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
303429841336SPhil Yang 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
303529841336SPhil Yang 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3036ffc468ffSTetsuya Mukawa }
3037ffc468ffSTetsuya Mukawa 
3038d3a274ceSZhihong Wang static void
3039d3a274ceSZhihong Wang force_quit(void)
3040d3a274ceSZhihong Wang {
3041d3a274ceSZhihong Wang 	pmd_test_exit();
3042d3a274ceSZhihong Wang 	prompt_exit();
3043d3a274ceSZhihong Wang }
3044d3a274ceSZhihong Wang 
3045d3a274ceSZhihong Wang static void
3046cfea1f30SPablo de Lara print_stats(void)
3047cfea1f30SPablo de Lara {
3048cfea1f30SPablo de Lara 	uint8_t i;
3049cfea1f30SPablo de Lara 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3050cfea1f30SPablo de Lara 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3051cfea1f30SPablo de Lara 
3052cfea1f30SPablo de Lara 	/* Clear screen and move to top left */
3053cfea1f30SPablo de Lara 	printf("%s%s", clr, top_left);
3054cfea1f30SPablo de Lara 
3055cfea1f30SPablo de Lara 	printf("\nPort statistics ====================================");
3056cfea1f30SPablo de Lara 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3057cfea1f30SPablo de Lara 		nic_stats_display(fwd_ports_ids[i]);
3058683d1e82SIgor Romanov 
3059683d1e82SIgor Romanov 	fflush(stdout);
3060cfea1f30SPablo de Lara }
3061cfea1f30SPablo de Lara 
3062cfea1f30SPablo de Lara static void
3063d3a274ceSZhihong Wang signal_handler(int signum)
3064d3a274ceSZhihong Wang {
3065d3a274ceSZhihong Wang 	if (signum == SIGINT || signum == SIGTERM) {
3066d3a274ceSZhihong Wang 		printf("\nSignal %d received, preparing to exit...\n",
3067d3a274ceSZhihong Wang 				signum);
3068102b7329SReshma Pattan #ifdef RTE_LIBRTE_PDUMP
3069102b7329SReshma Pattan 		/* uninitialize packet capture framework */
3070102b7329SReshma Pattan 		rte_pdump_uninit();
3071102b7329SReshma Pattan #endif
307262d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
307362d3216dSReshma Pattan 		rte_latencystats_uninit();
307462d3216dSReshma Pattan #endif
3075d3a274ceSZhihong Wang 		force_quit();
3076d9a191a0SPhil Yang 		/* Set flag to indicate the force termination. */
3077d9a191a0SPhil Yang 		f_quit = 1;
3078d3a274ceSZhihong Wang 		/* exit with the expected status */
3079d3a274ceSZhihong Wang 		signal(signum, SIG_DFL);
3080d3a274ceSZhihong Wang 		kill(getpid(), signum);
3081d3a274ceSZhihong Wang 	}
3082d3a274ceSZhihong Wang }
3083d3a274ceSZhihong Wang 
3084af75078fSIntel int
3085af75078fSIntel main(int argc, char** argv)
3086af75078fSIntel {
3087af75078fSIntel 	int diag;
3088f8244c63SZhiyong Yang 	portid_t port_id;
30894918a357SXiaoyun Li 	uint16_t count;
3090fb73e096SJeff Guo 	int ret;
3091af75078fSIntel 
3092d3a274ceSZhihong Wang 	signal(SIGINT, signal_handler);
3093d3a274ceSZhihong Wang 	signal(SIGTERM, signal_handler);
3094d3a274ceSZhihong Wang 
3095af75078fSIntel 	diag = rte_eal_init(argc, argv);
3096af75078fSIntel 	if (diag < 0)
3097af75078fSIntel 		rte_panic("Cannot init EAL\n");
3098af75078fSIntel 
3099285fd101SOlivier Matz 	testpmd_logtype = rte_log_register("testpmd");
3100285fd101SOlivier Matz 	if (testpmd_logtype < 0)
3101285fd101SOlivier Matz 		rte_panic("Cannot register log type");
3102285fd101SOlivier Matz 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3103285fd101SOlivier Matz 
310497b5d8b5SThomas Monjalon 	ret = register_eth_event_callback();
310597b5d8b5SThomas Monjalon 	if (ret != 0)
310697b5d8b5SThomas Monjalon 		rte_panic("Cannot register for ethdev events");
310797b5d8b5SThomas Monjalon 
31084aa0d012SAnatoly Burakov #ifdef RTE_LIBRTE_PDUMP
31094aa0d012SAnatoly Burakov 	/* initialize packet capture framework */
3110e9436f54STiwei Bie 	rte_pdump_init();
31114aa0d012SAnatoly Burakov #endif
31124aa0d012SAnatoly Burakov 
31134918a357SXiaoyun Li 	count = 0;
31144918a357SXiaoyun Li 	RTE_ETH_FOREACH_DEV(port_id) {
31154918a357SXiaoyun Li 		ports_ids[count] = port_id;
31164918a357SXiaoyun Li 		count++;
31174918a357SXiaoyun Li 	}
31184918a357SXiaoyun Li 	nb_ports = (portid_t) count;
31194aa0d012SAnatoly Burakov 	if (nb_ports == 0)
31204aa0d012SAnatoly Burakov 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
31214aa0d012SAnatoly Burakov 
31224aa0d012SAnatoly Burakov 	/* allocate port structures, and init them */
31234aa0d012SAnatoly Burakov 	init_port();
31244aa0d012SAnatoly Burakov 
31254aa0d012SAnatoly Burakov 	set_def_fwd_config();
31264aa0d012SAnatoly Burakov 	if (nb_lcores == 0)
31274aa0d012SAnatoly Burakov 		rte_panic("Empty set of forwarding logical cores - check the "
31284aa0d012SAnatoly Burakov 			  "core mask supplied in the command parameters\n");
31294aa0d012SAnatoly Burakov 
3130e505d84cSAnatoly Burakov 	/* Bitrate/latency stats disabled by default */
3131e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_BITRATE
3132e505d84cSAnatoly Burakov 	bitrate_enabled = 0;
3133e505d84cSAnatoly Burakov #endif
3134e505d84cSAnatoly Burakov #ifdef RTE_LIBRTE_LATENCY_STATS
3135e505d84cSAnatoly Burakov 	latencystats_enabled = 0;
3136e505d84cSAnatoly Burakov #endif
3137e505d84cSAnatoly Burakov 
3138fb7b8b32SAnatoly Burakov 	/* on FreeBSD, mlockall() is disabled by default */
31395fbc1d49SBruce Richardson #ifdef RTE_EXEC_ENV_FREEBSD
3140fb7b8b32SAnatoly Burakov 	do_mlockall = 0;
3141fb7b8b32SAnatoly Burakov #else
3142fb7b8b32SAnatoly Burakov 	do_mlockall = 1;
3143fb7b8b32SAnatoly Burakov #endif
3144fb7b8b32SAnatoly Burakov 
3145e505d84cSAnatoly Burakov 	argc -= diag;
3146e505d84cSAnatoly Burakov 	argv += diag;
3147e505d84cSAnatoly Burakov 	if (argc > 1)
3148e505d84cSAnatoly Burakov 		launch_args_parse(argc, argv);
3149e505d84cSAnatoly Burakov 
3150e505d84cSAnatoly Burakov 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3151285fd101SOlivier Matz 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
31521c036b16SEelco Chaudron 			strerror(errno));
31531c036b16SEelco Chaudron 	}
31541c036b16SEelco Chaudron 
315599cabef0SPablo de Lara 	if (tx_first && interactive)
315699cabef0SPablo de Lara 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
315799cabef0SPablo de Lara 				"interactive mode.\n");
31588820cba4SDavid Hunt 
31598820cba4SDavid Hunt 	if (tx_first && lsc_interrupt) {
31608820cba4SDavid Hunt 		printf("Warning: lsc_interrupt needs to be off when "
31618820cba4SDavid Hunt 				" using tx_first. Disabling.\n");
31628820cba4SDavid Hunt 		lsc_interrupt = 0;
31638820cba4SDavid Hunt 	}
31648820cba4SDavid Hunt 
31655a8fb55cSReshma Pattan 	if (!nb_rxq && !nb_txq)
31665a8fb55cSReshma Pattan 		printf("Warning: Either rx or tx queues should be non-zero\n");
31675a8fb55cSReshma Pattan 
31685a8fb55cSReshma Pattan 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3169af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3170af75078fSIntel 		       "but nb_txq=%d will prevent to fully test it.\n",
3171af75078fSIntel 		       nb_rxq, nb_txq);
3172af75078fSIntel 
3173af75078fSIntel 	init_config();
3174fb73e096SJeff Guo 
3175fb73e096SJeff Guo 	if (hot_plug) {
31762049c511SJeff Guo 		ret = rte_dev_hotplug_handle_enable();
3177fb73e096SJeff Guo 		if (ret) {
31782049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31792049c511SJeff Guo 				"fail to enable hotplug handling.");
3180fb73e096SJeff Guo 			return -1;
3181fb73e096SJeff Guo 		}
3182fb73e096SJeff Guo 
31832049c511SJeff Guo 		ret = rte_dev_event_monitor_start();
31842049c511SJeff Guo 		if (ret) {
31852049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31862049c511SJeff Guo 				"fail to start device event monitoring.");
31872049c511SJeff Guo 			return -1;
31882049c511SJeff Guo 		}
31892049c511SJeff Guo 
31902049c511SJeff Guo 		ret = rte_dev_event_callback_register(NULL,
3191cc1bf307SJeff Guo 			dev_event_callback, NULL);
31922049c511SJeff Guo 		if (ret) {
31932049c511SJeff Guo 			RTE_LOG(ERR, EAL,
31942049c511SJeff Guo 				"fail  to register device event callback\n");
31952049c511SJeff Guo 			return -1;
31962049c511SJeff Guo 		}
3197fb73e096SJeff Guo 	}
3198fb73e096SJeff Guo 
3199148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
3200148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3201af75078fSIntel 
3202ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
32037d89b261SGaetan Rivet 	RTE_ETH_FOREACH_DEV(port_id)
3204ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
3205af75078fSIntel 
32067e4441c8SRemy Horton 	/* Init metrics library */
32077e4441c8SRemy Horton 	rte_metrics_init(rte_socket_id());
32087e4441c8SRemy Horton 
320962d3216dSReshma Pattan #ifdef RTE_LIBRTE_LATENCY_STATS
321062d3216dSReshma Pattan 	if (latencystats_enabled != 0) {
321162d3216dSReshma Pattan 		int ret = rte_latencystats_init(1, NULL);
321262d3216dSReshma Pattan 		if (ret)
321362d3216dSReshma Pattan 			printf("Warning: latencystats init()"
321462d3216dSReshma Pattan 				" returned error %d\n",	ret);
321562d3216dSReshma Pattan 		printf("Latencystats running on lcore %d\n",
321662d3216dSReshma Pattan 			latencystats_lcore_id);
321762d3216dSReshma Pattan 	}
321862d3216dSReshma Pattan #endif
321962d3216dSReshma Pattan 
32207e4441c8SRemy Horton 	/* Setup bitrate stats */
32217e4441c8SRemy Horton #ifdef RTE_LIBRTE_BITRATE
3222e25e6c70SRemy Horton 	if (bitrate_enabled != 0) {
32237e4441c8SRemy Horton 		bitrate_data = rte_stats_bitrate_create();
32247e4441c8SRemy Horton 		if (bitrate_data == NULL)
3225e25e6c70SRemy Horton 			rte_exit(EXIT_FAILURE,
3226e25e6c70SRemy Horton 				"Could not allocate bitrate data.\n");
32277e4441c8SRemy Horton 		rte_stats_bitrate_reg(bitrate_data);
3228e25e6c70SRemy Horton 	}
32297e4441c8SRemy Horton #endif
32307e4441c8SRemy Horton 
32310d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
323281ef862bSAllain Legacy 	if (strlen(cmdline_filename) != 0)
323381ef862bSAllain Legacy 		cmdline_read_from_file(cmdline_filename);
323481ef862bSAllain Legacy 
3235ca7feb22SCyril Chemparathy 	if (interactive == 1) {
3236ca7feb22SCyril Chemparathy 		if (auto_start) {
3237ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
3238ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
3239ca7feb22SCyril Chemparathy 		}
3240af75078fSIntel 		prompt();
32410de738cfSJiayu Hu 		pmd_test_exit();
3242ca7feb22SCyril Chemparathy 	} else
32430d56cb81SThomas Monjalon #endif
32440d56cb81SThomas Monjalon 	{
3245af75078fSIntel 		char c;
3246af75078fSIntel 		int rc;
3247af75078fSIntel 
3248d9a191a0SPhil Yang 		f_quit = 0;
3249d9a191a0SPhil Yang 
3250af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
325199cabef0SPablo de Lara 		start_packet_forwarding(tx_first);
3252cfea1f30SPablo de Lara 		if (stats_period != 0) {
3253cfea1f30SPablo de Lara 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3254cfea1f30SPablo de Lara 			uint64_t timer_period;
3255cfea1f30SPablo de Lara 
3256cfea1f30SPablo de Lara 			/* Convert to number of cycles */
3257cfea1f30SPablo de Lara 			timer_period = stats_period * rte_get_timer_hz();
3258cfea1f30SPablo de Lara 
3259d9a191a0SPhil Yang 			while (f_quit == 0) {
3260cfea1f30SPablo de Lara 				cur_time = rte_get_timer_cycles();
3261cfea1f30SPablo de Lara 				diff_time += cur_time - prev_time;
3262cfea1f30SPablo de Lara 
3263cfea1f30SPablo de Lara 				if (diff_time >= timer_period) {
3264cfea1f30SPablo de Lara 					print_stats();
3265cfea1f30SPablo de Lara 					/* Reset the timer */
3266cfea1f30SPablo de Lara 					diff_time = 0;
3267cfea1f30SPablo de Lara 				}
3268cfea1f30SPablo de Lara 				/* Sleep to avoid unnecessary checks */
3269cfea1f30SPablo de Lara 				prev_time = cur_time;
3270cfea1f30SPablo de Lara 				sleep(1);
3271cfea1f30SPablo de Lara 			}
3272cfea1f30SPablo de Lara 		}
3273cfea1f30SPablo de Lara 
3274af75078fSIntel 		printf("Press enter to exit\n");
3275af75078fSIntel 		rc = read(0, &c, 1);
3276d3a274ceSZhihong Wang 		pmd_test_exit();
3277af75078fSIntel 		if (rc < 0)
3278af75078fSIntel 			return 1;
3279af75078fSIntel 	}
3280af75078fSIntel 
3281af75078fSIntel 	return 0;
3282af75078fSIntel }
3283