xref: /dpdk/app/test-pmd/testpmd.c (revision 5706de65334e11bae4847e9ace5e58b71d743f42)
1af75078fSIntel /*-
2af75078fSIntel  *   BSD LICENSE
3af75078fSIntel  *
4e9d48c00SBruce Richardson  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5af75078fSIntel  *   All rights reserved.
6af75078fSIntel  *
7af75078fSIntel  *   Redistribution and use in source and binary forms, with or without
8af75078fSIntel  *   modification, are permitted provided that the following conditions
9af75078fSIntel  *   are met:
10af75078fSIntel  *
11af75078fSIntel  *     * Redistributions of source code must retain the above copyright
12af75078fSIntel  *       notice, this list of conditions and the following disclaimer.
13af75078fSIntel  *     * Redistributions in binary form must reproduce the above copyright
14af75078fSIntel  *       notice, this list of conditions and the following disclaimer in
15af75078fSIntel  *       the documentation and/or other materials provided with the
16af75078fSIntel  *       distribution.
17af75078fSIntel  *     * Neither the name of Intel Corporation nor the names of its
18af75078fSIntel  *       contributors may be used to endorse or promote products derived
19af75078fSIntel  *       from this software without specific prior written permission.
20af75078fSIntel  *
21af75078fSIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22af75078fSIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23af75078fSIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24af75078fSIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25af75078fSIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26af75078fSIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27af75078fSIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28af75078fSIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29af75078fSIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30af75078fSIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31af75078fSIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32af75078fSIntel  */
33af75078fSIntel 
34af75078fSIntel #include <stdarg.h>
35af75078fSIntel #include <stdio.h>
36af75078fSIntel #include <stdlib.h>
37af75078fSIntel #include <signal.h>
38af75078fSIntel #include <string.h>
39af75078fSIntel #include <time.h>
40af75078fSIntel #include <fcntl.h>
41af75078fSIntel #include <sys/types.h>
42af75078fSIntel #include <errno.h>
43af75078fSIntel 
44af75078fSIntel #include <sys/queue.h>
45af75078fSIntel #include <sys/stat.h>
46af75078fSIntel 
47af75078fSIntel #include <stdint.h>
48af75078fSIntel #include <unistd.h>
49af75078fSIntel #include <inttypes.h>
50af75078fSIntel 
51af75078fSIntel #include <rte_common.h>
52af75078fSIntel #include <rte_byteorder.h>
53af75078fSIntel #include <rte_log.h>
54af75078fSIntel #include <rte_debug.h>
55af75078fSIntel #include <rte_cycles.h>
56af75078fSIntel #include <rte_memory.h>
57af75078fSIntel #include <rte_memcpy.h>
58af75078fSIntel #include <rte_memzone.h>
59af75078fSIntel #include <rte_launch.h>
60af75078fSIntel #include <rte_tailq.h>
61af75078fSIntel #include <rte_eal.h>
62af75078fSIntel #include <rte_per_lcore.h>
63af75078fSIntel #include <rte_lcore.h>
64af75078fSIntel #include <rte_atomic.h>
65af75078fSIntel #include <rte_branch_prediction.h>
66af75078fSIntel #include <rte_ring.h>
67af75078fSIntel #include <rte_mempool.h>
68af75078fSIntel #include <rte_malloc.h>
69af75078fSIntel #include <rte_mbuf.h>
70af75078fSIntel #include <rte_interrupts.h>
71af75078fSIntel #include <rte_pci.h>
72af75078fSIntel #include <rte_ether.h>
73af75078fSIntel #include <rte_ethdev.h>
74af75078fSIntel #include <rte_string_fns.h>
75148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
76148f963fSBruce Richardson #include <rte_eth_xenvirt.h>
77148f963fSBruce Richardson #endif
78af75078fSIntel 
79af75078fSIntel #include "testpmd.h"
80148f963fSBruce Richardson #include "mempool_osdep.h"
81af75078fSIntel 
82af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
83af75078fSIntel 
84af75078fSIntel /* Use the master core for the command line? */
85af75078fSIntel uint8_t interactive = 0;
86af75078fSIntel 
87af75078fSIntel /*
88af75078fSIntel  * NUMA support configuration.
89af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
90af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
92af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
93af75078fSIntel  */
94af75078fSIntel uint8_t numa_support = 0; /**< No numa support by default */
95af75078fSIntel 
96af75078fSIntel /*
97b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
98b6ea6408SIntel  * not configured.
99b6ea6408SIntel  */
100b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
101b6ea6408SIntel 
102b6ea6408SIntel /*
103148f963fSBruce Richardson  * Use anonymous mapped memory (may not be physically contiguous) for mbufs.
104148f963fSBruce Richardson  */
105148f963fSBruce Richardson uint8_t mp_anon = 0;
106148f963fSBruce Richardson 
107148f963fSBruce Richardson /*
108af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
109af75078fSIntel  * forwarded.
110af75078fSIntel  * Must be instantiated with the Ethernet addresses of the peer traffic
111af75078fSIntel  * generator ports.
112af75078fSIntel  */
113af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114af75078fSIntel portid_t nb_peer_eth_addrs = 0;
115af75078fSIntel 
116af75078fSIntel /*
117af75078fSIntel  * Probed Target Environment.
118af75078fSIntel  */
119af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
120af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
121af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
123af75078fSIntel 
124af75078fSIntel /*
125af75078fSIntel  * Test Forwarding Configuration.
126af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
128af75078fSIntel  */
129af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
132af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
133af75078fSIntel 
134af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
136af75078fSIntel 
137af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
139af75078fSIntel 
140af75078fSIntel /*
141af75078fSIntel  * Forwarding engines.
142af75078fSIntel  */
143af75078fSIntel struct fwd_engine * fwd_engines[] = {
144af75078fSIntel 	&io_fwd_engine,
145af75078fSIntel 	&mac_fwd_engine,
14657e85242SBruce Richardson 	&mac_retry_fwd_engine,
147af75078fSIntel 	&rx_only_engine,
148af75078fSIntel 	&tx_only_engine,
149af75078fSIntel 	&csum_fwd_engine,
150af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
151af75078fSIntel 	&ieee1588_fwd_engine,
152af75078fSIntel #endif
153af75078fSIntel 	NULL,
154af75078fSIntel };
155af75078fSIntel 
156af75078fSIntel struct fwd_config cur_fwd_config;
157af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
158af75078fSIntel 
159af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
161c8798818SIntel                                       * specified on command-line. */
162af75078fSIntel 
163af75078fSIntel /*
164af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
165af75078fSIntel  */
166af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
169af75078fSIntel };
170af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
171af75078fSIntel 
172af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173af75078fSIntel uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
174af75078fSIntel 
175900550deSIntel /* Whether the current configuration is in DCB mode; 0 means it is not. */
176900550deSIntel uint8_t dcb_config = 0;
177900550deSIntel 
178900550deSIntel /* Whether DCB is in testing status. */
179900550deSIntel uint8_t dcb_test = 0;
180900550deSIntel 
181900550deSIntel /* Queue mapping with both DCB and VT enabled is the default. */
182900550deSIntel enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
183af75078fSIntel 
184af75078fSIntel /*
185af75078fSIntel  * Configurable number of RX/TX queues.
186af75078fSIntel  */
187af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
189af75078fSIntel 
190af75078fSIntel /*
191af75078fSIntel  * Configurable number of RX/TX ring descriptors.
192af75078fSIntel  */
193af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128
194af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512
195af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
197af75078fSIntel 
198af75078fSIntel /*
199af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
200af75078fSIntel  */
201af75078fSIntel #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202af75078fSIntel #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203af75078fSIntel #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
204af75078fSIntel 
205af75078fSIntel #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206af75078fSIntel #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207af75078fSIntel #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
208af75078fSIntel 
209af75078fSIntel struct rte_eth_thresh rx_thresh = {
210af75078fSIntel 	.pthresh = RX_PTHRESH,
211af75078fSIntel 	.hthresh = RX_HTHRESH,
212af75078fSIntel 	.wthresh = RX_WTHRESH,
213af75078fSIntel };
214af75078fSIntel 
215af75078fSIntel struct rte_eth_thresh tx_thresh = {
216af75078fSIntel 	.pthresh = TX_PTHRESH,
217af75078fSIntel 	.hthresh = TX_HTHRESH,
218af75078fSIntel 	.wthresh = TX_WTHRESH,
219af75078fSIntel };
220af75078fSIntel 
221af75078fSIntel /*
222af75078fSIntel  * Configurable value of RX free threshold.
223af75078fSIntel  */
224af75078fSIntel uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
225af75078fSIntel 
226af75078fSIntel /*
227ce8d5614SIntel  * Configurable value of RX drop enable.
228ce8d5614SIntel  */
229ce8d5614SIntel uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
230ce8d5614SIntel 
231ce8d5614SIntel /*
232af75078fSIntel  * Configurable value of TX free threshold.
233af75078fSIntel  */
234af75078fSIntel uint16_t tx_free_thresh = 0; /* Use default values. */
235af75078fSIntel 
236af75078fSIntel /*
237af75078fSIntel  * Configurable value of TX RS bit threshold.
238af75078fSIntel  */
239af75078fSIntel uint16_t tx_rs_thresh = 0; /* Use default values. */
240af75078fSIntel 
241af75078fSIntel /*
242ce8d5614SIntel  * Configurable value of TX queue flags.
243ce8d5614SIntel  */
244ce8d5614SIntel uint32_t txq_flags = 0; /* No flags set. */
245ce8d5614SIntel 
246ce8d5614SIntel /*
247af75078fSIntel  * Receive Side Scaling (RSS) configuration.
248af75078fSIntel  */
249af75078fSIntel uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
250af75078fSIntel 
251af75078fSIntel /*
252af75078fSIntel  * Port topology configuration
253af75078fSIntel  */
254af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
255af75078fSIntel 
2567741e4cfSIntel /*
2577741e4cfSIntel  * When set, avoid flushing the RX queues before starting forwarding.
2587741e4cfSIntel  */
2597741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2607741e4cfSIntel 
261af75078fSIntel /*
2627b7e5ba7SIntel  * NIC bypass mode configuration options.
2637b7e5ba7SIntel  */
2647b7e5ba7SIntel #ifdef RTE_NIC_BYPASS
2657b7e5ba7SIntel 
2667b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
2677b7e5ba7SIntel uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
2687b7e5ba7SIntel 
2697b7e5ba7SIntel #endif
2707b7e5ba7SIntel 
2717b7e5ba7SIntel /*
272af75078fSIntel  * Ethernet device configuration.
273af75078fSIntel  */
274af75078fSIntel struct rte_eth_rxmode rx_mode = {
275af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276af75078fSIntel 	.split_hdr_size = 0,
277af75078fSIntel 	.header_split   = 0, /**< Header Split disabled. */
278af75078fSIntel 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279af75078fSIntel 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280a47aa8b9SIntel 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281a47aa8b9SIntel 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282af75078fSIntel 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283af75078fSIntel 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284af75078fSIntel };
285af75078fSIntel 
286af75078fSIntel struct rte_fdir_conf fdir_conf = {
287af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
288af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
289af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
290af75078fSIntel 	.flexbytes_offset = 0x6,
291af75078fSIntel 	.drop_queue = 127,
292af75078fSIntel };
293af75078fSIntel 
294af75078fSIntel static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
295af75078fSIntel 
296ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
298ed30d9b6SIntel 
299ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
301ed30d9b6SIntel 
302ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
303ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
304ed30d9b6SIntel 
305ed30d9b6SIntel /* Forward function declarations */
306ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307ce8d5614SIntel static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
308ce8d5614SIntel 
309ce8d5614SIntel /*
310ce8d5614SIntel  * Check if all the ports are started.
311ce8d5614SIntel  * If yes, return positive value. If not, return zero.
312ce8d5614SIntel  */
313ce8d5614SIntel static int all_ports_started(void);
314ed30d9b6SIntel 
315af75078fSIntel /*
316af75078fSIntel  * Setup default configuration.
317af75078fSIntel  */
318af75078fSIntel static void
319af75078fSIntel set_default_fwd_lcores_config(void)
320af75078fSIntel {
321af75078fSIntel 	unsigned int i;
322af75078fSIntel 	unsigned int nb_lc;
323af75078fSIntel 
324af75078fSIntel 	nb_lc = 0;
325af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
326af75078fSIntel 		if (! rte_lcore_is_enabled(i))
327af75078fSIntel 			continue;
328af75078fSIntel 		if (i == rte_get_master_lcore())
329af75078fSIntel 			continue;
330af75078fSIntel 		fwd_lcores_cpuids[nb_lc++] = i;
331af75078fSIntel 	}
332af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
333af75078fSIntel 	nb_cfg_lcores = nb_lcores;
334af75078fSIntel 	nb_fwd_lcores = 1;
335af75078fSIntel }
336af75078fSIntel 
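/*
 * Default peer addresses: give each peer port a locally administered
 * Ethernet address whose last byte is the port index.
 */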
337af75078fSIntel static void
338af75078fSIntel set_def_peer_eth_addrs(void)
339af75078fSIntel {
340af75078fSIntel 	portid_t i;
341af75078fSIntel 
342af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
345af75078fSIntel 	}
346af75078fSIntel }
347af75078fSIntel 
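/*
 * Default port configuration: forward on all probed ports, in probe order.
 */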
348af75078fSIntel static void
349af75078fSIntel set_default_fwd_ports_config(void)
350af75078fSIntel {
351af75078fSIntel 	portid_t pt_id;
352af75078fSIntel 
353af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
354af75078fSIntel 		fwd_ports_ids[pt_id] = pt_id;
355af75078fSIntel 
356af75078fSIntel 	nb_cfg_ports = nb_ports;
357af75078fSIntel 	nb_fwd_ports = nb_ports;
358af75078fSIntel }
359af75078fSIntel 
360af75078fSIntel void
361af75078fSIntel set_def_fwd_config(void)
362af75078fSIntel {
363af75078fSIntel 	set_default_fwd_lcores_config();
364af75078fSIntel 	set_def_peer_eth_addrs();
365af75078fSIntel 	set_default_fwd_ports_config();
366af75078fSIntel }
367af75078fSIntel 
368af75078fSIntel /*
369af75078fSIntel  * Configuration initialisation done once at init time.
370af75078fSIntel  */
371af75078fSIntel struct mbuf_ctor_arg {
372af75078fSIntel 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
373af75078fSIntel 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
374af75078fSIntel };
375af75078fSIntel 
376af75078fSIntel struct mbuf_pool_ctor_arg {
377af75078fSIntel 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
378af75078fSIntel };
379af75078fSIntel 
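/*
 * Per-mbuf constructor: initialize the buffer address, physical address,
 * buffer length and default packet fields of each mbuf of the pool.
 */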
380af75078fSIntel static void
381af75078fSIntel testpmd_mbuf_ctor(struct rte_mempool *mp,
382af75078fSIntel 		  void *opaque_arg,
383af75078fSIntel 		  void *raw_mbuf,
384af75078fSIntel 		  __attribute__((unused)) unsigned i)
385af75078fSIntel {
386af75078fSIntel 	struct mbuf_ctor_arg *mb_ctor_arg;
387af75078fSIntel 	struct rte_mbuf    *mb;
388af75078fSIntel 
389af75078fSIntel 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390af75078fSIntel 	mb = (struct rte_mbuf *) raw_mbuf;
391af75078fSIntel 
392e788fe1cSIvan Boule 	mb->type         = RTE_MBUF_PKT;
393af75078fSIntel 	mb->pool         = mp;
394af75078fSIntel 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395af75078fSIntel 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396af75078fSIntel 			mb_ctor_arg->seg_buf_offset);
397af75078fSIntel 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
399af75078fSIntel 	mb->ol_flags     = 0;
400af75078fSIntel 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
401af75078fSIntel 	mb->pkt.nb_segs  = 1;
40242d71416SIntel 	mb->pkt.vlan_macip.data = 0;
403af75078fSIntel 	mb->pkt.hash.rss = 0;
404af75078fSIntel }
405af75078fSIntel 
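/*
 * Pool constructor: record the size of the mbuf data room in the
 * private area of the mempool.
 */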
406af75078fSIntel static void
407af75078fSIntel testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
408af75078fSIntel 		       void *opaque_arg)
409af75078fSIntel {
410af75078fSIntel 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
411af75078fSIntel 	struct rte_pktmbuf_pool_private *mbp_priv;
412af75078fSIntel 
413af75078fSIntel 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414af75078fSIntel 		printf("%s(%s) private_data_size %d < %d\n",
415af75078fSIntel 		       __func__, mp->name, (int) mp->private_data_size,
416af75078fSIntel 		       (int) sizeof(struct rte_pktmbuf_pool_private));
417af75078fSIntel 		return;
418af75078fSIntel 	}
419af75078fSIntel 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420148f963fSBruce Richardson 	mbp_priv = rte_mempool_get_priv(mp);
421af75078fSIntel 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
422af75078fSIntel }
423af75078fSIntel 
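/*
 * Create a pool of nb_mbuf mbufs with a data room of mbuf_seg_size bytes
 * on the given socket. Depending on the build and runtime options, the
 * pool is backed by Xen grant-alloc memory, anonymous mapped memory
 * (when mp_anon is set) or the standard rte_mempool allocator.
 * Exits on failure.
 */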
424af75078fSIntel static void
425af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426af75078fSIntel 		 unsigned int socket_id)
427af75078fSIntel {
428af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
429af75078fSIntel 	struct rte_mempool *rte_mp;
430af75078fSIntel 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
431af75078fSIntel 	struct mbuf_ctor_arg mb_ctor_arg;
432af75078fSIntel 	uint32_t mb_size;
433af75078fSIntel 
434af75078fSIntel 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
435af75078fSIntel 						mbuf_seg_size);
436af75078fSIntel 	mb_ctor_arg.seg_buf_offset =
437af75078fSIntel 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438af75078fSIntel 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439af75078fSIntel 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
440af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
441148f963fSBruce Richardson 
442148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
443148f963fSBruce Richardson 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444af75078fSIntel                                    (unsigned) mb_mempool_cache,
445af75078fSIntel                                    sizeof(struct rte_pktmbuf_pool_private),
446af75078fSIntel                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447af75078fSIntel                                    testpmd_mbuf_ctor, &mb_ctor_arg,
448af75078fSIntel                                    socket_id, 0);
449148f963fSBruce Richardson 
450148f963fSBruce Richardson 
451148f963fSBruce Richardson 
452148f963fSBruce Richardson #else
453148f963fSBruce Richardson 	if (mp_anon != 0)
454148f963fSBruce Richardson 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455148f963fSBruce Richardson 				    (unsigned) mb_mempool_cache,
456148f963fSBruce Richardson 				    sizeof(struct rte_pktmbuf_pool_private),
457148f963fSBruce Richardson 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458148f963fSBruce Richardson 				    testpmd_mbuf_ctor, &mb_ctor_arg,
459148f963fSBruce Richardson 				    socket_id, 0);
460148f963fSBruce Richardson 	else
461148f963fSBruce Richardson 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462148f963fSBruce Richardson 				    (unsigned) mb_mempool_cache,
463148f963fSBruce Richardson 				    sizeof(struct rte_pktmbuf_pool_private),
464148f963fSBruce Richardson 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465148f963fSBruce Richardson 				    testpmd_mbuf_ctor, &mb_ctor_arg,
466148f963fSBruce Richardson 				    socket_id, 0);
467148f963fSBruce Richardson 
468148f963fSBruce Richardson #endif
469148f963fSBruce Richardson 
470af75078fSIntel 	if (rte_mp == NULL) {
471ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472ce8d5614SIntel 						"failed\n", socket_id);
473148f963fSBruce Richardson 	} else if (verbose_level > 0) {
474148f963fSBruce Richardson 		rte_mempool_dump(rte_mp);
475af75078fSIntel 	}
476af75078fSIntel }
477af75078fSIntel 
47820a0286fSLiu Xiaofeng /*
47920a0286fSLiu Xiaofeng  * Check whether the given socket id is valid in NUMA mode;
48020a0286fSLiu Xiaofeng  * return 0 if valid, -1 otherwise.
48120a0286fSLiu Xiaofeng  */
48220a0286fSLiu Xiaofeng static int
48320a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
48420a0286fSLiu Xiaofeng {
48520a0286fSLiu Xiaofeng 	static int warning_once = 0;
48620a0286fSLiu Xiaofeng 
48720a0286fSLiu Xiaofeng 	if (socket_id >= MAX_SOCKET) {
48820a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
48920a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
49020a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
49120a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
49220a0286fSLiu Xiaofeng 			       " --numa.\n");
49320a0286fSLiu Xiaofeng 		warning_once = 1;
49420a0286fSLiu Xiaofeng 		return -1;
49520a0286fSLiu Xiaofeng 	}
49620a0286fSLiu Xiaofeng 	return 0;
49720a0286fSLiu Xiaofeng }
49820a0286fSLiu Xiaofeng 
499af75078fSIntel static void
500af75078fSIntel init_config(void)
501af75078fSIntel {
502ce8d5614SIntel 	portid_t pid;
503af75078fSIntel 	struct rte_port *port;
504af75078fSIntel 	struct rte_mempool *mbp;
505af75078fSIntel 	unsigned int nb_mbuf_per_pool;
506af75078fSIntel 	lcoreid_t  lc_id;
507b6ea6408SIntel 	uint8_t port_per_socket[MAX_SOCKET];
508af75078fSIntel 
509b6ea6408SIntel 	memset(port_per_socket,0,MAX_SOCKET);
510af75078fSIntel 	/* Configuration of logical cores. */
511af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
512af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
513af75078fSIntel 				CACHE_LINE_SIZE);
514af75078fSIntel 	if (fwd_lcores == NULL) {
515ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
516ce8d5614SIntel 							"failed\n", nb_lcores);
517af75078fSIntel 	}
518af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
519af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
520af75078fSIntel 					       sizeof(struct fwd_lcore),
521af75078fSIntel 					       CACHE_LINE_SIZE);
522af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
523ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
524ce8d5614SIntel 								"failed\n");
525af75078fSIntel 		}
526af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
527af75078fSIntel 	}
528af75078fSIntel 
529af75078fSIntel 	/*
530af75078fSIntel 	 * Create pools of mbufs.
531af75078fSIntel 	 * If NUMA support is disabled, create a single pool of mbufs in
532b6ea6408SIntel 	 * socket 0 memory by default.
533af75078fSIntel 	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
534c8798818SIntel 	 *
535c8798818SIntel 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
536c8798818SIntel 	 * nb_txd can be configured at run time.
537af75078fSIntel 	 */
538c8798818SIntel 	if (param_total_num_mbufs)
539c8798818SIntel 		nb_mbuf_per_pool = param_total_num_mbufs;
540c8798818SIntel 	else {
541c8798818SIntel 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
542c8798818SIntel 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
543b6ea6408SIntel 
544b6ea6408SIntel 		if (!numa_support)
545c8798818SIntel 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
546c8798818SIntel 	}
547af75078fSIntel 
548b6ea6408SIntel 	if (!numa_support) {
549b6ea6408SIntel 		if (socket_num == UMA_NO_CONFIG)
550b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
551b6ea6408SIntel 		else
552b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
553b6ea6408SIntel 						 socket_num);
554b6ea6408SIntel 	}
555af75078fSIntel 
556af75078fSIntel 	/* Configuration of Ethernet ports. */
557af75078fSIntel 	ports = rte_zmalloc("testpmd: ports",
558af75078fSIntel 			    sizeof(struct rte_port) * nb_ports,
559af75078fSIntel 			    CACHE_LINE_SIZE);
560af75078fSIntel 	if (ports == NULL) {
561ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
562ce8d5614SIntel 							"failed\n", nb_ports);
563af75078fSIntel 	}
564af75078fSIntel 
565ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
566ce8d5614SIntel 		port = &ports[pid];
567ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
568ce8d5614SIntel 
569b6ea6408SIntel 		if (numa_support) {
570b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
571b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
572b6ea6408SIntel 			else {
573b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
57420a0286fSLiu Xiaofeng 
57520a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
57620a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
57720a0286fSLiu Xiaofeng 					socket_id = 0;
578b6ea6408SIntel 				port_per_socket[socket_id]++;
579b6ea6408SIntel 			}
580b6ea6408SIntel 		}
581b6ea6408SIntel 
582ce8d5614SIntel 		/* set flag to initialize port/queue */
583ce8d5614SIntel 		port->need_reconfig = 1;
584ce8d5614SIntel 		port->need_reconfig_queues = 1;
585ce8d5614SIntel 	}
586ce8d5614SIntel 
587b6ea6408SIntel 	if (numa_support) {
588b6ea6408SIntel 		uint8_t i;
589b6ea6408SIntel 		unsigned int nb_mbuf;
590ce8d5614SIntel 
591b6ea6408SIntel 		if (param_total_num_mbufs)
592b6ea6408SIntel 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
593b6ea6408SIntel 
594b6ea6408SIntel 		for (i = 0; i < MAX_SOCKET; i++) {
595b6ea6408SIntel 			nb_mbuf = (nb_mbuf_per_pool *
596b6ea6408SIntel 						port_per_socket[i]);
597b6ea6408SIntel 			if (nb_mbuf)
598b6ea6408SIntel 				mbuf_pool_create(mbuf_data_size,
599b6ea6408SIntel 						nb_mbuf,i);
600b6ea6408SIntel 		}
601b6ea6408SIntel 	}
602b6ea6408SIntel 	init_port_config();
6035886ae07SAdrien Mazarguil 
6045886ae07SAdrien Mazarguil 	/*
6055886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
6065886ae07SAdrien Mazarguil 	 */
6075886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
6085886ae07SAdrien Mazarguil 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
6095886ae07SAdrien Mazarguil 		if (mbp == NULL)
6105886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
6115886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
6125886ae07SAdrien Mazarguil 	}
6135886ae07SAdrien Mazarguil 
614ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
615ce8d5614SIntel 	if (init_fwd_streams() < 0)
616ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
617ce8d5614SIntel }
618ce8d5614SIntel 
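/*
 * (Re)initialize the forwarding streams: check that nb_rxq/nb_txq do not
 * exceed the device limits, assign a socket id to each port, then allocate
 * one forwarding stream per (port, RX queue) pair.
 * Return 0 on success, -1 on error.
 */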
619ce8d5614SIntel int
620ce8d5614SIntel init_fwd_streams(void)
621ce8d5614SIntel {
622ce8d5614SIntel 	portid_t pid;
623ce8d5614SIntel 	struct rte_port *port;
624ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
625ce8d5614SIntel 
626ce8d5614SIntel 	/* Set the socket id of each port according to NUMA mode. */
627ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
628ce8d5614SIntel 		port = &ports[pid];
629ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
630ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
631ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
632ce8d5614SIntel 				port->dev_info.max_rx_queues);
633ce8d5614SIntel 			return -1;
634ce8d5614SIntel 		}
635ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
636ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
637ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
638ce8d5614SIntel 				port->dev_info.max_tx_queues);
639ce8d5614SIntel 			return -1;
640ce8d5614SIntel 		}
64120a0286fSLiu Xiaofeng 		if (numa_support) {
64220a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
64320a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
64420a0286fSLiu Xiaofeng 			else {
645b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
64620a0286fSLiu Xiaofeng 
64720a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
64820a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
64920a0286fSLiu Xiaofeng 					port->socket_id = 0;
65020a0286fSLiu Xiaofeng 			}
65120a0286fSLiu Xiaofeng 		}
652b6ea6408SIntel 		else {
653b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
654af75078fSIntel 				port->socket_id = 0;
655b6ea6408SIntel 			else
656b6ea6408SIntel 				port->socket_id = socket_num;
657b6ea6408SIntel 		}
658af75078fSIntel 	}
659af75078fSIntel 
660ce8d5614SIntel 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
661ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
662ce8d5614SIntel 		return 0;
663ce8d5614SIntel 	/* clear the old */
664ce8d5614SIntel 	if (fwd_streams != NULL) {
665ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
666ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
667ce8d5614SIntel 				continue;
668ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
669ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
670af75078fSIntel 		}
671ce8d5614SIntel 		rte_free(fwd_streams);
672ce8d5614SIntel 		fwd_streams = NULL;
673ce8d5614SIntel 	}
674ce8d5614SIntel 
675ce8d5614SIntel 	/* init new */
676ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
677ce8d5614SIntel 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
678ce8d5614SIntel 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
679ce8d5614SIntel 	if (fwd_streams == NULL)
680ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
681ce8d5614SIntel 						"failed\n", nb_fwd_streams);
682ce8d5614SIntel 
683af75078fSIntel 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
684af75078fSIntel 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
685ce8d5614SIntel 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
686ce8d5614SIntel 		if (fwd_streams[sm_id] == NULL)
687ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
688ce8d5614SIntel 								" failed\n");
689af75078fSIntel 	}
690ce8d5614SIntel 
691ce8d5614SIntel 	return 0;
692af75078fSIntel }
693af75078fSIntel 
694af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
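/*
 * Display the distribution of burst sizes recorded for a stream: the total
 * number of bursts and the share of the most frequent burst sizes.
 */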
695af75078fSIntel static void
696af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
697af75078fSIntel {
698af75078fSIntel 	unsigned int total_burst;
699af75078fSIntel 	unsigned int nb_burst;
700af75078fSIntel 	unsigned int burst_stats[3];
701af75078fSIntel 	uint16_t pktnb_stats[3];
702af75078fSIntel 	uint16_t nb_pkt;
703af75078fSIntel 	int burst_percent[3];
704af75078fSIntel 
705af75078fSIntel 	/*
706af75078fSIntel 	 * First compute the total number of packet bursts and the
707af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
708af75078fSIntel 	 */
709af75078fSIntel 	total_burst = 0;
710af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
711af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
712af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
713af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
714af75078fSIntel 		if (nb_burst == 0)
715af75078fSIntel 			continue;
716af75078fSIntel 		total_burst += nb_burst;
717af75078fSIntel 		if (nb_burst > burst_stats[0]) {
718af75078fSIntel 			burst_stats[1] = burst_stats[0];
719af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
720af75078fSIntel 			burst_stats[0] = nb_burst;
721af75078fSIntel 			pktnb_stats[0] = nb_pkt;
722af75078fSIntel 		}
723af75078fSIntel 	}
724af75078fSIntel 	if (total_burst == 0)
725af75078fSIntel 		return;
726af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
727af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
728af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
729af75078fSIntel 	if (burst_stats[0] == total_burst) {
730af75078fSIntel 		printf("]\n");
731af75078fSIntel 		return;
732af75078fSIntel 	}
733af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
734af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
735af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
736af75078fSIntel 		return;
737af75078fSIntel 	}
738af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
739af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
740af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
741af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
742af75078fSIntel 		return;
743af75078fSIntel 	}
744af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
745af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
746af75078fSIntel }
747af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
748af75078fSIntel 
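/*
 * Display the forwarding statistics of a port: RX/TX packet and drop
 * counters, checksum error counters in checksum mode, pause frame
 * counters and, when queue statistics mapping is enabled, the per-queue
 * counters.
 */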
749af75078fSIntel static void
750af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
751af75078fSIntel {
752af75078fSIntel 	struct rte_port *port;
753013af9b6SIntel 	uint8_t i;
754af75078fSIntel 
755af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
756af75078fSIntel 
757af75078fSIntel 	port = &ports[port_id];
758af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
759af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
760013af9b6SIntel 
761013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
762af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
763af75078fSIntel 		       "%-"PRIu64"\n",
764af75078fSIntel 		       stats->ipackets, stats->ierrors,
765af75078fSIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
766af75078fSIntel 
767af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
768af75078fSIntel 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
769af75078fSIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
770af75078fSIntel 
771af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
772af75078fSIntel 		       "%-"PRIu64"\n",
773af75078fSIntel 		       stats->opackets, port->tx_dropped,
774af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
775af75078fSIntel 
776af75078fSIntel 		if (stats->rx_nombuf > 0)
777af75078fSIntel 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
778013af9b6SIntel 
779013af9b6SIntel 	}
780013af9b6SIntel 	else {
781013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
782013af9b6SIntel 		       "%14"PRIu64"\n",
783013af9b6SIntel 		       stats->ipackets, stats->ierrors,
784013af9b6SIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
785013af9b6SIntel 
786013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
787013af9b6SIntel 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
788013af9b6SIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
789013af9b6SIntel 
790013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
791013af9b6SIntel 		       "%14"PRIu64"\n",
792013af9b6SIntel 		       stats->opackets, port->tx_dropped,
793013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
794013af9b6SIntel 
795013af9b6SIntel 		if (stats->rx_nombuf > 0)
796013af9b6SIntel 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
797013af9b6SIntel 	}
798e659b6b4SIvan Boule 
799e659b6b4SIvan Boule 	/* Display statistics of XON/XOFF pause frames, if any. */
800e659b6b4SIvan Boule 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
801e659b6b4SIvan Boule 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
802e659b6b4SIvan Boule 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
803e659b6b4SIvan Boule 		       stats->rx_pause_xoff, stats->rx_pause_xon);
804e659b6b4SIvan Boule 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
805e659b6b4SIvan Boule 		       stats->tx_pause_xoff, stats->tx_pause_xon);
806e659b6b4SIvan Boule 	}
807e659b6b4SIvan Boule 
808af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
809af75078fSIntel 	if (port->rx_stream)
810013af9b6SIntel 		pkt_burst_stats_display("RX",
811013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
812af75078fSIntel 	if (port->tx_stream)
813013af9b6SIntel 		pkt_burst_stats_display("TX",
814013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
815af75078fSIntel #endif
816af75078fSIntel 	/* Display flow director statistics, if enabled */
817af75078fSIntel 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
818013af9b6SIntel 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
819af75078fSIntel 		       stats->fdirmiss,
820af75078fSIntel 		       stats->fdirmatch);
821af75078fSIntel 
822013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
823013af9b6SIntel 		printf("\n");
824013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
825013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
826013af9b6SIntel 			       "     RX-errors:%14"PRIu64
827013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
828013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
829013af9b6SIntel 		}
830013af9b6SIntel 		printf("\n");
831013af9b6SIntel 	}
832013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
833013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
834013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
835013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
836013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
837013af9b6SIntel 		}
838013af9b6SIntel 	}
839013af9b6SIntel 
840af75078fSIntel 	printf("  %s--------------------------------%s\n",
841af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
842af75078fSIntel }
843af75078fSIntel 
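/*
 * Display the statistics of a single forwarding stream
 * (RX port/queue -> TX port/queue), if it handled any traffic.
 */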
844af75078fSIntel static void
845af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
846af75078fSIntel {
847af75078fSIntel 	struct fwd_stream *fs;
848af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
849af75078fSIntel 
850af75078fSIntel 	fs = fwd_streams[stream_id];
851af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
852af75078fSIntel 	    (fs->fwd_dropped == 0))
853af75078fSIntel 		return;
854af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
855af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
856af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
857af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
858af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
859af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
860af75078fSIntel 
861af75078fSIntel 	/* if checksum mode */
862af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
863013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
864013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
865af75078fSIntel 	}
866af75078fSIntel 
867af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
868af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
869af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
870af75078fSIntel #endif
871af75078fSIntel }
872af75078fSIntel 
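/*
 * Drain the RX queues of all forwarding ports before a new run: receive
 * and free any pending packets, in two passes separated by a 10 ms delay.
 */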
873af75078fSIntel static void
8747741e4cfSIntel flush_fwd_rx_queues(void)
875af75078fSIntel {
876af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
877af75078fSIntel 	portid_t  rxp;
8787741e4cfSIntel 	portid_t port_id;
879af75078fSIntel 	queueid_t rxq;
880af75078fSIntel 	uint16_t  nb_rx;
881af75078fSIntel 	uint16_t  i;
882af75078fSIntel 	uint8_t   j;
883af75078fSIntel 
884af75078fSIntel 	for (j = 0; j < 2; j++) {
8857741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
886af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
8877741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
888af75078fSIntel 				do {
8897741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
890013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
891af75078fSIntel 					for (i = 0; i < nb_rx; i++)
892af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
893af75078fSIntel 				} while (nb_rx > 0);
894af75078fSIntel 			}
895af75078fSIntel 		}
896af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
897af75078fSIntel 	}
898af75078fSIntel }
899af75078fSIntel 
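/*
 * Forwarding loop of a logical core: repeatedly invoke the packet
 * forwarding function on each stream assigned to the core until the core
 * is asked to stop.
 */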
900af75078fSIntel static void
901af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
902af75078fSIntel {
903af75078fSIntel 	struct fwd_stream **fsm;
904af75078fSIntel 	streamid_t nb_fs;
905af75078fSIntel 	streamid_t sm_id;
906af75078fSIntel 
907af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
908af75078fSIntel 	nb_fs = fc->stream_nb;
909af75078fSIntel 	do {
910af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
911af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
912af75078fSIntel 	} while (! fc->stopped);
913af75078fSIntel }
914af75078fSIntel 
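/*
 * Entry point of a forwarding lcore, launched through
 * rte_eal_remote_launch(): run the packet forwarding engine of the
 * current configuration.
 */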
915af75078fSIntel static int
916af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
917af75078fSIntel {
918af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
919af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
920af75078fSIntel 	return 0;
921af75078fSIntel }
922af75078fSIntel 
923af75078fSIntel /*
924af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
925af75078fSIntel  * Used to start communication flows in network loopback test configurations.
926af75078fSIntel  */
927af75078fSIntel static int
928af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
929af75078fSIntel {
930af75078fSIntel 	struct fwd_lcore *fwd_lc;
931af75078fSIntel 	struct fwd_lcore tmp_lcore;
932af75078fSIntel 
933af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
934af75078fSIntel 	tmp_lcore = *fwd_lc;
935af75078fSIntel 	tmp_lcore.stopped = 1;
936af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
937af75078fSIntel 	return 0;
938af75078fSIntel }
939af75078fSIntel 
940af75078fSIntel /*
941af75078fSIntel  * Launch packet forwarding:
942af75078fSIntel  *     - Setup per-port forwarding context.
943af75078fSIntel  *     - launch logical cores with their forwarding configuration.
944af75078fSIntel  */
945af75078fSIntel static void
946af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
947af75078fSIntel {
948af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
949af75078fSIntel 	unsigned int i;
950af75078fSIntel 	unsigned int lc_id;
951af75078fSIntel 	int diag;
952af75078fSIntel 
953af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
954af75078fSIntel 	if (port_fwd_begin != NULL) {
955af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
956af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
957af75078fSIntel 	}
958af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
959af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
960af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
961af75078fSIntel 			fwd_lcores[i]->stopped = 0;
962af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
963af75078fSIntel 						     fwd_lcores[i], lc_id);
964af75078fSIntel 			if (diag != 0)
965af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
966af75078fSIntel 				       lc_id, diag);
967af75078fSIntel 		}
968af75078fSIntel 	}
969af75078fSIntel }
970af75078fSIntel 
971af75078fSIntel /*
972af75078fSIntel  * Launch packet forwarding configuration.
973af75078fSIntel  */
974af75078fSIntel void
975af75078fSIntel start_packet_forwarding(int with_tx_first)
976af75078fSIntel {
977af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
978af75078fSIntel 	port_fwd_end_t  port_fwd_end;
979af75078fSIntel 	struct rte_port *port;
980af75078fSIntel 	unsigned int i;
981af75078fSIntel 	portid_t   pt_id;
982af75078fSIntel 	streamid_t sm_id;
983af75078fSIntel 
984ce8d5614SIntel 	if (all_ports_started() == 0) {
985ce8d5614SIntel 		printf("Not all ports were started\n");
986ce8d5614SIntel 		return;
987ce8d5614SIntel 	}
988af75078fSIntel 	if (test_done == 0) {
989af75078fSIntel 		printf("Packet forwarding already started\n");
990af75078fSIntel 		return;
991af75078fSIntel 	}
9927741e4cfSIntel 	if(dcb_test) {
9937741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
9947741e4cfSIntel 			pt_id = fwd_ports_ids[i];
9957741e4cfSIntel 			port = &ports[pt_id];
9967741e4cfSIntel 			if (!port->dcb_flag) {
9977741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
9987741e4cfSIntel                                        "be configured in this mode.\n");
999013af9b6SIntel 				return;
1000013af9b6SIntel 			}
10017741e4cfSIntel 		}
10027741e4cfSIntel 		if (nb_fwd_lcores == 1) {
10037741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
10047741e4cfSIntel                                "should be larger than 1.\n");
10057741e4cfSIntel 			return;
10067741e4cfSIntel 		}
10077741e4cfSIntel 	}
1008af75078fSIntel 	test_done = 0;
10097741e4cfSIntel 
10107741e4cfSIntel 	if(!no_flush_rx)
10117741e4cfSIntel 		flush_fwd_rx_queues();
10127741e4cfSIntel 
1013af75078fSIntel 	fwd_config_setup();
1014af75078fSIntel 	rxtx_config_display();
1015af75078fSIntel 
1016af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1017af75078fSIntel 		pt_id = fwd_ports_ids[i];
1018af75078fSIntel 		port = &ports[pt_id];
1019af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1020af75078fSIntel 		port->tx_dropped = 0;
1021013af9b6SIntel 
1022013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1023af75078fSIntel 	}
1024af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1025af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1026af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1027af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1028af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1029af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1030af75078fSIntel 
1031af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1032af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1033af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1034af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1035af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1036af75078fSIntel #endif
1037af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1038af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1039af75078fSIntel #endif
1040af75078fSIntel 	}
1041af75078fSIntel 	if (with_tx_first) {
1042af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1043af75078fSIntel 		if (port_fwd_begin != NULL) {
1044af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1045af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1046af75078fSIntel 		}
1047af75078fSIntel 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1048af75078fSIntel 		rte_eal_mp_wait_lcore();
1049af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1050af75078fSIntel 		if (port_fwd_end != NULL) {
1051af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1052af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1053af75078fSIntel 		}
1054af75078fSIntel 	}
1055af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1056af75078fSIntel }
1057af75078fSIntel 
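/*
 * Stop packet forwarding: tell the forwarding lcores to stop, wait for
 * them, accumulate the per-stream statistics into the ports and display
 * the per-port and total forwarding statistics.
 */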
1058af75078fSIntel void
1059af75078fSIntel stop_packet_forwarding(void)
1060af75078fSIntel {
1061af75078fSIntel 	struct rte_eth_stats stats;
1062af75078fSIntel 	struct rte_port *port;
1063af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1064af75078fSIntel 	int i;
1065af75078fSIntel 	portid_t   pt_id;
1066af75078fSIntel 	streamid_t sm_id;
1067af75078fSIntel 	lcoreid_t  lc_id;
1068af75078fSIntel 	uint64_t total_recv;
1069af75078fSIntel 	uint64_t total_xmit;
1070af75078fSIntel 	uint64_t total_rx_dropped;
1071af75078fSIntel 	uint64_t total_tx_dropped;
1072af75078fSIntel 	uint64_t total_rx_nombuf;
1073af75078fSIntel 	uint64_t tx_dropped;
1074af75078fSIntel 	uint64_t rx_bad_ip_csum;
1075af75078fSIntel 	uint64_t rx_bad_l4_csum;
1076af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1077af75078fSIntel 	uint64_t fwd_cycles;
1078af75078fSIntel #endif
1079af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1080af75078fSIntel 
1081ce8d5614SIntel 	if (all_ports_started() == 0) {
1082ce8d5614SIntel 		printf("Not all ports were started\n");
1083ce8d5614SIntel 		return;
1084ce8d5614SIntel 	}
1085af75078fSIntel 	if (test_done) {
1086af75078fSIntel 		printf("Packet forwarding not started\n");
1087af75078fSIntel 		return;
1088af75078fSIntel 	}
1089af75078fSIntel 	printf("Telling cores to stop...");
1090af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1091af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1092af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1093af75078fSIntel 	rte_eal_mp_wait_lcore();
1094af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1095af75078fSIntel 	if (port_fwd_end != NULL) {
1096af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1097af75078fSIntel 			pt_id = fwd_ports_ids[i];
1098af75078fSIntel 			(*port_fwd_end)(pt_id);
1099af75078fSIntel 		}
1100af75078fSIntel 	}
1101af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1102af75078fSIntel 	fwd_cycles = 0;
1103af75078fSIntel #endif
1104af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1105af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1106af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1107af75078fSIntel 			fwd_stream_stats_display(sm_id);
1108af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1109af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1110af75078fSIntel 		} else {
1111af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1112af75078fSIntel 				fwd_streams[sm_id];
1113af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1114af75078fSIntel 				fwd_streams[sm_id];
1115af75078fSIntel 		}
1116af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1117af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1118af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1119af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1120af75078fSIntel 
1121013af9b6SIntel 		rx_bad_ip_csum =
1122013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1123af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1124af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1125013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1126013af9b6SIntel 							rx_bad_ip_csum;
1127af75078fSIntel 
1128013af9b6SIntel 		rx_bad_l4_csum =
1129013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1130af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1131af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1132013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1133013af9b6SIntel 							rx_bad_l4_csum;
1134af75078fSIntel 
1135af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1136af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1137af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1138af75078fSIntel #endif
1139af75078fSIntel 	}
1140af75078fSIntel 	total_recv = 0;
1141af75078fSIntel 	total_xmit = 0;
1142af75078fSIntel 	total_rx_dropped = 0;
1143af75078fSIntel 	total_tx_dropped = 0;
1144af75078fSIntel 	total_rx_nombuf  = 0;
11457741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1146af75078fSIntel 		pt_id = fwd_ports_ids[i];
1147af75078fSIntel 
1148af75078fSIntel 		port = &ports[pt_id];
1149af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1150af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1151af75078fSIntel 		port->stats.ipackets = 0;
1152af75078fSIntel 		stats.opackets -= port->stats.opackets;
1153af75078fSIntel 		port->stats.opackets = 0;
1154af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1155af75078fSIntel 		port->stats.ibytes = 0;
1156af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1157af75078fSIntel 		port->stats.obytes = 0;
1158af75078fSIntel 		stats.ierrors  -= port->stats.ierrors;
1159af75078fSIntel 		port->stats.ierrors = 0;
1160af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1161af75078fSIntel 		port->stats.oerrors = 0;
1162af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1163af75078fSIntel 		port->stats.rx_nombuf = 0;
1164af75078fSIntel 		stats.fdirmatch -= port->stats.fdirmatch;
1165af75078fSIntel 		port->stats.fdirmatch = 0;
1166af75078fSIntel 		stats.fdirmiss -= port->stats.fdirmiss;
1167af75078fSIntel 		port->stats.fdirmiss = 0;
1168af75078fSIntel 
1169af75078fSIntel 		total_recv += stats.ipackets;
1170af75078fSIntel 		total_xmit += stats.opackets;
1171af75078fSIntel 		total_rx_dropped += stats.ierrors;
1172af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1173af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1174af75078fSIntel 
1175af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1176af75078fSIntel 	}
1177af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1178af75078fSIntel 	       "%s\n",
1179af75078fSIntel 	       acc_stats_border, acc_stats_border);
1180af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1181af75078fSIntel 	       "%-"PRIu64"\n"
1182af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1183af75078fSIntel 	       "%-"PRIu64"\n",
1184af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1185af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1186af75078fSIntel 	if (total_rx_nombuf > 0)
1187af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1188af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1189af75078fSIntel 	       "%s\n",
1190af75078fSIntel 	       acc_stats_border, acc_stats_border);
1191af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1192af75078fSIntel 	if (total_recv > 0)
1193af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1194af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1195af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1196af75078fSIntel 		       fwd_cycles, total_recv);
1197af75078fSIntel #endif
1198af75078fSIntel 	printf("\nDone.\n");
1199af75078fSIntel 	test_done = 1;
1200af75078fSIntel }
1201af75078fSIntel 
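/* Check whether all ports are in the started state; return 1 if so, 0 otherwise. */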
1202ce8d5614SIntel static int
1203ce8d5614SIntel all_ports_started(void)
1204ce8d5614SIntel {
1205ce8d5614SIntel 	portid_t pi;
1206ce8d5614SIntel 	struct rte_port *port;
1207ce8d5614SIntel 
1208ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1209ce8d5614SIntel 		port = &ports[pi];
1210ce8d5614SIntel 		/* Check if there is a port which is not started */
1211ce8d5614SIntel 		if (port->port_status != RTE_PORT_STARTED)
1212ce8d5614SIntel 			return 0;
1213ce8d5614SIntel 	}
1214ce8d5614SIntel 
1215ce8d5614SIntel 	/* All ports are started */
1216ce8d5614SIntel 	return 1;
1217ce8d5614SIntel }
1218ce8d5614SIntel 
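/*
 * Configure and start port "pid", or all ports when "pid" does not match an
 * existing port id. Devices and their RX/TX queues are (re)configured as
 * needed. Returns 0 on success, -1 if forwarding is still running or a
 * configuration step fails.
 */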
1219148f963fSBruce Richardson int
1220ce8d5614SIntel start_port(portid_t pid)
1221ce8d5614SIntel {
1222ce8d5614SIntel 	int diag, need_check_link_status = 0;
1223ce8d5614SIntel 	portid_t pi;
1224ce8d5614SIntel 	queueid_t qi;
1225ce8d5614SIntel 	struct rte_port *port;
1226ce8d5614SIntel 
1227ce8d5614SIntel 	if (test_done == 0) {
1228ce8d5614SIntel 		printf("Please stop forwarding first\n");
1229148f963fSBruce Richardson 		return -1;
1230ce8d5614SIntel 	}
1231ce8d5614SIntel 
1232ce8d5614SIntel 	if (init_fwd_streams() < 0) {
1233ce8d5614SIntel 		printf("init_fwd_streams() failed\n");
1234148f963fSBruce Richardson 		return -1;
1235ce8d5614SIntel 	}
1236ce8d5614SIntel 
1237ce8d5614SIntel 	if (dcb_config)
1238ce8d5614SIntel 		dcb_test = 1;
1239ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1240ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1241ce8d5614SIntel 			continue;
1242ce8d5614SIntel 
1243ce8d5614SIntel 		port = &ports[pi];
1244ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1245ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1246ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1247ce8d5614SIntel 			continue;
1248ce8d5614SIntel 		}
1249ce8d5614SIntel 
1250ce8d5614SIntel 		if (port->need_reconfig > 0) {
1251ce8d5614SIntel 			port->need_reconfig = 0;
1252ce8d5614SIntel 
1253*5706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
125420a0286fSLiu Xiaofeng 					port->socket_id);
1255ce8d5614SIntel 			/* configure port */
1256ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1257ce8d5614SIntel 						&(port->dev_conf));
1258ce8d5614SIntel 			if (diag != 0) {
1259ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1260ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1261ce8d5614SIntel 					printf("Port %d cannot be set back "
1262ce8d5614SIntel 							"to stopped\n", pi);
1263ce8d5614SIntel 				printf("Failed to configure port %d\n", pi);
1264ce8d5614SIntel 				/* try to reconfigure port next time */
1265ce8d5614SIntel 				port->need_reconfig = 1;
1266148f963fSBruce Richardson 				return -1;
1267ce8d5614SIntel 			}
1268ce8d5614SIntel 		}
1269ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1270ce8d5614SIntel 			port->need_reconfig_queues = 0;
1271ce8d5614SIntel 			/* setup tx queues */
1272ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1273b6ea6408SIntel 				if ((numa_support) &&
1274b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1275b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1276b6ea6408SIntel 						nb_txd, txring_numa[pi],
1277b6ea6408SIntel 						&(port->tx_conf));
1278b6ea6408SIntel 				else
1279b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1280b6ea6408SIntel 						nb_txd, port->socket_id,
1281b6ea6408SIntel 						&(port->tx_conf));
1282b6ea6408SIntel 
1283ce8d5614SIntel 				if (diag == 0)
1284ce8d5614SIntel 					continue;
1285ce8d5614SIntel 
1286ce8d5614SIntel 				/* Fail to setup tx queue, return */
1287ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1288ce8d5614SIntel 							RTE_PORT_HANDLING,
1289ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1290ce8d5614SIntel 					printf("Port %d cannot be set back "
1291ce8d5614SIntel 							"to stopped\n", pi);
1292ce8d5614SIntel 				printf("Failed to configure port %d tx queues\n", pi);
1293ce8d5614SIntel 				/* try to reconfigure queues next time */
1294ce8d5614SIntel 				port->need_reconfig_queues = 1;
1295148f963fSBruce Richardson 				return -1;
1296ce8d5614SIntel 			}
1297ce8d5614SIntel 			/* setup rx queues */
1298ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
1299b6ea6408SIntel 				if ((numa_support) &&
1300b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1301b6ea6408SIntel 					struct rte_mempool * mp =
1302b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
1303b6ea6408SIntel 					if (mp == NULL) {
1304b6ea6408SIntel 						printf("Failed to setup RX queue: "
1305b6ea6408SIntel 							"no mempool allocation "
1306b6ea6408SIntel 							"on socket %d\n",
1307b6ea6408SIntel 							rxring_numa[pi]);
1308148f963fSBruce Richardson 						return -1;
1309b6ea6408SIntel 					}
1310b6ea6408SIntel 
1311b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1312b6ea6408SIntel 					     nb_rxd, rxring_numa[pi],
1313b6ea6408SIntel 					     &(port->rx_conf), mp);
1314b6ea6408SIntel 				}
1315b6ea6408SIntel 				else
1316b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1317b6ea6408SIntel 					     nb_rxd, port->socket_id,
1318b6ea6408SIntel 					     &(port->rx_conf),
1319ce8d5614SIntel 				             mbuf_pool_find(port->socket_id));
1320b6ea6408SIntel 
1321ce8d5614SIntel 				if (diag == 0)
1322ce8d5614SIntel 					continue;
1323ce8d5614SIntel 
1324b6ea6408SIntel 
1325ce8d5614SIntel 				/* Fail to setup rx queue, return */
1326ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1327ce8d5614SIntel 							RTE_PORT_HANDLING,
1328ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1329ce8d5614SIntel 					printf("Port %d cannot be set back "
1330ce8d5614SIntel 							"to stopped\n", pi);
1331ce8d5614SIntel 				printf("Failed to configure port %d rx queues\n", pi);
1332ce8d5614SIntel 				/* try to reconfigure queues next time */
1333ce8d5614SIntel 				port->need_reconfig_queues = 1;
1334148f963fSBruce Richardson 				return -1;
1335ce8d5614SIntel 			}
1336ce8d5614SIntel 		}
1337ce8d5614SIntel 		/* start port */
1338ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
1339ce8d5614SIntel 			printf("Failed to start port %d\n", pi);
1340ce8d5614SIntel 
1341ce8d5614SIntel 			/* Failed to start the port, set it back to stopped */
1342ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
1343ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1344ce8d5614SIntel 				printf("Port %d cannot be set back to "
1345ce8d5614SIntel 							"stopped\n", pi);
1346ce8d5614SIntel 			continue;
1347ce8d5614SIntel 		}
1348ce8d5614SIntel 
1349ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1350ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1351ce8d5614SIntel 			printf("Port %d cannot be set to the started state\n", pi);
1352ce8d5614SIntel 
1353ce8d5614SIntel 		/* at least one port started, need to check link status */
1354ce8d5614SIntel 		need_check_link_status = 1;
1355ce8d5614SIntel 	}
1356ce8d5614SIntel 
1357ce8d5614SIntel 	if (need_check_link_status)
1358ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1359ce8d5614SIntel 	else
1360ce8d5614SIntel 		printf("Please stop the ports first\n");
1361ce8d5614SIntel 
1362ce8d5614SIntel 	printf("Done\n");
1363148f963fSBruce Richardson 	return 0;
1364ce8d5614SIntel }
1365ce8d5614SIntel 
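/* Stop port "pid", or all ports when "pid" does not match an existing port id. */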
1366ce8d5614SIntel void
1367ce8d5614SIntel stop_port(portid_t pid)
1368ce8d5614SIntel {
1369ce8d5614SIntel 	portid_t pi;
1370ce8d5614SIntel 	struct rte_port *port;
1371ce8d5614SIntel 	int need_check_link_status = 0;
1372ce8d5614SIntel 
1373ce8d5614SIntel 	if (test_done == 0) {
1374ce8d5614SIntel 		printf("Please stop forwarding first\n");
1375ce8d5614SIntel 		return;
1376ce8d5614SIntel 	}
1377ce8d5614SIntel 	if (dcb_test) {
1378ce8d5614SIntel 		dcb_test = 0;
1379ce8d5614SIntel 		dcb_config = 0;
1380ce8d5614SIntel 	}
1381ce8d5614SIntel 	printf("Stopping ports...\n");
1382ce8d5614SIntel 
1383ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1384ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1385ce8d5614SIntel 			continue;
1386ce8d5614SIntel 
1387ce8d5614SIntel 		port = &ports[pi];
1388ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1389ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1390ce8d5614SIntel 			continue;
1391ce8d5614SIntel 
1392ce8d5614SIntel 		rte_eth_dev_stop(pi);
1393ce8d5614SIntel 
1394ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1395ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1396ce8d5614SIntel 			printf("Port %d cannot be set to the stopped state\n", pi);
1397ce8d5614SIntel 		need_check_link_status = 1;
1398ce8d5614SIntel 	}
1399ce8d5614SIntel 	if (need_check_link_status)
1400ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1401ce8d5614SIntel 
1402ce8d5614SIntel 	printf("Done\n");
1403ce8d5614SIntel }
1404ce8d5614SIntel 
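/* Close port "pid", or all ports when "pid" does not match an existing port id. */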
1405ce8d5614SIntel void
1406ce8d5614SIntel close_port(portid_t pid)
1407ce8d5614SIntel {
1408ce8d5614SIntel 	portid_t pi;
1409ce8d5614SIntel 	struct rte_port *port;
1410ce8d5614SIntel 
1411ce8d5614SIntel 	if (test_done == 0) {
1412ce8d5614SIntel 		printf("Please stop forwarding first\n");
1413ce8d5614SIntel 		return;
1414ce8d5614SIntel 	}
1415ce8d5614SIntel 
1416ce8d5614SIntel 	printf("Closing ports...\n");
1417ce8d5614SIntel 
1418ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1419ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1420ce8d5614SIntel 			continue;
1421ce8d5614SIntel 
1422ce8d5614SIntel 		port = &ports[pi];
1423ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1424ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1425ce8d5614SIntel 			printf("Port %d is not stopped\n", pi);
1426ce8d5614SIntel 			continue;
1427ce8d5614SIntel 		}
1428ce8d5614SIntel 
1429ce8d5614SIntel 		rte_eth_dev_close(pi);
1430ce8d5614SIntel 
1431ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1432ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1433ce8d5614SIntel 			printf("Port %d cannot be set to the closed state\n", pi);
1434ce8d5614SIntel 	}
1435ce8d5614SIntel 
1436ce8d5614SIntel 	printf("Done\n");
1437ce8d5614SIntel }
1438ce8d5614SIntel 
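/* Check whether all ports are in the stopped state; return 1 if so, 0 otherwise. */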
1439ce8d5614SIntel int
1440ce8d5614SIntel all_ports_stopped(void)
1441ce8d5614SIntel {
1442ce8d5614SIntel 	portid_t pi;
1443ce8d5614SIntel 	struct rte_port *port;
1444ce8d5614SIntel 
1445ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1446ce8d5614SIntel 		port = &ports[pi];
1447ce8d5614SIntel 		if (port->port_status != RTE_PORT_STOPPED)
1448ce8d5614SIntel 			return 0;
1449ce8d5614SIntel 	}
1450ce8d5614SIntel 
1451ce8d5614SIntel 	return 1;
1452ce8d5614SIntel }
1453ce8d5614SIntel 
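/* Close every port before leaving the application. */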
1454af75078fSIntel void
1455af75078fSIntel pmd_test_exit(void)
1456af75078fSIntel {
1457af75078fSIntel 	portid_t pt_id;
1458af75078fSIntel 
1459af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1460af75078fSIntel 		printf("Stopping port %d...", pt_id);
1461af75078fSIntel 		fflush(stdout);
1462af75078fSIntel 		rte_eth_dev_close(pt_id);
1463af75078fSIntel 		printf("done\n");
1464af75078fSIntel 	}
1465af75078fSIntel 	printf("bye...\n");
1466af75078fSIntel }
1467af75078fSIntel 
1468af75078fSIntel typedef void (*cmd_func_t)(void);
1469af75078fSIntel struct pmd_test_command {
1470af75078fSIntel 	const char *cmd_name;
1471af75078fSIntel 	cmd_func_t cmd_func;
1472af75078fSIntel };
1473af75078fSIntel 
1474af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1475af75078fSIntel 
1476ce8d5614SIntel /* Check the link status of all ports in up to 9s, and print the status when done */
1477af75078fSIntel static void
1478ce8d5614SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1479af75078fSIntel {
1480ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
1481ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1482ce8d5614SIntel 	uint8_t portid, count, all_ports_up, print_flag = 0;
1483ce8d5614SIntel 	struct rte_eth_link link;
1484ce8d5614SIntel 
1485ce8d5614SIntel 	printf("Checking link statuses...\n");
1486ce8d5614SIntel 	fflush(stdout);
1487ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1488ce8d5614SIntel 		all_ports_up = 1;
1489ce8d5614SIntel 		for (portid = 0; portid < port_num; portid++) {
1490ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
1491ce8d5614SIntel 				continue;
1492ce8d5614SIntel 			memset(&link, 0, sizeof(link));
1493ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
1494ce8d5614SIntel 			/* print link status if flag set */
1495ce8d5614SIntel 			if (print_flag == 1) {
1496ce8d5614SIntel 				if (link.link_status)
1497ce8d5614SIntel 					printf("Port %d Link Up - speed %u "
1498ce8d5614SIntel 						"Mbps - %s\n", (uint8_t)portid,
1499ce8d5614SIntel 						(unsigned)link.link_speed,
1500ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1501ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
1502ce8d5614SIntel 				else
1503ce8d5614SIntel 					printf("Port %d Link Down\n",
1504ce8d5614SIntel 						(uint8_t)portid);
1505ce8d5614SIntel 				continue;
1506ce8d5614SIntel 			}
1507ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
1508ce8d5614SIntel 			if (link.link_status == 0) {
1509ce8d5614SIntel 				all_ports_up = 0;
1510ce8d5614SIntel 				break;
1511ce8d5614SIntel 			}
1512ce8d5614SIntel 		}
1513ce8d5614SIntel 		/* after finally printing all link status, get out */
1514ce8d5614SIntel 		if (print_flag == 1)
1515ce8d5614SIntel 			break;
1516ce8d5614SIntel 
1517ce8d5614SIntel 		if (all_ports_up == 0) {
1518ce8d5614SIntel 			fflush(stdout);
1519ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
1520ce8d5614SIntel 		}
1521ce8d5614SIntel 
1522ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
1523ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1524ce8d5614SIntel 			print_flag = 1;
1525ce8d5614SIntel 		}
1526ce8d5614SIntel 	}
1527af75078fSIntel }
1528af75078fSIntel 
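/*
 * Program the TX queue to statistics-counter mappings configured for
 * "port_id". Returns 0 on success, or the error reported by the driver.
 */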
1529013af9b6SIntel static int
1530013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1531af75078fSIntel {
1532013af9b6SIntel 	uint16_t i;
1533af75078fSIntel 	int diag;
1534013af9b6SIntel 	uint8_t mapping_found = 0;
1535af75078fSIntel 
1536013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1537013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1538013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1539013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1540013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
1541013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
1542013af9b6SIntel 			if (diag != 0)
1543013af9b6SIntel 				return diag;
1544013af9b6SIntel 			mapping_found = 1;
1545af75078fSIntel 		}
1546013af9b6SIntel 	}
1547013af9b6SIntel 	if (mapping_found)
1548013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
1549013af9b6SIntel 	return 0;
1550013af9b6SIntel }
1551013af9b6SIntel 
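/*
 * Program the RX queue to statistics-counter mappings configured for
 * "port_id". Returns 0 on success, or the error reported by the driver.
 */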
1552013af9b6SIntel static int
1553013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1554013af9b6SIntel {
1555013af9b6SIntel 	uint16_t i;
1556013af9b6SIntel 	int diag;
1557013af9b6SIntel 	uint8_t mapping_found = 0;
1558013af9b6SIntel 
1559013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1560013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1561013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1562013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1563013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
1564013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
1565013af9b6SIntel 			if (diag != 0)
1566013af9b6SIntel 				return diag;
1567013af9b6SIntel 			mapping_found = 1;
1568013af9b6SIntel 		}
1569013af9b6SIntel 	}
1570013af9b6SIntel 	if (mapping_found)
1571013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
1572013af9b6SIntel 	return 0;
1573013af9b6SIntel }
1574013af9b6SIntel 
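/*
 * Apply the TX and RX queue stats mappings of port "pi". A -ENOTSUP return
 * only disables the feature for that port; any other error is fatal.
 */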
1575013af9b6SIntel static void
1576013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1577013af9b6SIntel {
1578013af9b6SIntel 	int diag = 0;
1579013af9b6SIntel 
1580013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1581af75078fSIntel 	if (diag != 0) {
1582013af9b6SIntel 		if (diag == -ENOTSUP) {
1583013af9b6SIntel 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1584013af9b6SIntel 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1585013af9b6SIntel 		}
1586013af9b6SIntel 		else
1587013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1588013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
1589013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1590af75078fSIntel 					pi, diag);
1591af75078fSIntel 	}
1592013af9b6SIntel 
1593013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1594af75078fSIntel 	if (diag != 0) {
1595013af9b6SIntel 		if (diag == -ENOTSUP) {
1596013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
1597013af9b6SIntel 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1598013af9b6SIntel 		}
1599013af9b6SIntel 		else
1600013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1601013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
1602013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1603af75078fSIntel 					pi, diag);
1604af75078fSIntel 	}
1605af75078fSIntel }
1606af75078fSIntel 
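/* Apply the default device, RX and TX configuration to every probed port. */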
1607013af9b6SIntel void
1608013af9b6SIntel init_port_config(void)
1609013af9b6SIntel {
1610013af9b6SIntel 	portid_t pid;
1611013af9b6SIntel 	struct rte_port *port;
1612013af9b6SIntel 
1613013af9b6SIntel 	for (pid = 0; pid < nb_ports; pid++) {
1614013af9b6SIntel 		port = &ports[pid];
1615013af9b6SIntel 		port->dev_conf.rxmode = rx_mode;
1616013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
16173ce690d3SBruce Richardson 		if (nb_rxq > 1) {
1618013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1619013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1620af75078fSIntel 		} else {
1621013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1622013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1623af75078fSIntel 		}
16243ce690d3SBruce Richardson 
16253ce690d3SBruce Richardson 		/* In SR-IOV mode, RSS mode is not available */
16263ce690d3SBruce Richardson 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
16273ce690d3SBruce Richardson 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
16283ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
16293ce690d3SBruce Richardson 			else
16303ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
16313ce690d3SBruce Richardson 		}
16323ce690d3SBruce Richardson 
1633013af9b6SIntel 		port->rx_conf.rx_thresh = rx_thresh;
1634013af9b6SIntel 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1635013af9b6SIntel 		port->rx_conf.rx_drop_en = rx_drop_en;
1636013af9b6SIntel 		port->tx_conf.tx_thresh = tx_thresh;
1637013af9b6SIntel 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1638013af9b6SIntel 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1639013af9b6SIntel 		port->tx_conf.txq_flags = txq_flags;
1640013af9b6SIntel 
1641013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
1642013af9b6SIntel 
1643013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
16447b7e5ba7SIntel #ifdef RTE_NIC_BYPASS
16457b7e5ba7SIntel 		rte_eth_dev_bypass_init(pid);
16467b7e5ba7SIntel #endif
1647013af9b6SIntel 	}
1648013af9b6SIntel }
1649013af9b6SIntel 
1650013af9b6SIntel const uint16_t vlan_tags[] = {
1651013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
1652013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
1653013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
1654013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
1655013af9b6SIntel };
1656013af9b6SIntel 
1657013af9b6SIntel static int
1658013af9b6SIntel get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1659013af9b6SIntel {
1660013af9b6SIntel 	uint8_t i;
1661af75078fSIntel 
1662af75078fSIntel 	/*
1663013af9b6SIntel 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
1664013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
1665af75078fSIntel 	 */
1666013af9b6SIntel 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1667013af9b6SIntel 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1668013af9b6SIntel 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1669013af9b6SIntel 
1670013af9b6SIntel 		/* VMDQ+DCB RX and TX configurations */
1671013af9b6SIntel 		vmdq_rx_conf.enable_default_pool = 0;
1672013af9b6SIntel 		vmdq_rx_conf.default_pool = 0;
1673013af9b6SIntel 		vmdq_rx_conf.nb_queue_pools =
1674013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1675013af9b6SIntel 		vmdq_tx_conf.nb_queue_pools =
1676013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1677013af9b6SIntel 
1678013af9b6SIntel 		vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1679013af9b6SIntel 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1680013af9b6SIntel 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1681013af9b6SIntel 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1682af75078fSIntel 		}
1683013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1684013af9b6SIntel 			vmdq_rx_conf.dcb_queue[i] = i;
1685013af9b6SIntel 			vmdq_tx_conf.dcb_queue[i] = i;
1686013af9b6SIntel 		}
1687013af9b6SIntel 
1688013af9b6SIntel 		/* set DCB mode of RX and TX of multiple queues */
168932e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
169032e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1691013af9b6SIntel 		if (dcb_conf->pfc_en)
1692013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1693013af9b6SIntel 		else
1694013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1695013af9b6SIntel 
1696013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1697013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1698013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1699013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1700013af9b6SIntel 	}
1701013af9b6SIntel 	else {
1702013af9b6SIntel 		struct rte_eth_dcb_rx_conf rx_conf;
1703013af9b6SIntel 		struct rte_eth_dcb_tx_conf tx_conf;
1704013af9b6SIntel 
1705013af9b6SIntel 		/* queue mapping configuration of DCB RX and TX */
1706013af9b6SIntel 		if (dcb_conf->num_tcs == ETH_4_TCS)
1707013af9b6SIntel 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1708013af9b6SIntel 		else
1709013af9b6SIntel 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1710013af9b6SIntel 
1711013af9b6SIntel 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1712013af9b6SIntel 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1713013af9b6SIntel 
1714013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1715013af9b6SIntel 			rx_conf.dcb_queue[i] = i;
1716013af9b6SIntel 			tx_conf.dcb_queue[i] = i;
1717013af9b6SIntel 		}
171832e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
171932e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1720013af9b6SIntel 		if (dcb_conf->pfc_en)
1721013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1722013af9b6SIntel 		else
1723013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1724013af9b6SIntel 
1725013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1726013af9b6SIntel                                 sizeof(struct rte_eth_dcb_rx_conf)));
1727013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1728013af9b6SIntel                                 sizeof(struct rte_eth_dcb_tx_conf)));
1729013af9b6SIntel 	}
1730013af9b6SIntel 
1731013af9b6SIntel 	return 0;
1732013af9b6SIntel }
1733013af9b6SIntel 
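/*
 * Configure port "pid" for DCB operation: force the RX/TX queue counts
 * required by DCB, build the device configuration from "dcb_conf" and
 * enable VLAN filtering for the vlan_tags set above.
 */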
1734013af9b6SIntel int
1735013af9b6SIntel init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1736013af9b6SIntel {
1737013af9b6SIntel 	struct rte_eth_conf port_conf;
1738013af9b6SIntel 	struct rte_port *rte_port;
1739013af9b6SIntel 	int retval;
1740013af9b6SIntel 	uint16_t nb_vlan;
1741013af9b6SIntel 	uint16_t i;
1742013af9b6SIntel 
1743013af9b6SIntel 	/* rxq and txq configuration in dcb mode */
1744013af9b6SIntel 	nb_rxq = 128;
1745013af9b6SIntel 	nb_txq = 128;
1746013af9b6SIntel 	rx_free_thresh = 64;
1747013af9b6SIntel 
1748013af9b6SIntel 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1749013af9b6SIntel 	/* Enter DCB configuration status */
1750013af9b6SIntel 	dcb_config = 1;
1751013af9b6SIntel 
1752013af9b6SIntel 	nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1753013af9b6SIntel 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
1754013af9b6SIntel 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1755013af9b6SIntel 	if (retval < 0)
1756013af9b6SIntel 		return retval;
1757013af9b6SIntel 
1758013af9b6SIntel 	rte_port = &ports[pid];
1759013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1760013af9b6SIntel 
1761013af9b6SIntel 	rte_port->rx_conf.rx_thresh = rx_thresh;
1762013af9b6SIntel 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1763013af9b6SIntel 	rte_port->tx_conf.tx_thresh = tx_thresh;
1764013af9b6SIntel 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1765013af9b6SIntel 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1766013af9b6SIntel 	/* VLAN filter */
1767013af9b6SIntel 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1768013af9b6SIntel 	for (i = 0; i < nb_vlan; i++) {
1769013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
1770013af9b6SIntel 	}
1771013af9b6SIntel 
1772013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1773013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
1774013af9b6SIntel 
17757741e4cfSIntel 	rte_port->dcb_flag = 1;
17767741e4cfSIntel 
1777013af9b6SIntel 	return 0;
1778af75078fSIntel }
1779af75078fSIntel 
1780af75078fSIntel #ifdef RTE_EXEC_ENV_BAREMETAL
1781af75078fSIntel #define main _main
1782af75078fSIntel #endif
1783af75078fSIntel 
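/*
 * Application entry point: initialize the EAL and the poll-mode drivers,
 * configure and start all probed ports, then run the interactive command
 * line or start packet forwarding directly.
 */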
1784af75078fSIntel int
1785af75078fSIntel main(int argc, char** argv)
1786af75078fSIntel {
1787af75078fSIntel 	int  diag;
1788013af9b6SIntel 	uint8_t port_id;
1789af75078fSIntel 
1790af75078fSIntel 	diag = rte_eal_init(argc, argv);
1791af75078fSIntel 	if (diag < 0)
1792af75078fSIntel 		rte_panic("Cannot init EAL\n");
1793af75078fSIntel 
179469d22b8eSIntel 	if (rte_pmd_init_all())
179569d22b8eSIntel 		rte_panic("Cannot init PMD\n");
1796af75078fSIntel 
1797af75078fSIntel 	if (rte_eal_pci_probe())
1798af75078fSIntel 		rte_panic("Cannot probe PCI\n");
1799af75078fSIntel 
1800af75078fSIntel 	nb_ports = (portid_t) rte_eth_dev_count();
1801af75078fSIntel 	if (nb_ports == 0)
1802013af9b6SIntel 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1803013af9b6SIntel 							"check that "
1804af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
180569d22b8eSIntel 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1806af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1807af75078fSIntel 			  "configuration file\n");
1808af75078fSIntel 
1809af75078fSIntel 	set_def_fwd_config();
1810af75078fSIntel 	if (nb_lcores == 0)
1811af75078fSIntel 		rte_panic("Empty set of forwarding logical cores - check the "
1812af75078fSIntel 			  "core mask supplied in the command parameters\n");
1813af75078fSIntel 
1814af75078fSIntel 	argc -= diag;
1815af75078fSIntel 	argv += diag;
1816af75078fSIntel 	if (argc > 1)
1817af75078fSIntel 		launch_args_parse(argc, argv);
1818af75078fSIntel 
1819af75078fSIntel 	if (nb_rxq > nb_txq)
1820af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1821af75078fSIntel 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1822af75078fSIntel 		       nb_rxq, nb_txq);
1823af75078fSIntel 
1824af75078fSIntel 	init_config();
1825148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
1826148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1827af75078fSIntel 
1828ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
1829ce8d5614SIntel 	for (port_id = 0; port_id < nb_ports; port_id++)
1830ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
1831af75078fSIntel 
18320d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
1833af75078fSIntel 	if (interactive == 1)
1834af75078fSIntel 		prompt();
18350d56cb81SThomas Monjalon 	else
18360d56cb81SThomas Monjalon #endif
18370d56cb81SThomas Monjalon 	{
1838af75078fSIntel 		char c;
1839af75078fSIntel 		int rc;
1840af75078fSIntel 
1841af75078fSIntel 		printf("No command line given, starting packet forwarding\n");
1842af75078fSIntel 		start_packet_forwarding(0);
1843af75078fSIntel 		printf("Press enter to exit\n");
1844af75078fSIntel 		rc = read(0, &c, 1);
1845af75078fSIntel 		if (rc < 0)
1846af75078fSIntel 			return 1;
1847af75078fSIntel 	}
1848af75078fSIntel 
1849af75078fSIntel 	return 0;
1850af75078fSIntel }
1851