xref: /dpdk/app/test-pmd/testpmd.c (revision ca7feb2273665ac7544979461f83dbde3ed69c22)
1af75078fSIntel /*-
2af75078fSIntel  *   BSD LICENSE
3af75078fSIntel  *
4e9d48c00SBruce Richardson  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5af75078fSIntel  *   All rights reserved.
6af75078fSIntel  *
7af75078fSIntel  *   Redistribution and use in source and binary forms, with or without
8af75078fSIntel  *   modification, are permitted provided that the following conditions
9af75078fSIntel  *   are met:
10af75078fSIntel  *
11af75078fSIntel  *     * Redistributions of source code must retain the above copyright
12af75078fSIntel  *       notice, this list of conditions and the following disclaimer.
13af75078fSIntel  *     * Redistributions in binary form must reproduce the above copyright
14af75078fSIntel  *       notice, this list of conditions and the following disclaimer in
15af75078fSIntel  *       the documentation and/or other materials provided with the
16af75078fSIntel  *       distribution.
17af75078fSIntel  *     * Neither the name of Intel Corporation nor the names of its
18af75078fSIntel  *       contributors may be used to endorse or promote products derived
19af75078fSIntel  *       from this software without specific prior written permission.
20af75078fSIntel  *
21af75078fSIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22af75078fSIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23af75078fSIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24af75078fSIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25af75078fSIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26af75078fSIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27af75078fSIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28af75078fSIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29af75078fSIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30af75078fSIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31af75078fSIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32af75078fSIntel  */
33af75078fSIntel 
34af75078fSIntel #include <stdarg.h>
35af75078fSIntel #include <stdio.h>
36af75078fSIntel #include <stdlib.h>
37af75078fSIntel #include <signal.h>
38af75078fSIntel #include <string.h>
39af75078fSIntel #include <time.h>
40af75078fSIntel #include <fcntl.h>
41af75078fSIntel #include <sys/types.h>
42af75078fSIntel #include <errno.h>
43af75078fSIntel 
44af75078fSIntel #include <sys/queue.h>
45af75078fSIntel #include <sys/stat.h>
46af75078fSIntel 
47af75078fSIntel #include <stdint.h>
48af75078fSIntel #include <unistd.h>
49af75078fSIntel #include <inttypes.h>
50af75078fSIntel 
51af75078fSIntel #include <rte_common.h>
52af75078fSIntel #include <rte_byteorder.h>
53af75078fSIntel #include <rte_log.h>
54af75078fSIntel #include <rte_debug.h>
55af75078fSIntel #include <rte_cycles.h>
56af75078fSIntel #include <rte_memory.h>
57af75078fSIntel #include <rte_memcpy.h>
58af75078fSIntel #include <rte_memzone.h>
59af75078fSIntel #include <rte_launch.h>
60af75078fSIntel #include <rte_tailq.h>
61af75078fSIntel #include <rte_eal.h>
62af75078fSIntel #include <rte_per_lcore.h>
63af75078fSIntel #include <rte_lcore.h>
64af75078fSIntel #include <rte_atomic.h>
65af75078fSIntel #include <rte_branch_prediction.h>
66af75078fSIntel #include <rte_ring.h>
67af75078fSIntel #include <rte_mempool.h>
68af75078fSIntel #include <rte_malloc.h>
69af75078fSIntel #include <rte_mbuf.h>
70af75078fSIntel #include <rte_interrupts.h>
71af75078fSIntel #include <rte_pci.h>
72af75078fSIntel #include <rte_ether.h>
73af75078fSIntel #include <rte_ethdev.h>
74af75078fSIntel #include <rte_string_fns.h>
75148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
76148f963fSBruce Richardson #include <rte_eth_xenvirt.h>
77148f963fSBruce Richardson #endif
78af75078fSIntel 
79af75078fSIntel #include "testpmd.h"
80148f963fSBruce Richardson #include "mempool_osdep.h"
81af75078fSIntel 
82af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
83af75078fSIntel 
84af75078fSIntel /* Use the master lcore for the command line? */
85af75078fSIntel uint8_t interactive = 0;
86*ca7feb22SCyril Chemparathy uint8_t auto_start = 0;
87af75078fSIntel 
88af75078fSIntel /*
89af75078fSIntel  * NUMA support configuration.
90af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
91af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92af75078fSIntel  * probed ports among the CPU sockets 0 and 1.
93af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
94af75078fSIntel  */
95af75078fSIntel uint8_t numa_support = 0; /**< No numa support by default */
96af75078fSIntel 
97af75078fSIntel /*
98b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99b6ea6408SIntel  * not configured.
100b6ea6408SIntel  */
101b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
102b6ea6408SIntel 
103b6ea6408SIntel /*
104148f963fSBruce Richardson  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
105148f963fSBruce Richardson  */
106148f963fSBruce Richardson uint8_t mp_anon = 0;
107148f963fSBruce Richardson 
108148f963fSBruce Richardson /*
109af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
110af75078fSIntel  * forwarded.
111af75078fSIntel  * Must be instantiated with the Ethernet addresses of peer traffic generator
112af75078fSIntel  * ports.
113af75078fSIntel  */
114af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115af75078fSIntel portid_t nb_peer_eth_addrs = 0;
116af75078fSIntel 
117af75078fSIntel /*
118af75078fSIntel  * Probed Target Environment.
119af75078fSIntel  */
120af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
121af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
122af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124af75078fSIntel 
125af75078fSIntel /*
126af75078fSIntel  * Test Forwarding Configuration.
127af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129af75078fSIntel  */
130af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134af75078fSIntel 
135af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137af75078fSIntel 
138af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
140af75078fSIntel 
141af75078fSIntel /*
142af75078fSIntel  * Forwarding engines.
143af75078fSIntel  */
144af75078fSIntel struct fwd_engine * fwd_engines[] = {
145af75078fSIntel 	&io_fwd_engine,
146af75078fSIntel 	&mac_fwd_engine,
14757e85242SBruce Richardson 	&mac_retry_fwd_engine,
148af75078fSIntel 	&rx_only_engine,
149af75078fSIntel 	&tx_only_engine,
150af75078fSIntel 	&csum_fwd_engine,
151af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
152af75078fSIntel 	&ieee1588_fwd_engine,
153af75078fSIntel #endif
154af75078fSIntel 	NULL,
155af75078fSIntel };
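/*
 * The engine actually used is cur_fwd_eng below. It is "io" by default
 * and is normally changed at run time (for instance with the interactive
 * "set fwd <engine>" command).
 */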
156af75078fSIntel 
157af75078fSIntel struct fwd_config cur_fwd_config;
158af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
159af75078fSIntel 
160af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
161c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
162c8798818SIntel                                       * specified on the command line. */
163af75078fSIntel 
164af75078fSIntel /*
165af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
166af75078fSIntel  */
167af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
168af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
169af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
170af75078fSIntel };
171af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
172af75078fSIntel 
173af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
174af75078fSIntel uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
175af75078fSIntel 
176900550deSIntel /* Whether the current configuration is in DCB mode; 0 means it is not. */
177900550deSIntel uint8_t dcb_config = 0;
178900550deSIntel 
179900550deSIntel /* Whether DCB is in testing status. */
180900550deSIntel uint8_t dcb_test = 0;
181900550deSIntel 
182900550deSIntel /* The default queue mapping mode is DCB on and VT on. */
183900550deSIntel enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
184af75078fSIntel 
185af75078fSIntel /*
186af75078fSIntel  * Configurable number of RX/TX queues.
187af75078fSIntel  */
188af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
189af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
190af75078fSIntel 
191af75078fSIntel /*
192af75078fSIntel  * Configurable number of RX/TX ring descriptors.
193af75078fSIntel  */
194af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128
195af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512
196af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
197af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
198af75078fSIntel 
199af75078fSIntel /*
200af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
201af75078fSIntel  */
202af75078fSIntel #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
203af75078fSIntel #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
204af75078fSIntel #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
205af75078fSIntel 
206af75078fSIntel #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
207af75078fSIntel #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
208af75078fSIntel #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
209af75078fSIntel 
210af75078fSIntel struct rte_eth_thresh rx_thresh = {
211af75078fSIntel 	.pthresh = RX_PTHRESH,
212af75078fSIntel 	.hthresh = RX_HTHRESH,
213af75078fSIntel 	.wthresh = RX_WTHRESH,
214af75078fSIntel };
215af75078fSIntel 
216af75078fSIntel struct rte_eth_thresh tx_thresh = {
217af75078fSIntel 	.pthresh = TX_PTHRESH,
218af75078fSIntel 	.hthresh = TX_HTHRESH,
219af75078fSIntel 	.wthresh = TX_WTHRESH,
220af75078fSIntel };
221af75078fSIntel 
222af75078fSIntel /*
223af75078fSIntel  * Configurable value of RX free threshold.
224af75078fSIntel  */
225af75078fSIntel uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
226af75078fSIntel 
227af75078fSIntel /*
228ce8d5614SIntel  * Configurable value of RX drop enable.
229ce8d5614SIntel  */
230ce8d5614SIntel uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
231ce8d5614SIntel 
232ce8d5614SIntel /*
233af75078fSIntel  * Configurable value of TX free threshold.
234af75078fSIntel  */
235af75078fSIntel uint16_t tx_free_thresh = 0; /* Use default values. */
236af75078fSIntel 
237af75078fSIntel /*
238af75078fSIntel  * Configurable value of TX RS bit threshold.
239af75078fSIntel  */
240af75078fSIntel uint16_t tx_rs_thresh = 0; /* Use default values. */
241af75078fSIntel 
242af75078fSIntel /*
243ce8d5614SIntel  * Configurable value of TX queue flags.
244ce8d5614SIntel  */
245ce8d5614SIntel uint32_t txq_flags = 0; /* No flags set. */
246ce8d5614SIntel 
247ce8d5614SIntel /*
248af75078fSIntel  * Receive Side Scaling (RSS) configuration.
249af75078fSIntel  */
250af75078fSIntel uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
251af75078fSIntel 
252af75078fSIntel /*
253af75078fSIntel  * Port topology configuration
254af75078fSIntel  */
255af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
256af75078fSIntel 
2577741e4cfSIntel /*
2587741e4cfSIntel  * When set, avoid flushing all the RX streams before starting forwarding.
2597741e4cfSIntel  */
2607741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
2617741e4cfSIntel 
262af75078fSIntel /*
2637b7e5ba7SIntel  * NIC bypass mode configuration options.
2647b7e5ba7SIntel  */
2657b7e5ba7SIntel #ifdef RTE_NIC_BYPASS
2667b7e5ba7SIntel 
2677b7e5ba7SIntel /* The NIC bypass watchdog timeout. */
2687b7e5ba7SIntel uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
2697b7e5ba7SIntel 
2707b7e5ba7SIntel #endif
2717b7e5ba7SIntel 
2727b7e5ba7SIntel /*
273af75078fSIntel  * Ethernet device configuration.
274af75078fSIntel  */
275af75078fSIntel struct rte_eth_rxmode rx_mode = {
276af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277af75078fSIntel 	.split_hdr_size = 0,
278af75078fSIntel 	.header_split   = 0, /**< Header Split disabled. */
279af75078fSIntel 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
280af75078fSIntel 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
281a47aa8b9SIntel 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
282a47aa8b9SIntel 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
283af75078fSIntel 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
284af75078fSIntel 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
285af75078fSIntel };
286af75078fSIntel 
287af75078fSIntel struct rte_fdir_conf fdir_conf = {
288af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
289af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
290af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
291af75078fSIntel 	.flexbytes_offset = 0x6,
292af75078fSIntel 	.drop_queue = 127,
293af75078fSIntel };
294af75078fSIntel 
295af75078fSIntel static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
296af75078fSIntel 
297ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
298ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
299ed30d9b6SIntel 
300ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
301ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
302ed30d9b6SIntel 
303ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
304ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
305ed30d9b6SIntel 
306ed30d9b6SIntel /* Forward function declarations */
307ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
308ce8d5614SIntel static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
309ce8d5614SIntel 
310ce8d5614SIntel /*
311ce8d5614SIntel  * Check if all the ports are started.
312ce8d5614SIntel  * If yes, return a positive value; otherwise, return zero.
313ce8d5614SIntel  */
314ce8d5614SIntel static int all_ports_started(void);
315ed30d9b6SIntel 
316af75078fSIntel /*
317af75078fSIntel  * Setup default configuration.
318af75078fSIntel  */
319af75078fSIntel static void
320af75078fSIntel set_default_fwd_lcores_config(void)
321af75078fSIntel {
322af75078fSIntel 	unsigned int i;
323af75078fSIntel 	unsigned int nb_lc;
324af75078fSIntel 
325af75078fSIntel 	nb_lc = 0;
326af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
327af75078fSIntel 		if (! rte_lcore_is_enabled(i))
328af75078fSIntel 			continue;
329af75078fSIntel 		if (i == rte_get_master_lcore())
330af75078fSIntel 			continue;
331af75078fSIntel 		fwd_lcores_cpuids[nb_lc++] = i;
332af75078fSIntel 	}
333af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
334af75078fSIntel 	nb_cfg_lcores = nb_lcores;
335af75078fSIntel 	nb_fwd_lcores = 1;
336af75078fSIntel }
337af75078fSIntel 
338af75078fSIntel static void
339af75078fSIntel set_def_peer_eth_addrs(void)
340af75078fSIntel {
341af75078fSIntel 	portid_t i;
342af75078fSIntel 
343af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
344af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
345af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
346af75078fSIntel 	}
347af75078fSIntel }
348af75078fSIntel 
349af75078fSIntel static void
350af75078fSIntel set_default_fwd_ports_config(void)
351af75078fSIntel {
352af75078fSIntel 	portid_t pt_id;
353af75078fSIntel 
354af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
355af75078fSIntel 		fwd_ports_ids[pt_id] = pt_id;
356af75078fSIntel 
357af75078fSIntel 	nb_cfg_ports = nb_ports;
358af75078fSIntel 	nb_fwd_ports = nb_ports;
359af75078fSIntel }
360af75078fSIntel 
361af75078fSIntel void
362af75078fSIntel set_def_fwd_config(void)
363af75078fSIntel {
364af75078fSIntel 	set_default_fwd_lcores_config();
365af75078fSIntel 	set_def_peer_eth_addrs();
366af75078fSIntel 	set_default_fwd_ports_config();
367af75078fSIntel }
368af75078fSIntel 
369af75078fSIntel /*
370af75078fSIntel  * Configuration initialisation done once at init time.
371af75078fSIntel  */
372af75078fSIntel struct mbuf_ctor_arg {
373af75078fSIntel 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
374af75078fSIntel 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
375af75078fSIntel };
376af75078fSIntel 
377af75078fSIntel struct mbuf_pool_ctor_arg {
378af75078fSIntel 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
379af75078fSIntel };
380af75078fSIntel 
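/*
 * Per-mbuf constructor, called once for each mbuf of a pool when the pool
 * is created: initializes the buffer address and length, the packet data
 * pointer and the remaining mbuf fields to their default values.
 */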
381af75078fSIntel static void
382af75078fSIntel testpmd_mbuf_ctor(struct rte_mempool *mp,
383af75078fSIntel 		  void *opaque_arg,
384af75078fSIntel 		  void *raw_mbuf,
385af75078fSIntel 		  __attribute__((unused)) unsigned i)
386af75078fSIntel {
387af75078fSIntel 	struct mbuf_ctor_arg *mb_ctor_arg;
388af75078fSIntel 	struct rte_mbuf    *mb;
389af75078fSIntel 
390af75078fSIntel 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
391af75078fSIntel 	mb = (struct rte_mbuf *) raw_mbuf;
392af75078fSIntel 
393e788fe1cSIvan Boule 	mb->type         = RTE_MBUF_PKT;
394af75078fSIntel 	mb->pool         = mp;
395af75078fSIntel 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
396af75078fSIntel 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
397af75078fSIntel 			mb_ctor_arg->seg_buf_offset);
398af75078fSIntel 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
399af75078fSIntel 	mb->type         = RTE_MBUF_PKT;
400af75078fSIntel 	mb->ol_flags     = 0;
401af75078fSIntel 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
402af75078fSIntel 	mb->pkt.nb_segs  = 1;
40342d71416SIntel 	mb->pkt.vlan_macip.data = 0;
404af75078fSIntel 	mb->pkt.hash.rss = 0;
405af75078fSIntel }
406af75078fSIntel 
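/*
 * Mempool constructor, called once when a pool of mbufs is created: stores
 * the mbuf data room size in the private area of the mempool, after
 * checking that this area is large enough to hold it.
 */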
407af75078fSIntel static void
408af75078fSIntel testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
409af75078fSIntel 		       void *opaque_arg)
410af75078fSIntel {
411af75078fSIntel 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
412af75078fSIntel 	struct rte_pktmbuf_pool_private *mbp_priv;
413af75078fSIntel 
414af75078fSIntel 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
415af75078fSIntel 		printf("%s(%s) private_data_size %d < %d\n",
416af75078fSIntel 		       __func__, mp->name, (int) mp->private_data_size,
417af75078fSIntel 		       (int) sizeof(struct rte_pktmbuf_pool_private));
418af75078fSIntel 		return;
419af75078fSIntel 	}
420af75078fSIntel 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
421148f963fSBruce Richardson 	mbp_priv = rte_mempool_get_priv(mp);
422af75078fSIntel 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
423af75078fSIntel }
424af75078fSIntel 
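/*
 * Create the pool of mbufs used on the given socket. Depending on the
 * build and run-time configuration, the pool is created with the Xen
 * grant allocator, with anonymous mapped memory (when mp_anon is set) or
 * with the standard rte_mempool_create().
 */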
425af75078fSIntel static void
426af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
427af75078fSIntel 		 unsigned int socket_id)
428af75078fSIntel {
429af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
430af75078fSIntel 	struct rte_mempool *rte_mp;
431af75078fSIntel 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
432af75078fSIntel 	struct mbuf_ctor_arg mb_ctor_arg;
433af75078fSIntel 	uint32_t mb_size;
434af75078fSIntel 
435af75078fSIntel 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
436af75078fSIntel 						mbuf_seg_size);
437af75078fSIntel 	mb_ctor_arg.seg_buf_offset =
438af75078fSIntel 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
439af75078fSIntel 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
440af75078fSIntel 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
441af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
442148f963fSBruce Richardson 
443148f963fSBruce Richardson #ifdef RTE_LIBRTE_PMD_XENVIRT
444148f963fSBruce Richardson 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
445af75078fSIntel                                    (unsigned) mb_mempool_cache,
446af75078fSIntel                                    sizeof(struct rte_pktmbuf_pool_private),
447af75078fSIntel                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
448af75078fSIntel                                    testpmd_mbuf_ctor, &mb_ctor_arg,
449af75078fSIntel                                    socket_id, 0);
450148f963fSBruce Richardson 
451148f963fSBruce Richardson 
452148f963fSBruce Richardson 
453148f963fSBruce Richardson #else
454148f963fSBruce Richardson 	if (mp_anon != 0)
455148f963fSBruce Richardson 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
456148f963fSBruce Richardson 				    (unsigned) mb_mempool_cache,
457148f963fSBruce Richardson 				    sizeof(struct rte_pktmbuf_pool_private),
458148f963fSBruce Richardson 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
459148f963fSBruce Richardson 				    testpmd_mbuf_ctor, &mb_ctor_arg,
460148f963fSBruce Richardson 				    socket_id, 0);
461148f963fSBruce Richardson 	else
462148f963fSBruce Richardson 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
463148f963fSBruce Richardson 				    (unsigned) mb_mempool_cache,
464148f963fSBruce Richardson 				    sizeof(struct rte_pktmbuf_pool_private),
465148f963fSBruce Richardson 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
466148f963fSBruce Richardson 				    testpmd_mbuf_ctor, &mb_ctor_arg,
467148f963fSBruce Richardson 				    socket_id, 0);
468148f963fSBruce Richardson 
469148f963fSBruce Richardson #endif
470148f963fSBruce Richardson 
471af75078fSIntel 	if (rte_mp == NULL) {
472ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
473ce8d5614SIntel 						"failed\n", socket_id);
474148f963fSBruce Richardson 	} else if (verbose_level > 0) {
475148f963fSBruce Richardson 		rte_mempool_dump(rte_mp);
476af75078fSIntel 	}
477af75078fSIntel }
478af75078fSIntel 
47920a0286fSLiu Xiaofeng /*
48020a0286fSLiu Xiaofeng  * Check whether the given socket ID is valid in NUMA mode;
48120a0286fSLiu Xiaofeng  * return 0 if valid, -1 otherwise.
48220a0286fSLiu Xiaofeng  */
48320a0286fSLiu Xiaofeng static int
48420a0286fSLiu Xiaofeng check_socket_id(const unsigned int socket_id)
48520a0286fSLiu Xiaofeng {
48620a0286fSLiu Xiaofeng 	static int warning_once = 0;
48720a0286fSLiu Xiaofeng 
48820a0286fSLiu Xiaofeng 	if (socket_id >= MAX_SOCKET) {
48920a0286fSLiu Xiaofeng 		if (!warning_once && numa_support)
49020a0286fSLiu Xiaofeng 			printf("Warning: NUMA should be configured manually by"
49120a0286fSLiu Xiaofeng 			       " using --port-numa-config and"
49220a0286fSLiu Xiaofeng 			       " --ring-numa-config parameters along with"
49320a0286fSLiu Xiaofeng 			       " --numa.\n");
49420a0286fSLiu Xiaofeng 		warning_once = 1;
49520a0286fSLiu Xiaofeng 		return -1;
49620a0286fSLiu Xiaofeng 	}
49720a0286fSLiu Xiaofeng 	return 0;
49820a0286fSLiu Xiaofeng }
49920a0286fSLiu Xiaofeng 
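/*
 * One-time initialization: allocate the per-lcore forwarding contexts,
 * create the mbuf pool(s) (one per CPU socket when NUMA support is
 * enabled), allocate the array of ports, apply the default port
 * configuration and set up the forwarding streams.
 */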
500af75078fSIntel static void
501af75078fSIntel init_config(void)
502af75078fSIntel {
503ce8d5614SIntel 	portid_t pid;
504af75078fSIntel 	struct rte_port *port;
505af75078fSIntel 	struct rte_mempool *mbp;
506af75078fSIntel 	unsigned int nb_mbuf_per_pool;
507af75078fSIntel 	lcoreid_t  lc_id;
508b6ea6408SIntel 	uint8_t port_per_socket[MAX_SOCKET];
509af75078fSIntel 
510b6ea6408SIntel 	memset(port_per_socket,0,MAX_SOCKET);
511af75078fSIntel 	/* Configuration of logical cores. */
512af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
513af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
514af75078fSIntel 				CACHE_LINE_SIZE);
515af75078fSIntel 	if (fwd_lcores == NULL) {
516ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
517ce8d5614SIntel 							"failed\n", nb_lcores);
518af75078fSIntel 	}
519af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
520af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
521af75078fSIntel 					       sizeof(struct fwd_lcore),
522af75078fSIntel 					       CACHE_LINE_SIZE);
523af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
524ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
525ce8d5614SIntel 								"failed\n");
526af75078fSIntel 		}
527af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
528af75078fSIntel 	}
529af75078fSIntel 
530af75078fSIntel 	/*
531af75078fSIntel 	 * Create pools of mbufs.
532af75078fSIntel 	 * If NUMA support is disabled, create a single pool of mbufs in
533b6ea6408SIntel 	 * socket 0 memory by default.
534af75078fSIntel 	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
535c8798818SIntel 	 *
536c8798818SIntel 	 * Size the pools for the maximum values of nb_rxd and nb_txd, so that
537c8798818SIntel 	 * nb_rxd and nb_txd can be re-configured at run time.
538af75078fSIntel 	 */
539c8798818SIntel 	if (param_total_num_mbufs)
540c8798818SIntel 		nb_mbuf_per_pool = param_total_num_mbufs;
541c8798818SIntel 	else {
542c8798818SIntel 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
543c8798818SIntel 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
544b6ea6408SIntel 
545b6ea6408SIntel 		if (!numa_support)
546c8798818SIntel 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
547c8798818SIntel 	}
548af75078fSIntel 
549b6ea6408SIntel 	if (!numa_support) {
550b6ea6408SIntel 		if (socket_num == UMA_NO_CONFIG)
551b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
552b6ea6408SIntel 		else
553b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
554b6ea6408SIntel 						 socket_num);
555b6ea6408SIntel 	}
556af75078fSIntel 
557af75078fSIntel 	/* Configuration of Ethernet ports. */
558af75078fSIntel 	ports = rte_zmalloc("testpmd: ports",
559af75078fSIntel 			    sizeof(struct rte_port) * nb_ports,
560af75078fSIntel 			    CACHE_LINE_SIZE);
561af75078fSIntel 	if (ports == NULL) {
562ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
563ce8d5614SIntel 							"failed\n", nb_ports);
564af75078fSIntel 	}
565af75078fSIntel 
566ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
567ce8d5614SIntel 		port = &ports[pid];
568ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
569ce8d5614SIntel 
570b6ea6408SIntel 		if (numa_support) {
571b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
572b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
573b6ea6408SIntel 			else {
574b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
57520a0286fSLiu Xiaofeng 
57620a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
57720a0286fSLiu Xiaofeng 				if (check_socket_id(socket_id) < 0)
57820a0286fSLiu Xiaofeng 					socket_id = 0;
579b6ea6408SIntel 				port_per_socket[socket_id]++;
580b6ea6408SIntel 			}
581b6ea6408SIntel 		}
582b6ea6408SIntel 
583ce8d5614SIntel 		/* set flag to initialize port/queue */
584ce8d5614SIntel 		port->need_reconfig = 1;
585ce8d5614SIntel 		port->need_reconfig_queues = 1;
586ce8d5614SIntel 	}
587ce8d5614SIntel 
588b6ea6408SIntel 	if (numa_support) {
589b6ea6408SIntel 		uint8_t i;
590b6ea6408SIntel 		unsigned int nb_mbuf;
591ce8d5614SIntel 
592b6ea6408SIntel 		if (param_total_num_mbufs)
593b6ea6408SIntel 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
594b6ea6408SIntel 
595b6ea6408SIntel 		for (i = 0; i < MAX_SOCKET; i++) {
596b6ea6408SIntel 			nb_mbuf = (nb_mbuf_per_pool *
597b6ea6408SIntel 						port_per_socket[i]);
598b6ea6408SIntel 			if (nb_mbuf)
599b6ea6408SIntel 				mbuf_pool_create(mbuf_data_size,
600b6ea6408SIntel 						nb_mbuf,i);
601b6ea6408SIntel 		}
602b6ea6408SIntel 	}
603b6ea6408SIntel 	init_port_config();
6045886ae07SAdrien Mazarguil 
6055886ae07SAdrien Mazarguil 	/*
6065886ae07SAdrien Mazarguil 	 * Records which Mbuf pool to use by each logical core, if needed.
6075886ae07SAdrien Mazarguil 	 */
6085886ae07SAdrien Mazarguil 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
6095886ae07SAdrien Mazarguil 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
6105886ae07SAdrien Mazarguil 		if (mbp == NULL)
6115886ae07SAdrien Mazarguil 			mbp = mbuf_pool_find(0);
6125886ae07SAdrien Mazarguil 		fwd_lcores[lc_id]->mbp = mbp;
6135886ae07SAdrien Mazarguil 	}
6145886ae07SAdrien Mazarguil 
615ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
616ce8d5614SIntel 	if (init_fwd_streams() < 0)
617ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
618ce8d5614SIntel }
619ce8d5614SIntel 
620ce8d5614SIntel int
621ce8d5614SIntel init_fwd_streams(void)
622ce8d5614SIntel {
623ce8d5614SIntel 	portid_t pid;
624ce8d5614SIntel 	struct rte_port *port;
625ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
626ce8d5614SIntel 
627ce8d5614SIntel 	/* Set the socket id of each port according to whether NUMA is used. */
628ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
629ce8d5614SIntel 		port = &ports[pid];
630ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
631ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
632ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
633ce8d5614SIntel 				port->dev_info.max_rx_queues);
634ce8d5614SIntel 			return -1;
635ce8d5614SIntel 		}
636ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
637ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
638ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
639ce8d5614SIntel 				port->dev_info.max_tx_queues);
640ce8d5614SIntel 			return -1;
641ce8d5614SIntel 		}
64220a0286fSLiu Xiaofeng 		if (numa_support) {
64320a0286fSLiu Xiaofeng 			if (port_numa[pid] != NUMA_NO_CONFIG)
64420a0286fSLiu Xiaofeng 				port->socket_id = port_numa[pid];
64520a0286fSLiu Xiaofeng 			else {
646b6ea6408SIntel 				port->socket_id = rte_eth_dev_socket_id(pid);
64720a0286fSLiu Xiaofeng 
64820a0286fSLiu Xiaofeng 				/* if socket_id is invalid, set to 0 */
64920a0286fSLiu Xiaofeng 				if (check_socket_id(port->socket_id) < 0)
65020a0286fSLiu Xiaofeng 					port->socket_id = 0;
65120a0286fSLiu Xiaofeng 			}
65220a0286fSLiu Xiaofeng 		}
653b6ea6408SIntel 		else {
654b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
655af75078fSIntel 				port->socket_id = 0;
656b6ea6408SIntel 			else
657b6ea6408SIntel 				port->socket_id = socket_num;
658b6ea6408SIntel 		}
659af75078fSIntel 	}
660af75078fSIntel 
661ce8d5614SIntel 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
662ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
663ce8d5614SIntel 		return 0;
664ce8d5614SIntel 	/* clear the old */
665ce8d5614SIntel 	if (fwd_streams != NULL) {
666ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
668ce8d5614SIntel 				continue;
669ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
670ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
671af75078fSIntel 		}
672ce8d5614SIntel 		rte_free(fwd_streams);
673ce8d5614SIntel 		fwd_streams = NULL;
674ce8d5614SIntel 	}
675ce8d5614SIntel 
676ce8d5614SIntel 	/* init new */
677ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
678ce8d5614SIntel 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
679ce8d5614SIntel 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
680ce8d5614SIntel 	if (fwd_streams == NULL)
681ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
682ce8d5614SIntel 						"failed\n", nb_fwd_streams);
683ce8d5614SIntel 
684af75078fSIntel 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
685af75078fSIntel 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
686ce8d5614SIntel 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
687ce8d5614SIntel 		if (fwd_streams[sm_id] == NULL)
688ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
689ce8d5614SIntel 								" failed\n");
690af75078fSIntel 	}
691ce8d5614SIntel 
692ce8d5614SIntel 	return 0;
693af75078fSIntel }
694af75078fSIntel 
695af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
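/*
 * Display the burst size statistics recorded for an RX or TX stream:
 * the total number of bursts and the share of the most frequent burst
 * sizes.
 */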
696af75078fSIntel static void
697af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
698af75078fSIntel {
699af75078fSIntel 	unsigned int total_burst;
700af75078fSIntel 	unsigned int nb_burst;
701af75078fSIntel 	unsigned int burst_stats[3];
702af75078fSIntel 	uint16_t pktnb_stats[3];
703af75078fSIntel 	uint16_t nb_pkt;
704af75078fSIntel 	int burst_percent[3];
705af75078fSIntel 
706af75078fSIntel 	/*
707af75078fSIntel 	 * First compute the total number of packet bursts and the
708af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
709af75078fSIntel 	 */
710af75078fSIntel 	total_burst = 0;
711af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
712af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
713af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
714af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
715af75078fSIntel 		if (nb_burst == 0)
716af75078fSIntel 			continue;
717af75078fSIntel 		total_burst += nb_burst;
718af75078fSIntel 		if (nb_burst > burst_stats[0]) {
719af75078fSIntel 			burst_stats[1] = burst_stats[0];
720af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
721af75078fSIntel 			burst_stats[0] = nb_burst;
722af75078fSIntel 			pktnb_stats[0] = nb_pkt;
723af75078fSIntel 		}
724af75078fSIntel 	}
725af75078fSIntel 	if (total_burst == 0)
726af75078fSIntel 		return;
727af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
728af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
729af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
730af75078fSIntel 	if (burst_stats[0] == total_burst) {
731af75078fSIntel 		printf("]\n");
732af75078fSIntel 		return;
733af75078fSIntel 	}
734af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
735af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
736af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
737af75078fSIntel 		return;
738af75078fSIntel 	}
739af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
740af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
741af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
742af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
743af75078fSIntel 		return;
744af75078fSIntel 	}
745af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
746af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
747af75078fSIntel }
748af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
749af75078fSIntel 
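/*
 * Display the forwarding statistics of one port: RX/TX packet and drop
 * counters, bad checksum counters in csum mode, XON/XOFF pause frame
 * counters and, when queue stats mapping is enabled, the per-register
 * queue counters.
 */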
750af75078fSIntel static void
751af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
752af75078fSIntel {
753af75078fSIntel 	struct rte_port *port;
754013af9b6SIntel 	uint8_t i;
755af75078fSIntel 
756af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
757af75078fSIntel 
758af75078fSIntel 	port = &ports[port_id];
759af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
760af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
761013af9b6SIntel 
762013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
763af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
764af75078fSIntel 		       "%-"PRIu64"\n",
765af75078fSIntel 		       stats->ipackets, stats->ierrors,
766af75078fSIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
767af75078fSIntel 
768af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
769af75078fSIntel 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
770af75078fSIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
771af75078fSIntel 
772af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
773af75078fSIntel 		       "%-"PRIu64"\n",
774af75078fSIntel 		       stats->opackets, port->tx_dropped,
775af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
776af75078fSIntel 
777af75078fSIntel 		if (stats->rx_nombuf > 0)
778af75078fSIntel 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
779013af9b6SIntel 
780013af9b6SIntel 	}
781013af9b6SIntel 	else {
782013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
783013af9b6SIntel 		       "%14"PRIu64"\n",
784013af9b6SIntel 		       stats->ipackets, stats->ierrors,
785013af9b6SIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
786013af9b6SIntel 
787013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
788013af9b6SIntel 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
789013af9b6SIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
790013af9b6SIntel 
791013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
792013af9b6SIntel 		       "%14"PRIu64"\n",
793013af9b6SIntel 		       stats->opackets, port->tx_dropped,
794013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
795013af9b6SIntel 
796013af9b6SIntel 		if (stats->rx_nombuf > 0)
797013af9b6SIntel 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
798013af9b6SIntel 	}
799e659b6b4SIvan Boule 
800e659b6b4SIvan Boule 	/* Display statistics of XON/XOFF pause frames, if any. */
801e659b6b4SIvan Boule 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
802e659b6b4SIvan Boule 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
803e659b6b4SIvan Boule 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
804e659b6b4SIvan Boule 		       stats->rx_pause_xoff, stats->rx_pause_xon);
805e659b6b4SIvan Boule 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
806e659b6b4SIvan Boule 		       stats->tx_pause_xoff, stats->tx_pause_xon);
807e659b6b4SIvan Boule 	}
808e659b6b4SIvan Boule 
809af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
810af75078fSIntel 	if (port->rx_stream)
811013af9b6SIntel 		pkt_burst_stats_display("RX",
812013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
813af75078fSIntel 	if (port->tx_stream)
814013af9b6SIntel 		pkt_burst_stats_display("TX",
815013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
816af75078fSIntel #endif
817af75078fSIntel 	/* stats fdir */
818af75078fSIntel 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
819013af9b6SIntel 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
820af75078fSIntel 		       stats->fdirmiss,
821af75078fSIntel 		       stats->fdirmatch);
822af75078fSIntel 
823013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
824013af9b6SIntel 		printf("\n");
825013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
827013af9b6SIntel 			       "     RX-errors:%14"PRIu64
828013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
829013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
830013af9b6SIntel 		}
831013af9b6SIntel 		printf("\n");
832013af9b6SIntel 	}
833013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
834013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
835013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
836013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
837013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
838013af9b6SIntel 		}
839013af9b6SIntel 	}
840013af9b6SIntel 
841af75078fSIntel 	printf("  %s--------------------------------%s\n",
842af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
843af75078fSIntel }
844af75078fSIntel 
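/*
 * Display the statistics of one forwarding stream
 * (RX port/queue -> TX port/queue). Streams with no activity are skipped.
 */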
845af75078fSIntel static void
846af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
847af75078fSIntel {
848af75078fSIntel 	struct fwd_stream *fs;
849af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
850af75078fSIntel 
851af75078fSIntel 	fs = fwd_streams[stream_id];
852af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
853af75078fSIntel 	    (fs->fwd_dropped == 0))
854af75078fSIntel 		return;
855af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
856af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
857af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
858af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
859af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
860af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
861af75078fSIntel 
862af75078fSIntel 	/* if checksum mode */
863af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
864013af9b6SIntel 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
865013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
866af75078fSIntel 	}
867af75078fSIntel 
868af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
869af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
870af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
871af75078fSIntel #endif
872af75078fSIntel }
873af75078fSIntel 
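/*
 * Drain the packets left in the RX queues of the forwarding ports before
 * a new forwarding run is started. Two passes are performed, with a 10 ms
 * delay in between, to catch packets still in flight.
 */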
874af75078fSIntel static void
8757741e4cfSIntel flush_fwd_rx_queues(void)
876af75078fSIntel {
877af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
878af75078fSIntel 	portid_t  rxp;
8797741e4cfSIntel 	portid_t port_id;
880af75078fSIntel 	queueid_t rxq;
881af75078fSIntel 	uint16_t  nb_rx;
882af75078fSIntel 	uint16_t  i;
883af75078fSIntel 	uint8_t   j;
884af75078fSIntel 
885af75078fSIntel 	for (j = 0; j < 2; j++) {
8867741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
887af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
8887741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
889af75078fSIntel 				do {
8907741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
891013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
892af75078fSIntel 					for (i = 0; i < nb_rx; i++)
893af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
894af75078fSIntel 				} while (nb_rx > 0);
895af75078fSIntel 			}
896af75078fSIntel 		}
897af75078fSIntel 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
898af75078fSIntel 	}
899af75078fSIntel }
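/*
 * Main forwarding loop of a logical core: repeatedly apply the packet
 * forwarding function of the current engine to each stream assigned to
 * this lcore, until the lcore is asked to stop.
 */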
900af75078fSIntel 
901af75078fSIntel static void
902af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
903af75078fSIntel {
904af75078fSIntel 	struct fwd_stream **fsm;
905af75078fSIntel 	streamid_t nb_fs;
906af75078fSIntel 	streamid_t sm_id;
907af75078fSIntel 
908af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
909af75078fSIntel 	nb_fs = fc->stream_nb;
910af75078fSIntel 	do {
911af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
912af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
913af75078fSIntel 	} while (! fc->stopped);
914af75078fSIntel }
915af75078fSIntel 
916af75078fSIntel static int
917af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
918af75078fSIntel {
919af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
920af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
921af75078fSIntel 	return 0;
922af75078fSIntel }
923af75078fSIntel 
924af75078fSIntel /*
925af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
926af75078fSIntel  * Used to start communication flows in network loopback test configurations.
927af75078fSIntel  */
928af75078fSIntel static int
929af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
930af75078fSIntel {
931af75078fSIntel 	struct fwd_lcore *fwd_lc;
932af75078fSIntel 	struct fwd_lcore tmp_lcore;
933af75078fSIntel 
934af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
935af75078fSIntel 	tmp_lcore = *fwd_lc;
936af75078fSIntel 	tmp_lcore.stopped = 1;
937af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
938af75078fSIntel 	return 0;
939af75078fSIntel }
940af75078fSIntel 
941af75078fSIntel /*
942af75078fSIntel  * Launch packet forwarding:
943af75078fSIntel  *     - Setup per-port forwarding context.
944af75078fSIntel  *     - launch logical cores with their forwarding configuration.
945af75078fSIntel  */
946af75078fSIntel static void
947af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
948af75078fSIntel {
949af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
950af75078fSIntel 	unsigned int i;
951af75078fSIntel 	unsigned int lc_id;
952af75078fSIntel 	int diag;
953af75078fSIntel 
954af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
955af75078fSIntel 	if (port_fwd_begin != NULL) {
956af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
957af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
958af75078fSIntel 	}
959af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
960af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
961af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
962af75078fSIntel 			fwd_lcores[i]->stopped = 0;
963af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
964af75078fSIntel 						     fwd_lcores[i], lc_id);
965af75078fSIntel 			if (diag != 0)
966af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
967af75078fSIntel 				       lc_id, diag);
968af75078fSIntel 		}
969af75078fSIntel 	}
970af75078fSIntel }
971af75078fSIntel 
972af75078fSIntel /*
973af75078fSIntel  * Launch packet forwarding configuration.
974af75078fSIntel  */
975af75078fSIntel void
976af75078fSIntel start_packet_forwarding(int with_tx_first)
977af75078fSIntel {
978af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
979af75078fSIntel 	port_fwd_end_t  port_fwd_end;
980af75078fSIntel 	struct rte_port *port;
981af75078fSIntel 	unsigned int i;
982af75078fSIntel 	portid_t   pt_id;
983af75078fSIntel 	streamid_t sm_id;
984af75078fSIntel 
985ce8d5614SIntel 	if (all_ports_started() == 0) {
986ce8d5614SIntel 		printf("Not all ports were started\n");
987ce8d5614SIntel 		return;
988ce8d5614SIntel 	}
989af75078fSIntel 	if (test_done == 0) {
990af75078fSIntel 		printf("Packet forwarding already started\n");
991af75078fSIntel 		return;
992af75078fSIntel 	}
9937741e4cfSIntel 	if(dcb_test) {
9947741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
9957741e4cfSIntel 			pt_id = fwd_ports_ids[i];
9967741e4cfSIntel 			port = &ports[pt_id];
9977741e4cfSIntel 			if (!port->dcb_flag) {
9987741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
9997741e4cfSIntel                                        "be configured in this mode.\n");
1000013af9b6SIntel 				return;
1001013af9b6SIntel 			}
10027741e4cfSIntel 		}
10037741e4cfSIntel 		if (nb_fwd_lcores == 1) {
10047741e4cfSIntel 			printf("In DCB mode, the number of forwarding cores "
10057741e4cfSIntel                                "should be larger than 1.\n");
10067741e4cfSIntel 			return;
10077741e4cfSIntel 		}
10087741e4cfSIntel 	}
1009af75078fSIntel 	test_done = 0;
10107741e4cfSIntel 
10117741e4cfSIntel 	if(!no_flush_rx)
10127741e4cfSIntel 		flush_fwd_rx_queues();
10137741e4cfSIntel 
1014af75078fSIntel 	fwd_config_setup();
1015af75078fSIntel 	rxtx_config_display();
1016af75078fSIntel 
1017af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1018af75078fSIntel 		pt_id = fwd_ports_ids[i];
1019af75078fSIntel 		port = &ports[pt_id];
1020af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
1021af75078fSIntel 		port->tx_dropped = 0;
1022013af9b6SIntel 
1023013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
1024af75078fSIntel 	}
1025af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1026af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
1027af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
1028af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
1029af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1030af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1031af75078fSIntel 
1032af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1033af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1034af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1035af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1036af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1037af75078fSIntel #endif
1038af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1039af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
1040af75078fSIntel #endif
1041af75078fSIntel 	}
1042af75078fSIntel 	if (with_tx_first) {
1043af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1044af75078fSIntel 		if (port_fwd_begin != NULL) {
1045af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1046af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
1047af75078fSIntel 		}
1048af75078fSIntel 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1049af75078fSIntel 		rte_eal_mp_wait_lcore();
1050af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
1051af75078fSIntel 		if (port_fwd_end != NULL) {
1052af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1053af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
1054af75078fSIntel 		}
1055af75078fSIntel 	}
1056af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
1057af75078fSIntel }
1058af75078fSIntel 
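/*
 * Stop packet forwarding: ask the forwarding lcores to stop, wait for
 * them to finish, aggregate the per-stream statistics into their ports,
 * and display the per-port and accumulated statistics.
 */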
1059af75078fSIntel void
1060af75078fSIntel stop_packet_forwarding(void)
1061af75078fSIntel {
1062af75078fSIntel 	struct rte_eth_stats stats;
1063af75078fSIntel 	struct rte_port *port;
1064af75078fSIntel 	port_fwd_end_t  port_fwd_end;
1065af75078fSIntel 	int i;
1066af75078fSIntel 	portid_t   pt_id;
1067af75078fSIntel 	streamid_t sm_id;
1068af75078fSIntel 	lcoreid_t  lc_id;
1069af75078fSIntel 	uint64_t total_recv;
1070af75078fSIntel 	uint64_t total_xmit;
1071af75078fSIntel 	uint64_t total_rx_dropped;
1072af75078fSIntel 	uint64_t total_tx_dropped;
1073af75078fSIntel 	uint64_t total_rx_nombuf;
1074af75078fSIntel 	uint64_t tx_dropped;
1075af75078fSIntel 	uint64_t rx_bad_ip_csum;
1076af75078fSIntel 	uint64_t rx_bad_l4_csum;
1077af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1078af75078fSIntel 	uint64_t fwd_cycles;
1079af75078fSIntel #endif
1080af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
1081af75078fSIntel 
1082ce8d5614SIntel 	if (all_ports_started() == 0) {
1083ce8d5614SIntel 		printf("Not all ports were started\n");
1084ce8d5614SIntel 		return;
1085ce8d5614SIntel 	}
1086af75078fSIntel 	if (test_done) {
1087af75078fSIntel 		printf("Packet forwarding not started\n");
1088af75078fSIntel 		return;
1089af75078fSIntel 	}
1090af75078fSIntel 	printf("Telling cores to stop...");
1091af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1092af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1093af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1094af75078fSIntel 	rte_eal_mp_wait_lcore();
1095af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1096af75078fSIntel 	if (port_fwd_end != NULL) {
1097af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1098af75078fSIntel 			pt_id = fwd_ports_ids[i];
1099af75078fSIntel 			(*port_fwd_end)(pt_id);
1100af75078fSIntel 		}
1101af75078fSIntel 	}
1102af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1103af75078fSIntel 	fwd_cycles = 0;
1104af75078fSIntel #endif
1105af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1106af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1107af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1108af75078fSIntel 			fwd_stream_stats_display(sm_id);
1109af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1110af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1111af75078fSIntel 		} else {
1112af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1113af75078fSIntel 				fwd_streams[sm_id];
1114af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1115af75078fSIntel 				fwd_streams[sm_id];
1116af75078fSIntel 		}
1117af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1118af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1119af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1120af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1121af75078fSIntel 
1122013af9b6SIntel 		rx_bad_ip_csum =
1123013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1124af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1125af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1126013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1127013af9b6SIntel 							rx_bad_ip_csum;
1128af75078fSIntel 
1129013af9b6SIntel 		rx_bad_l4_csum =
1130013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1131af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1132af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1133013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1134013af9b6SIntel 							rx_bad_l4_csum;
1135af75078fSIntel 
1136af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1137af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1138af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1139af75078fSIntel #endif
1140af75078fSIntel 	}
1141af75078fSIntel 	total_recv = 0;
1142af75078fSIntel 	total_xmit = 0;
1143af75078fSIntel 	total_rx_dropped = 0;
1144af75078fSIntel 	total_tx_dropped = 0;
1145af75078fSIntel 	total_rx_nombuf  = 0;
11467741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1147af75078fSIntel 		pt_id = fwd_ports_ids[i];
1148af75078fSIntel 
1149af75078fSIntel 		port = &ports[pt_id];
1150af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1151af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1152af75078fSIntel 		port->stats.ipackets = 0;
1153af75078fSIntel 		stats.opackets -= port->stats.opackets;
1154af75078fSIntel 		port->stats.opackets = 0;
1155af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1156af75078fSIntel 		port->stats.ibytes = 0;
1157af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1158af75078fSIntel 		port->stats.obytes = 0;
1159af75078fSIntel 		stats.ierrors  -= port->stats.ierrors;
1160af75078fSIntel 		port->stats.ierrors = 0;
1161af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1162af75078fSIntel 		port->stats.oerrors = 0;
1163af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1164af75078fSIntel 		port->stats.rx_nombuf = 0;
1165af75078fSIntel 		stats.fdirmatch -= port->stats.fdirmatch;
1166af75078fSIntel 		port->stats.fdirmatch = 0;
1167af75078fSIntel 		stats.fdirmiss -= port->stats.fdirmiss;
1168af75078fSIntel 		port->stats.fdirmiss = 0;
1169af75078fSIntel 
1170af75078fSIntel 		total_recv += stats.ipackets;
1171af75078fSIntel 		total_xmit += stats.opackets;
1172af75078fSIntel 		total_rx_dropped += stats.ierrors;
1173af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1174af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1175af75078fSIntel 
1176af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1177af75078fSIntel 	}
1178af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1179af75078fSIntel 	       "%s\n",
1180af75078fSIntel 	       acc_stats_border, acc_stats_border);
1181af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1182af75078fSIntel 	       "%-"PRIu64"\n"
1183af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1184af75078fSIntel 	       "%-"PRIu64"\n",
1185af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1186af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1187af75078fSIntel 	if (total_rx_nombuf > 0)
1188af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1189af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1190af75078fSIntel 	       "%s\n",
1191af75078fSIntel 	       acc_stats_border, acc_stats_border);
1192af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1193af75078fSIntel 	if (total_recv > 0)
1194af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1195af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1196af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1197af75078fSIntel 		       fwd_cycles, total_recv);
1198af75078fSIntel #endif
1199af75078fSIntel 	printf("\nDone.\n");
1200af75078fSIntel 	test_done = 1;
1201af75078fSIntel }
1202af75078fSIntel 
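/* Return 1 if every probed port is in the RTE_PORT_STARTED state, 0 otherwise. */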
1203ce8d5614SIntel static int
1204ce8d5614SIntel all_ports_started(void)
1205ce8d5614SIntel {
1206ce8d5614SIntel 	portid_t pi;
1207ce8d5614SIntel 	struct rte_port *port;
1208ce8d5614SIntel 
1209ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1210ce8d5614SIntel 		port = &ports[pi];
1211ce8d5614SIntel 		/* Check if there is a port which is not started */
1212ce8d5614SIntel 		if (port->port_status != RTE_PORT_STARTED)
1213ce8d5614SIntel 			return 0;
1214ce8d5614SIntel 	}
1215ce8d5614SIntel 
1216ce8d5614SIntel 	/* All ports are started */
1217ce8d5614SIntel 	return 1;
1218ce8d5614SIntel }
1219ce8d5614SIntel 
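/*
 * Start the given port, or every port when pid is outside the probed range
 * (e.g. RTE_PORT_ALL). Ports flagged as needing it are reconfigured and have
 * their RX/TX queues set up first; each started port then moves from
 * RTE_PORT_STOPPED to RTE_PORT_STARTED. Returns 0 on success, -1 on any
 * configuration failure.
 */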
1220148f963fSBruce Richardson int
1221ce8d5614SIntel start_port(portid_t pid)
1222ce8d5614SIntel {
1223ce8d5614SIntel 	int diag, need_check_link_status = 0;
1224ce8d5614SIntel 	portid_t pi;
1225ce8d5614SIntel 	queueid_t qi;
1226ce8d5614SIntel 	struct rte_port *port;
1227ce8d5614SIntel 
1228ce8d5614SIntel 	if (test_done == 0) {
1229ce8d5614SIntel 		printf("Please stop forwarding first\n");
1230148f963fSBruce Richardson 		return -1;
1231ce8d5614SIntel 	}
1232ce8d5614SIntel 
1233ce8d5614SIntel 	if (init_fwd_streams() < 0) {
1234ce8d5614SIntel 		printf("Fail from init_fwd_streams()\n");
1235148f963fSBruce Richardson 		return -1;
1236ce8d5614SIntel 	}
1237ce8d5614SIntel 
1238ce8d5614SIntel 	if (dcb_config)
1239ce8d5614SIntel 		dcb_test = 1;
1240ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1241ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1242ce8d5614SIntel 			continue;
1243ce8d5614SIntel 
1244ce8d5614SIntel 		port = &ports[pi];
1245ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1246ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1247ce8d5614SIntel 			printf("Port %d is not stopped - skipping\n", pi);
1248ce8d5614SIntel 			continue;
1249ce8d5614SIntel 		}
1250ce8d5614SIntel 
1251ce8d5614SIntel 		if (port->need_reconfig > 0) {
1252ce8d5614SIntel 			port->need_reconfig = 0;
1253ce8d5614SIntel 
12545706de65SJulien Cretin 			printf("Configuring Port %d (socket %u)\n", pi,
125520a0286fSLiu Xiaofeng 					port->socket_id);
1256ce8d5614SIntel 			/* configure port */
1257ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1258ce8d5614SIntel 						&(port->dev_conf));
1259ce8d5614SIntel 			if (diag != 0) {
1260ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1261ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1262ce8d5614SIntel 					printf("Port %d can not be set back "
1263ce8d5614SIntel 							"to stopped\n", pi);
1264ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1265ce8d5614SIntel 				/* try to reconfigure port next time */
1266ce8d5614SIntel 				port->need_reconfig = 1;
1267148f963fSBruce Richardson 				return -1;
1268ce8d5614SIntel 			}
1269ce8d5614SIntel 		}
1270ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1271ce8d5614SIntel 			port->need_reconfig_queues = 0;
1272ce8d5614SIntel 			/* setup tx queues */
1273ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1274b6ea6408SIntel 				if ((numa_support) &&
1275b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1276b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1277b6ea6408SIntel 						nb_txd, txring_numa[pi],
1278b6ea6408SIntel 						&(port->tx_conf));
1279b6ea6408SIntel 				else
1280b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1281b6ea6408SIntel 						nb_txd, port->socket_id,
1282b6ea6408SIntel 						&(port->tx_conf));
1283b6ea6408SIntel 
1284ce8d5614SIntel 				if (diag == 0)
1285ce8d5614SIntel 					continue;
1286ce8d5614SIntel 
1287ce8d5614SIntel 				/* Fail to setup tx queue, return */
1288ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1289ce8d5614SIntel 							RTE_PORT_HANDLING,
1290ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1291ce8d5614SIntel 					printf("Port %d can not be set back "
1292ce8d5614SIntel 							"to stopped\n", pi);
1293ce8d5614SIntel 				printf("Fail to configure port %d tx queues\n", pi);
1294ce8d5614SIntel 				/* try to reconfigure queues next time */
1295ce8d5614SIntel 				port->need_reconfig_queues = 1;
1296148f963fSBruce Richardson 				return -1;
1297ce8d5614SIntel 			}
1298ce8d5614SIntel 			/* setup rx queues */
1299ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
1300b6ea6408SIntel 				if ((numa_support) &&
1301b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1302b6ea6408SIntel 					struct rte_mempool * mp =
1303b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
1304b6ea6408SIntel 					if (mp == NULL) {
1305b6ea6408SIntel 						printf("Failed to setup RX queue: "
1306b6ea6408SIntel 							"No mempool allocation "
1307b6ea6408SIntel 							"on socket %d\n",
1308b6ea6408SIntel 							rxring_numa[pi]);
1309148f963fSBruce Richardson 						return -1;
1310b6ea6408SIntel 					}
1311b6ea6408SIntel 
1312b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1313b6ea6408SIntel 					     nb_rxd, rxring_numa[pi],
1314b6ea6408SIntel 					     &(port->rx_conf), mp);
1315b6ea6408SIntel 				}
1316b6ea6408SIntel 				else
1317b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1318b6ea6408SIntel 					     nb_rxd, port->socket_id,
1319b6ea6408SIntel 					     &(port->rx_conf),
1320ce8d5614SIntel 				             mbuf_pool_find(port->socket_id));
1321b6ea6408SIntel 
1322ce8d5614SIntel 				if (diag == 0)
1323ce8d5614SIntel 					continue;
1324ce8d5614SIntel 
1325b6ea6408SIntel 
1326ce8d5614SIntel 				/* Fail to setup rx queue, return */
1327ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1328ce8d5614SIntel 							RTE_PORT_HANDLING,
1329ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1330ce8d5614SIntel 					printf("Port %d can not be set back "
1331ce8d5614SIntel 							"to stopped\n", pi);
1332ce8d5614SIntel 				printf("Fail to configure port %d rx queues\n", pi);
1333ce8d5614SIntel 				/* try to reconfigure queues next time */
1334ce8d5614SIntel 				port->need_reconfig_queues = 1;
1335148f963fSBruce Richardson 				return -1;
1336ce8d5614SIntel 			}
1337ce8d5614SIntel 		}
1338ce8d5614SIntel 		/* start port */
1339ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
1340ce8d5614SIntel 			printf("Fail to start port %d\n", pi);
1341ce8d5614SIntel 
1342ce8d5614SIntel 		/* Fail to start port, set it back to stopped */
1343ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
1344ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1345ce8d5614SIntel 				printf("Port %d can not be set back to "
1346ce8d5614SIntel 							"stopped\n", pi);
1347ce8d5614SIntel 			continue;
1348ce8d5614SIntel 		}
1349ce8d5614SIntel 
1350ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1351ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1352ce8d5614SIntel 			printf("Port %d can not be set into started\n", pi);
1353ce8d5614SIntel 
1354ce8d5614SIntel 		/* at least one port started, need checking link status */
1355ce8d5614SIntel 		need_check_link_status = 1;
1356ce8d5614SIntel 	}
1357ce8d5614SIntel 
1358ce8d5614SIntel 	if (need_check_link_status)
1359ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1360ce8d5614SIntel 	else
1361ce8d5614SIntel 		printf("Please stop the ports first\n");
1362ce8d5614SIntel 
1363ce8d5614SIntel 	printf("Done\n");
1364148f963fSBruce Richardson 	return 0;
1365ce8d5614SIntel }
1366ce8d5614SIntel 
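/*
 * Stop the given port, or every port when pid is outside the probed range.
 * Only ports in the RTE_PORT_STARTED state are stopped; they are moved to
 * RTE_PORT_STOPPED and the link status is checked again.
 */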
1367ce8d5614SIntel void
1368ce8d5614SIntel stop_port(portid_t pid)
1369ce8d5614SIntel {
1370ce8d5614SIntel 	portid_t pi;
1371ce8d5614SIntel 	struct rte_port *port;
1372ce8d5614SIntel 	int need_check_link_status = 0;
1373ce8d5614SIntel 
1374ce8d5614SIntel 	if (test_done == 0) {
1375ce8d5614SIntel 		printf("Please stop forwarding first\n");
1376ce8d5614SIntel 		return;
1377ce8d5614SIntel 	}
1378ce8d5614SIntel 	if (dcb_test) {
1379ce8d5614SIntel 		dcb_test = 0;
1380ce8d5614SIntel 		dcb_config = 0;
1381ce8d5614SIntel 	}
1382ce8d5614SIntel 	printf("Stopping ports...\n");
1383ce8d5614SIntel 
1384ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1385ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1386ce8d5614SIntel 			continue;
1387ce8d5614SIntel 
1388ce8d5614SIntel 		port = &ports[pi];
1389ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1390ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1391ce8d5614SIntel 			continue;
1392ce8d5614SIntel 
1393ce8d5614SIntel 		rte_eth_dev_stop(pi);
1394ce8d5614SIntel 
1395ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1396ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1397ce8d5614SIntel 			printf("Port %d can not be set into stopped\n", pi);
1398ce8d5614SIntel 		need_check_link_status = 1;
1399ce8d5614SIntel 	}
1400ce8d5614SIntel 	if (need_check_link_status)
1401ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1402ce8d5614SIntel 
1403ce8d5614SIntel 	printf("Done\n");
1404ce8d5614SIntel }
1405ce8d5614SIntel 
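/*
 * Close the given port, or every port when pid is outside the probed range.
 * Only ports in the RTE_PORT_STOPPED state are closed; they are moved to
 * RTE_PORT_CLOSED.
 */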
1406ce8d5614SIntel void
1407ce8d5614SIntel close_port(portid_t pid)
1408ce8d5614SIntel {
1409ce8d5614SIntel 	portid_t pi;
1410ce8d5614SIntel 	struct rte_port *port;
1411ce8d5614SIntel 
1412ce8d5614SIntel 	if (test_done == 0) {
1413ce8d5614SIntel 		printf("Please stop forwarding first\n");
1414ce8d5614SIntel 		return;
1415ce8d5614SIntel 	}
1416ce8d5614SIntel 
1417ce8d5614SIntel 	printf("Closing ports...\n");
1418ce8d5614SIntel 
1419ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1420ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1421ce8d5614SIntel 			continue;
1422ce8d5614SIntel 
1423ce8d5614SIntel 		port = &ports[pi];
1424ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1425ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1426ce8d5614SIntel 			printf("Port %d is not stopped - skipping\n", pi);
1427ce8d5614SIntel 			continue;
1428ce8d5614SIntel 		}
1429ce8d5614SIntel 
1430ce8d5614SIntel 		rte_eth_dev_close(pi);
1431ce8d5614SIntel 
1432ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1433ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1434ce8d5614SIntel 			printf("Port %d can not be set into closed\n", pi);
1435ce8d5614SIntel 	}
1436ce8d5614SIntel 
1437ce8d5614SIntel 	printf("Done\n");
1438ce8d5614SIntel }
1439ce8d5614SIntel 
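/* Return 1 if every probed port is in the RTE_PORT_STOPPED state, 0 otherwise. */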
1440ce8d5614SIntel int
1441ce8d5614SIntel all_ports_stopped(void)
1442ce8d5614SIntel {
1443ce8d5614SIntel 	portid_t pi;
1444ce8d5614SIntel 	struct rte_port *port;
1445ce8d5614SIntel 
1446ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1447ce8d5614SIntel 		port = &ports[pi];
1448ce8d5614SIntel 		if (port->port_status != RTE_PORT_STOPPED)
1449ce8d5614SIntel 			return 0;
1450ce8d5614SIntel 	}
1451ce8d5614SIntel 
1452ce8d5614SIntel 	return 1;
1453ce8d5614SIntel }
1454ce8d5614SIntel 
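/* Called when the application exits: close every probed port. */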
1455af75078fSIntel void
1456af75078fSIntel pmd_test_exit(void)
1457af75078fSIntel {
1458af75078fSIntel 	portid_t pt_id;
1459af75078fSIntel 
1460af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1461af75078fSIntel 		printf("Stopping port %d...", pt_id);
1462af75078fSIntel 		fflush(stdout);
1463af75078fSIntel 		rte_eth_dev_close(pt_id);
1464af75078fSIntel 		printf("done\n");
1465af75078fSIntel 	}
1466af75078fSIntel 	printf("bye...\n");
1467af75078fSIntel }
1468af75078fSIntel 
1469af75078fSIntel typedef void (*cmd_func_t)(void);
1470af75078fSIntel struct pmd_test_command {
1471af75078fSIntel 	const char *cmd_name;
1472af75078fSIntel 	cmd_func_t cmd_func;
1473af75078fSIntel };
1474af75078fSIntel 
1475af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1476af75078fSIntel 
1477ce8d5614SIntel /* Check the link status of all ports for up to 9s, then print the final status of each */
1478af75078fSIntel static void
1479ce8d5614SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1480af75078fSIntel {
1481ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
1482ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1483ce8d5614SIntel 	uint8_t portid, count, all_ports_up, print_flag = 0;
1484ce8d5614SIntel 	struct rte_eth_link link;
1485ce8d5614SIntel 
1486ce8d5614SIntel 	printf("Checking link statuses...\n");
1487ce8d5614SIntel 	fflush(stdout);
1488ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1489ce8d5614SIntel 		all_ports_up = 1;
1490ce8d5614SIntel 		for (portid = 0; portid < port_num; portid++) {
1491ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
1492ce8d5614SIntel 				continue;
1493ce8d5614SIntel 			memset(&link, 0, sizeof(link));
1494ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
1495ce8d5614SIntel 			/* print link status if flag set */
1496ce8d5614SIntel 			if (print_flag == 1) {
1497ce8d5614SIntel 				if (link.link_status)
1498ce8d5614SIntel 					printf("Port %d Link Up - speed %u "
1499ce8d5614SIntel 						"Mbps - %s\n", (uint8_t)portid,
1500ce8d5614SIntel 						(unsigned)link.link_speed,
1501ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1502ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
1503ce8d5614SIntel 				else
1504ce8d5614SIntel 					printf("Port %d Link Down\n",
1505ce8d5614SIntel 						(uint8_t)portid);
1506ce8d5614SIntel 				continue;
1507ce8d5614SIntel 			}
1508ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
1509ce8d5614SIntel 			if (link.link_status == 0) {
1510ce8d5614SIntel 				all_ports_up = 0;
1511ce8d5614SIntel 				break;
1512ce8d5614SIntel 			}
1513ce8d5614SIntel 		}
1514ce8d5614SIntel 		/* after finally printing all link status, get out */
1515ce8d5614SIntel 		if (print_flag == 1)
1516ce8d5614SIntel 			break;
1517ce8d5614SIntel 
1518ce8d5614SIntel 		if (all_ports_up == 0) {
1519ce8d5614SIntel 			fflush(stdout);
1520ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
1521ce8d5614SIntel 		}
1522ce8d5614SIntel 
1523ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
1524ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1525ce8d5614SIntel 			print_flag = 1;
1526ce8d5614SIntel 		}
1527ce8d5614SIntel 	}
1528af75078fSIntel }
1529af75078fSIntel 
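/*
 * Apply the user-supplied TX queue to stats counter mappings on one port.
 * Returns the driver error code when rte_eth_dev_set_tx_queue_stats_mapping()
 * fails, 0 otherwise; the RX variant below behaves the same way.
 */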
1530013af9b6SIntel static int
1531013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1532af75078fSIntel {
1533013af9b6SIntel 	uint16_t i;
1534af75078fSIntel 	int diag;
1535013af9b6SIntel 	uint8_t mapping_found = 0;
1536af75078fSIntel 
1537013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1538013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1539013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1540013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1541013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
1542013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
1543013af9b6SIntel 			if (diag != 0)
1544013af9b6SIntel 				return diag;
1545013af9b6SIntel 			mapping_found = 1;
1546af75078fSIntel 		}
1547013af9b6SIntel 	}
1548013af9b6SIntel 	if (mapping_found)
1549013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
1550013af9b6SIntel 	return 0;
1551013af9b6SIntel }
1552013af9b6SIntel 
1553013af9b6SIntel static int
1554013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1555013af9b6SIntel {
1556013af9b6SIntel 	uint16_t i;
1557013af9b6SIntel 	int diag;
1558013af9b6SIntel 	uint8_t mapping_found = 0;
1559013af9b6SIntel 
1560013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1561013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1562013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1563013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1564013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
1565013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
1566013af9b6SIntel 			if (diag != 0)
1567013af9b6SIntel 				return diag;
1568013af9b6SIntel 			mapping_found = 1;
1569013af9b6SIntel 		}
1570013af9b6SIntel 	}
1571013af9b6SIntel 	if (mapping_found)
1572013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
1573013af9b6SIntel 	return 0;
1574013af9b6SIntel }
1575013af9b6SIntel 
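/*
 * Program both the TX and RX queue stats mappings of a port. A -ENOTSUP
 * return from the driver only disables the feature for that port; any other
 * error aborts the application.
 */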
1576013af9b6SIntel static void
1577013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1578013af9b6SIntel {
1579013af9b6SIntel 	int diag = 0;
1580013af9b6SIntel 
1581013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1582af75078fSIntel 	if (diag != 0) {
1583013af9b6SIntel 		if (diag == -ENOTSUP) {
1584013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
1585013af9b6SIntel 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1586013af9b6SIntel 		}
1587013af9b6SIntel 		else
1588013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1589013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
1590013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1591af75078fSIntel 					pi, diag);
1592af75078fSIntel 	}
1593013af9b6SIntel 
1594013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1595af75078fSIntel 	if (diag != 0) {
1596013af9b6SIntel 		if (diag == -ENOTSUP) {
1597013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
1598013af9b6SIntel 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1599013af9b6SIntel 		}
1600013af9b6SIntel 		else
1601013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1602013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
1603013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1604af75078fSIntel 					pi, diag);
1605af75078fSIntel 	}
1606af75078fSIntel }
1607af75078fSIntel 
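/*
 * Build the default configuration of every probed port from the global
 * command-line settings: flow director, RX/TX thresholds and RSS, which is
 * only enabled when more than one RX queue is requested and the port uses
 * neither DCB nor SR-IOV.
 */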
1608013af9b6SIntel void
1609013af9b6SIntel init_port_config(void)
1610013af9b6SIntel {
1611013af9b6SIntel 	portid_t pid;
1612013af9b6SIntel 	struct rte_port *port;
1613013af9b6SIntel 
1614013af9b6SIntel 	for (pid = 0; pid < nb_ports; pid++) {
1615013af9b6SIntel 		port = &ports[pid];
1616013af9b6SIntel 		port->dev_conf.rxmode = rx_mode;
1617013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
16183ce690d3SBruce Richardson 		if (nb_rxq > 1) {
1619013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1620013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1621af75078fSIntel 		} else {
1622013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1623013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1624af75078fSIntel 		}
16253ce690d3SBruce Richardson 
16263ce690d3SBruce Richardson 		/* In SR-IOV mode, RSS mode is not available */
16273ce690d3SBruce Richardson 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
16283ce690d3SBruce Richardson 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
16293ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
16303ce690d3SBruce Richardson 			else
16313ce690d3SBruce Richardson 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
16323ce690d3SBruce Richardson 		}
16333ce690d3SBruce Richardson 
1634013af9b6SIntel 		port->rx_conf.rx_thresh = rx_thresh;
1635013af9b6SIntel 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1636013af9b6SIntel 		port->rx_conf.rx_drop_en = rx_drop_en;
1637013af9b6SIntel 		port->tx_conf.tx_thresh = tx_thresh;
1638013af9b6SIntel 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1639013af9b6SIntel 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1640013af9b6SIntel 		port->tx_conf.txq_flags = txq_flags;
1641013af9b6SIntel 
1642013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
1643013af9b6SIntel 
1644013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
16457b7e5ba7SIntel #ifdef RTE_NIC_BYPASS
16467b7e5ba7SIntel 		rte_eth_dev_bypass_init(pid);
16477b7e5ba7SIntel #endif
1648013af9b6SIntel 	}
1649013af9b6SIntel }
1650013af9b6SIntel 
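/* VLAN tags used to map incoming traffic to pools in the VMDQ+DCB setup below. */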
1651013af9b6SIntel const uint16_t vlan_tags[] = {
1652013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
1653013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
1654013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
1655013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
1656013af9b6SIntel };
1657013af9b6SIntel 
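/*
 * Translate the requested DCB settings into an rte_eth_conf, covering both
 * the VMDQ+DCB case and the plain DCB case depending on dcb_conf->dcb_mode.
 */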
1658013af9b6SIntel static int
1659013af9b6SIntel get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1660013af9b6SIntel {
1661013af9b6SIntel 	uint8_t i;
1662af75078fSIntel 
1663af75078fSIntel 	/*
1664013af9b6SIntel 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1665013af9b6SIntel 	 * given above, and the number of traffic classes available for use.
1666af75078fSIntel 	 */
1667013af9b6SIntel 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1668013af9b6SIntel 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1669013af9b6SIntel 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1670013af9b6SIntel 
1671013af9b6SIntel 		/* VMDQ+DCB RX and TX configurations */
1672013af9b6SIntel 		vmdq_rx_conf.enable_default_pool = 0;
1673013af9b6SIntel 		vmdq_rx_conf.default_pool = 0;
1674013af9b6SIntel 		vmdq_rx_conf.nb_queue_pools =
1675013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1676013af9b6SIntel 		vmdq_tx_conf.nb_queue_pools =
1677013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1678013af9b6SIntel 
1679013af9b6SIntel 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1680013af9b6SIntel 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1681013af9b6SIntel 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1682013af9b6SIntel 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1683af75078fSIntel 		}
1684013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1685013af9b6SIntel 			vmdq_rx_conf.dcb_queue[i] = i;
1686013af9b6SIntel 			vmdq_tx_conf.dcb_queue[i] = i;
1687013af9b6SIntel 		}
1688013af9b6SIntel 
1689013af9b6SIntel 		/*set DCB mode of RX and TX of multiple queues*/
169032e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
169132e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1692013af9b6SIntel 		if (dcb_conf->pfc_en)
1693013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1694013af9b6SIntel 		else
1695013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1696013af9b6SIntel 
1697013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1698013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1699013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1700013af9b6SIntel                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1701013af9b6SIntel 	}
1702013af9b6SIntel 	else {
1703013af9b6SIntel 		struct rte_eth_dcb_rx_conf rx_conf;
1704013af9b6SIntel 		struct rte_eth_dcb_tx_conf tx_conf;
1705013af9b6SIntel 
1706013af9b6SIntel 		/* queue mapping configuration of DCB RX and TX */
1707013af9b6SIntel 		if (dcb_conf->num_tcs == ETH_4_TCS)
1708013af9b6SIntel 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1709013af9b6SIntel 		else
1710013af9b6SIntel 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1711013af9b6SIntel 
1712013af9b6SIntel 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1713013af9b6SIntel 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1714013af9b6SIntel 
1715013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1716013af9b6SIntel 			rx_conf.dcb_queue[i] = i;
1717013af9b6SIntel 			tx_conf.dcb_queue[i] = i;
1718013af9b6SIntel 		}
171932e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
172032e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1721013af9b6SIntel 		if (dcb_conf->pfc_en)
1722013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1723013af9b6SIntel 		else
1724013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1725013af9b6SIntel 
1726013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1727013af9b6SIntel                                 sizeof(struct rte_eth_dcb_rx_conf)));
1728013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1729013af9b6SIntel                                 sizeof(struct rte_eth_dcb_tx_conf)));
1730013af9b6SIntel 	}
1731013af9b6SIntel 
1732013af9b6SIntel 	return 0;
1733013af9b6SIntel }
1734013af9b6SIntel 
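/*
 * Switch one port to DCB mode: force 128 RX/TX queues, build the DCB
 * configuration with get_eth_dcb_conf(), enable VLAN filtering for every
 * entry of vlan_tags[] and mark the port as DCB enabled.
 */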
1735013af9b6SIntel int
1736013af9b6SIntel init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1737013af9b6SIntel {
1738013af9b6SIntel 	struct rte_eth_conf port_conf;
1739013af9b6SIntel 	struct rte_port *rte_port;
1740013af9b6SIntel 	int retval;
1741013af9b6SIntel 	uint16_t nb_vlan;
1742013af9b6SIntel 	uint16_t i;
1743013af9b6SIntel 
1744013af9b6SIntel 	/* rxq and txq configuration in dcb mode */
1745013af9b6SIntel 	nb_rxq = 128;
1746013af9b6SIntel 	nb_txq = 128;
1747013af9b6SIntel 	rx_free_thresh = 64;
1748013af9b6SIntel 
1749013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1750013af9b6SIntel 	/* Enter DCB configuration status */
1751013af9b6SIntel 	dcb_config = 1;
1752013af9b6SIntel 
1753013af9b6SIntel 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1754013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1755013af9b6SIntel 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1756013af9b6SIntel 	if (retval < 0)
1757013af9b6SIntel 		return retval;
1758013af9b6SIntel 
1759013af9b6SIntel 	rte_port = &ports[pid];
1760013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1761013af9b6SIntel 
1762013af9b6SIntel 	rte_port->rx_conf.rx_thresh = rx_thresh;
1763013af9b6SIntel 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1764013af9b6SIntel 	rte_port->tx_conf.tx_thresh = tx_thresh;
1765013af9b6SIntel 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1766013af9b6SIntel 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1767013af9b6SIntel 	/* VLAN filter */
1768013af9b6SIntel 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1769013af9b6SIntel 	for (i = 0; i < nb_vlan; i++){
1770013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
1771013af9b6SIntel 	}
1772013af9b6SIntel 
1773013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1774013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
1775013af9b6SIntel 
17767741e4cfSIntel 	rte_port->dcb_flag = 1;
17777741e4cfSIntel 
1778013af9b6SIntel 	return 0;
1779af75078fSIntel }
1780af75078fSIntel 
1781af75078fSIntel #ifdef RTE_EXEC_ENV_BAREMETAL
1782af75078fSIntel #define main _main
1783af75078fSIntel #endif
1784af75078fSIntel 
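/*
 * Application entry point: initialize the EAL, the poll-mode drivers and the
 * probed ports, start them, enable promiscuous mode, and then either run the
 * interactive command line or start packet forwarding until the user exits.
 */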
1785af75078fSIntel int
1786af75078fSIntel main(int argc, char** argv)
1787af75078fSIntel {
1788af75078fSIntel 	int  diag;
1789013af9b6SIntel 	uint8_t port_id;
1790af75078fSIntel 
1791af75078fSIntel 	diag = rte_eal_init(argc, argv);
1792af75078fSIntel 	if (diag < 0)
1793af75078fSIntel 		rte_panic("Cannot init EAL\n");
1794af75078fSIntel 
179569d22b8eSIntel 	if (rte_pmd_init_all())
179669d22b8eSIntel 		rte_panic("Cannot init PMD\n");
1797af75078fSIntel 
1798af75078fSIntel 	if (rte_eal_pci_probe())
1799af75078fSIntel 		rte_panic("Cannot probe PCI\n");
1800af75078fSIntel 
1801af75078fSIntel 	nb_ports = (portid_t) rte_eth_dev_count();
1802af75078fSIntel 	if (nb_ports == 0)
1803013af9b6SIntel 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1804013af9b6SIntel 							"check that "
1805af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
180669d22b8eSIntel 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1807af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1808af75078fSIntel 			  "configuration file\n");
1809af75078fSIntel 
1810af75078fSIntel 	set_def_fwd_config();
1811af75078fSIntel 	if (nb_lcores == 0)
1812af75078fSIntel 		rte_panic("Empty set of forwarding logical cores - check the "
1813af75078fSIntel 			  "core mask supplied in the command parameters\n");
1814af75078fSIntel 
1815af75078fSIntel 	argc -= diag;
1816af75078fSIntel 	argv += diag;
1817af75078fSIntel 	if (argc > 1)
1818af75078fSIntel 		launch_args_parse(argc, argv);
1819af75078fSIntel 
1820af75078fSIntel 	if (nb_rxq > nb_txq)
1821af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1822af75078fSIntel 		       "but nb_txq=%d will prevent it from being fully tested.\n",
1823af75078fSIntel 		       nb_rxq, nb_txq);
1824af75078fSIntel 
1825af75078fSIntel 	init_config();
1826148f963fSBruce Richardson 	if (start_port(RTE_PORT_ALL) != 0)
1827148f963fSBruce Richardson 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1828af75078fSIntel 
1829ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
1830ce8d5614SIntel 	for (port_id = 0; port_id < nb_ports; port_id++)
1831ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
1832af75078fSIntel 
18330d56cb81SThomas Monjalon #ifdef RTE_LIBRTE_CMDLINE
1834*ca7feb22SCyril Chemparathy 	if (interactive == 1) {
1835*ca7feb22SCyril Chemparathy 		if (auto_start) {
1836*ca7feb22SCyril Chemparathy 			printf("Start automatic packet forwarding\n");
1837*ca7feb22SCyril Chemparathy 			start_packet_forwarding(0);
1838*ca7feb22SCyril Chemparathy 		}
1839af75078fSIntel 		prompt();
1840*ca7feb22SCyril Chemparathy 	} else
18410d56cb81SThomas Monjalon #endif
18420d56cb81SThomas Monjalon 	{
1843af75078fSIntel 		char c;
1844af75078fSIntel 		int rc;
1845af75078fSIntel 
1846af75078fSIntel 		printf("No commandline core given, start packet forwarding\n");
1847af75078fSIntel 		start_packet_forwarding(0);
1848af75078fSIntel 		printf("Press enter to exit\n");
1849af75078fSIntel 		rc = read(0, &c, 1);
1850af75078fSIntel 		if (rc < 0)
1851af75078fSIntel 			return 1;
1852af75078fSIntel 	}
1853af75078fSIntel 
1854af75078fSIntel 	return 0;
1855af75078fSIntel }
1856