xref: /dpdk/app/test-pmd/testpmd.c (revision 7741e4cf16c0548e15d07b847fd96691b44efb29)
1af75078fSIntel /*-
2af75078fSIntel  *   BSD LICENSE
3af75078fSIntel  *
4b6df9fc8SIntel  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5af75078fSIntel  *   All rights reserved.
6af75078fSIntel  *
7af75078fSIntel  *   Redistribution and use in source and binary forms, with or without
8af75078fSIntel  *   modification, are permitted provided that the following conditions
9af75078fSIntel  *   are met:
10af75078fSIntel  *
11af75078fSIntel  *     * Redistributions of source code must retain the above copyright
12af75078fSIntel  *       notice, this list of conditions and the following disclaimer.
13af75078fSIntel  *     * Redistributions in binary form must reproduce the above copyright
14af75078fSIntel  *       notice, this list of conditions and the following disclaimer in
15af75078fSIntel  *       the documentation and/or other materials provided with the
16af75078fSIntel  *       distribution.
17af75078fSIntel  *     * Neither the name of Intel Corporation nor the names of its
18af75078fSIntel  *       contributors may be used to endorse or promote products derived
19af75078fSIntel  *       from this software without specific prior written permission.
20af75078fSIntel  *
21af75078fSIntel  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22af75078fSIntel  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23af75078fSIntel  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24af75078fSIntel  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25af75078fSIntel  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26af75078fSIntel  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27af75078fSIntel  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28af75078fSIntel  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29af75078fSIntel  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30af75078fSIntel  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31af75078fSIntel  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32af75078fSIntel  */
33af75078fSIntel 
34af75078fSIntel #include <stdarg.h>
35af75078fSIntel #include <stdio.h>
36af75078fSIntel #include <stdlib.h>
37af75078fSIntel #include <signal.h>
38af75078fSIntel #include <string.h>
39af75078fSIntel #include <time.h>
40af75078fSIntel #include <fcntl.h>
41af75078fSIntel #include <sys/types.h>
42af75078fSIntel #include <errno.h>
43af75078fSIntel 
44af75078fSIntel #include <sys/queue.h>
45af75078fSIntel #include <sys/stat.h>
46af75078fSIntel 
47af75078fSIntel #include <stdint.h>
48af75078fSIntel #include <unistd.h>
49af75078fSIntel #include <inttypes.h>
50af75078fSIntel 
51af75078fSIntel #include <rte_common.h>
52af75078fSIntel #include <rte_byteorder.h>
53af75078fSIntel #include <rte_log.h>
54af75078fSIntel #include <rte_debug.h>
55af75078fSIntel #include <rte_cycles.h>
56af75078fSIntel #include <rte_memory.h>
57af75078fSIntel #include <rte_memcpy.h>
58af75078fSIntel #include <rte_memzone.h>
59af75078fSIntel #include <rte_launch.h>
60af75078fSIntel #include <rte_tailq.h>
61af75078fSIntel #include <rte_eal.h>
62af75078fSIntel #include <rte_per_lcore.h>
63af75078fSIntel #include <rte_lcore.h>
64af75078fSIntel #include <rte_atomic.h>
65af75078fSIntel #include <rte_branch_prediction.h>
66af75078fSIntel #include <rte_ring.h>
67af75078fSIntel #include <rte_mempool.h>
68af75078fSIntel #include <rte_malloc.h>
69af75078fSIntel #include <rte_mbuf.h>
70af75078fSIntel #include <rte_interrupts.h>
71af75078fSIntel #include <rte_pci.h>
72af75078fSIntel #include <rte_ether.h>
73af75078fSIntel #include <rte_ethdev.h>
74af75078fSIntel #include <rte_string_fns.h>
75af75078fSIntel 
76af75078fSIntel #include "testpmd.h"
77af75078fSIntel 
78af75078fSIntel uint16_t verbose_level = 0; /**< Silent by default. */
79af75078fSIntel 
80af75078fSIntel /* Use the master core for the command line? */
81af75078fSIntel uint8_t interactive = 0;
82af75078fSIntel 
83af75078fSIntel /*
84af75078fSIntel  * NUMA support configuration.
85af75078fSIntel  * When set, the NUMA support attempts to dispatch the allocation of the
86af75078fSIntel  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
87af75078fSIntel  * probed ports among the CPU sockets on which the ports reside.
88af75078fSIntel  * Otherwise, all memory is allocated from CPU socket 0.
89af75078fSIntel  */
90af75078fSIntel uint8_t numa_support = 0; /**< No numa support by default */
91af75078fSIntel 
92af75078fSIntel /*
93b6ea6408SIntel  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
94b6ea6408SIntel  * not configured.
95b6ea6408SIntel  */
96b6ea6408SIntel uint8_t socket_num = UMA_NO_CONFIG;
97b6ea6408SIntel 
98b6ea6408SIntel /*
99af75078fSIntel  * Record the Ethernet address of peer target ports to which packets are
100af75078fSIntel  * forwarded.
101af75078fSIntel  * Must be instantiated with the Ethernet addresses of the peer traffic generator
102af75078fSIntel  * ports.
103af75078fSIntel  */
104af75078fSIntel struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105af75078fSIntel portid_t nb_peer_eth_addrs = 0;
106af75078fSIntel 
107af75078fSIntel /*
108af75078fSIntel  * Probed Target Environment.
109af75078fSIntel  */
110af75078fSIntel struct rte_port *ports;	       /**< For all probed ethernet ports. */
111af75078fSIntel portid_t nb_ports;             /**< Number of probed ethernet ports. */
112af75078fSIntel struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113af75078fSIntel lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
114af75078fSIntel 
115af75078fSIntel /*
116af75078fSIntel  * Test Forwarding Configuration.
117af75078fSIntel  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118af75078fSIntel  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
119af75078fSIntel  */
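/*
 * Example: with 8 lcores enabled (one of them the master) and 4 probed
 * ports, the defaults become nb_lcores = nb_cfg_lcores = 7, nb_fwd_lcores = 1
 * and nb_cfg_ports = nb_fwd_ports = 4 (see set_default_fwd_lcores_config()
 * and set_default_fwd_ports_config() below).
 */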
120af75078fSIntel lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121af75078fSIntel lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122af75078fSIntel portid_t  nb_cfg_ports;  /**< Number of configured ports. */
123af75078fSIntel portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
124af75078fSIntel 
125af75078fSIntel unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126af75078fSIntel portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
127af75078fSIntel 
128af75078fSIntel struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129af75078fSIntel streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
130af75078fSIntel 
131af75078fSIntel /*
132af75078fSIntel  * Forwarding engines.
133af75078fSIntel  */
134af75078fSIntel struct fwd_engine * fwd_engines[] = {
135af75078fSIntel 	&io_fwd_engine,
136af75078fSIntel 	&mac_fwd_engine,
137af75078fSIntel 	&rx_only_engine,
138af75078fSIntel 	&tx_only_engine,
139af75078fSIntel 	&csum_fwd_engine,
140af75078fSIntel #ifdef RTE_LIBRTE_IEEE1588
141af75078fSIntel 	&ieee1588_fwd_engine,
142af75078fSIntel #endif
143af75078fSIntel 	NULL,
144af75078fSIntel };
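/*
 * The forwarding engine is selected by name at run time (for example with
 * the interactive "set fwd <mode>" command); the handler scans this
 * NULL-terminated list for a matching engine name and updates cur_fwd_eng.
 */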
145af75078fSIntel 
146af75078fSIntel struct fwd_config cur_fwd_config;
147af75078fSIntel struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
148af75078fSIntel 
149af75078fSIntel uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150c8798818SIntel uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
151c8798818SIntel                                       * specified on command-line. */
152af75078fSIntel 
153af75078fSIntel /*
154af75078fSIntel  * Configuration of packet segments used by the "txonly" processing engine.
155af75078fSIntel  */
156af75078fSIntel uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157af75078fSIntel uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158af75078fSIntel 	TXONLY_DEF_PACKET_LEN,
159af75078fSIntel };
160af75078fSIntel uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
161af75078fSIntel 
162af75078fSIntel uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163af75078fSIntel uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
164af75078fSIntel 
165900550deSIntel /* Whether the current configuration is in DCB mode; 0 means non-DCB mode. */
166900550deSIntel uint8_t dcb_config = 0;
167900550deSIntel 
168900550deSIntel /* Whether DCB is in testing status. */
169900550deSIntel uint8_t dcb_test = 0;
170900550deSIntel 
171900550deSIntel /* DCB-on and VT-on queue mapping is the default. */
172900550deSIntel enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
173af75078fSIntel 
174af75078fSIntel /*
175af75078fSIntel  * Configurable number of RX/TX queues.
176af75078fSIntel  */
177af75078fSIntel queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178af75078fSIntel queueid_t nb_txq = 1; /**< Number of TX queues per port. */
179af75078fSIntel 
180af75078fSIntel /*
181af75078fSIntel  * Configurable number of RX/TX ring descriptors.
182af75078fSIntel  */
183af75078fSIntel #define RTE_TEST_RX_DESC_DEFAULT 128
184af75078fSIntel #define RTE_TEST_TX_DESC_DEFAULT 512
185af75078fSIntel uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186af75078fSIntel uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
187af75078fSIntel 
188af75078fSIntel /*
189af75078fSIntel  * Configurable values of RX and TX ring threshold registers.
190af75078fSIntel  */
191af75078fSIntel #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192af75078fSIntel #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193af75078fSIntel #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
194af75078fSIntel 
195af75078fSIntel #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196af75078fSIntel #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197af75078fSIntel #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
198af75078fSIntel 
199af75078fSIntel struct rte_eth_thresh rx_thresh = {
200af75078fSIntel 	.pthresh = RX_PTHRESH,
201af75078fSIntel 	.hthresh = RX_HTHRESH,
202af75078fSIntel 	.wthresh = RX_WTHRESH,
203af75078fSIntel };
204af75078fSIntel 
205af75078fSIntel struct rte_eth_thresh tx_thresh = {
206af75078fSIntel 	.pthresh = TX_PTHRESH,
207af75078fSIntel 	.hthresh = TX_HTHRESH,
208af75078fSIntel 	.wthresh = TX_WTHRESH,
209af75078fSIntel };
210af75078fSIntel 
211af75078fSIntel /*
212af75078fSIntel  * Configurable value of RX free threshold.
213af75078fSIntel  */
214af75078fSIntel uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
215af75078fSIntel 
216af75078fSIntel /*
217ce8d5614SIntel  * Configurable value of RX drop enable.
218ce8d5614SIntel  */
219ce8d5614SIntel uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
220ce8d5614SIntel 
221ce8d5614SIntel /*
222af75078fSIntel  * Configurable value of TX free threshold.
223af75078fSIntel  */
224af75078fSIntel uint16_t tx_free_thresh = 0; /* Use default values. */
225af75078fSIntel 
226af75078fSIntel /*
227af75078fSIntel  * Configurable value of TX RS bit threshold.
228af75078fSIntel  */
229af75078fSIntel uint16_t tx_rs_thresh = 0; /* Use default values. */
230af75078fSIntel 
231af75078fSIntel /*
232ce8d5614SIntel  * Configurable value of TX queue flags.
233ce8d5614SIntel  */
234ce8d5614SIntel uint32_t txq_flags = 0; /* No flags set. */
235ce8d5614SIntel 
236ce8d5614SIntel /*
237af75078fSIntel  * Receive Side Scaling (RSS) configuration.
238af75078fSIntel  */
239af75078fSIntel uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
240af75078fSIntel 
241af75078fSIntel /*
242af75078fSIntel  * Port topology configuration
243af75078fSIntel  */
244af75078fSIntel uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
245af75078fSIntel 
246*7741e4cfSIntel 
247*7741e4cfSIntel /*
248*7741e4cfSIntel  * Avoid flushing all the RX streams before starting forwarding.
249*7741e4cfSIntel  */
250*7741e4cfSIntel uint8_t no_flush_rx = 0; /* flush by default */
251*7741e4cfSIntel 
252af75078fSIntel /*
253af75078fSIntel  * Ethernet device configuration.
254af75078fSIntel  */
255af75078fSIntel struct rte_eth_rxmode rx_mode = {
256af75078fSIntel 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
257af75078fSIntel 	.split_hdr_size = 0,
258af75078fSIntel 	.header_split   = 0, /**< Header Split disabled. */
259af75078fSIntel 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
260af75078fSIntel 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
261a47aa8b9SIntel 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
262a47aa8b9SIntel 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
263af75078fSIntel 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
264af75078fSIntel 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
265af75078fSIntel };
266af75078fSIntel 
267af75078fSIntel struct rte_fdir_conf fdir_conf = {
268af75078fSIntel 	.mode = RTE_FDIR_MODE_NONE,
269af75078fSIntel 	.pballoc = RTE_FDIR_PBALLOC_64K,
270af75078fSIntel 	.status = RTE_FDIR_REPORT_STATUS,
271af75078fSIntel 	.flexbytes_offset = 0x6,
272af75078fSIntel 	.drop_queue = 127,
273af75078fSIntel };
274af75078fSIntel 
275af75078fSIntel static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
276af75078fSIntel 
277ed30d9b6SIntel struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
278ed30d9b6SIntel struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
279ed30d9b6SIntel 
280ed30d9b6SIntel struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
281ed30d9b6SIntel struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
282ed30d9b6SIntel 
283ed30d9b6SIntel uint16_t nb_tx_queue_stats_mappings = 0;
284ed30d9b6SIntel uint16_t nb_rx_queue_stats_mappings = 0;
285ed30d9b6SIntel 
286ed30d9b6SIntel /* Forward function declarations */
287ed30d9b6SIntel static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
288ce8d5614SIntel static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
289ce8d5614SIntel 
290ce8d5614SIntel /*
291ce8d5614SIntel  * Check if all the ports are started.
292ce8d5614SIntel  * If yes, return positive value. If not, return zero.
293ce8d5614SIntel  */
294ce8d5614SIntel static int all_ports_started(void);
295ed30d9b6SIntel 
296af75078fSIntel /*
297af75078fSIntel  * Setup default configuration.
298af75078fSIntel  */
299af75078fSIntel static void
300af75078fSIntel set_default_fwd_lcores_config(void)
301af75078fSIntel {
302af75078fSIntel 	unsigned int i;
303af75078fSIntel 	unsigned int nb_lc;
304af75078fSIntel 
305af75078fSIntel 	nb_lc = 0;
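	/* Use every enabled lcore except the master core. */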
306af75078fSIntel 	for (i = 0; i < RTE_MAX_LCORE; i++) {
307af75078fSIntel 		if (! rte_lcore_is_enabled(i))
308af75078fSIntel 			continue;
309af75078fSIntel 		if (i == rte_get_master_lcore())
310af75078fSIntel 			continue;
311af75078fSIntel 		fwd_lcores_cpuids[nb_lc++] = i;
312af75078fSIntel 	}
313af75078fSIntel 	nb_lcores = (lcoreid_t) nb_lc;
314af75078fSIntel 	nb_cfg_lcores = nb_lcores;
315af75078fSIntel 	nb_fwd_lcores = 1;
316af75078fSIntel }
317af75078fSIntel 
318af75078fSIntel static void
319af75078fSIntel set_def_peer_eth_addrs(void)
320af75078fSIntel {
321af75078fSIntel 	portid_t i;
322af75078fSIntel 
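	/*
	 * Default peer address: locally administered MAC with the port
	 * number in the last byte (e.g. 02:00:00:00:00:01 for port 1).
	 */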
323af75078fSIntel 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
324af75078fSIntel 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
325af75078fSIntel 		peer_eth_addrs[i].addr_bytes[5] = i;
326af75078fSIntel 	}
327af75078fSIntel }
328af75078fSIntel 
329af75078fSIntel static void
330af75078fSIntel set_default_fwd_ports_config(void)
331af75078fSIntel {
332af75078fSIntel 	portid_t pt_id;
333af75078fSIntel 
334af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
335af75078fSIntel 		fwd_ports_ids[pt_id] = pt_id;
336af75078fSIntel 
337af75078fSIntel 	nb_cfg_ports = nb_ports;
338af75078fSIntel 	nb_fwd_ports = nb_ports;
339af75078fSIntel }
340af75078fSIntel 
341af75078fSIntel void
342af75078fSIntel set_def_fwd_config(void)
343af75078fSIntel {
344af75078fSIntel 	set_default_fwd_lcores_config();
345af75078fSIntel 	set_def_peer_eth_addrs();
346af75078fSIntel 	set_default_fwd_ports_config();
347af75078fSIntel }
348af75078fSIntel 
349af75078fSIntel /*
350af75078fSIntel  * Configuration initialisation done once at init time.
351af75078fSIntel  */
352af75078fSIntel struct mbuf_ctor_arg {
353af75078fSIntel 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
354af75078fSIntel 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
355af75078fSIntel };
356af75078fSIntel 
357af75078fSIntel struct mbuf_pool_ctor_arg {
358af75078fSIntel 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
359af75078fSIntel };
360af75078fSIntel 
361af75078fSIntel static void
362af75078fSIntel testpmd_mbuf_ctor(struct rte_mempool *mp,
363af75078fSIntel 		  void *opaque_arg,
364af75078fSIntel 		  void *raw_mbuf,
365af75078fSIntel 		  __attribute__((unused)) unsigned i)
366af75078fSIntel {
367af75078fSIntel 	struct mbuf_ctor_arg *mb_ctor_arg;
368af75078fSIntel 	struct rte_mbuf    *mb;
369af75078fSIntel 
370af75078fSIntel 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
371af75078fSIntel 	mb = (struct rte_mbuf *) raw_mbuf;
372af75078fSIntel 
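	/* The data buffer lives right after the mbuf header in the same
	 * mempool element; compute its virtual and physical addresses. */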
373e788fe1cSIvan Boule 	mb->type         = RTE_MBUF_PKT;
374af75078fSIntel 	mb->pool         = mp;
375af75078fSIntel 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
376af75078fSIntel 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
377af75078fSIntel 			mb_ctor_arg->seg_buf_offset);
378af75078fSIntel 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
380af75078fSIntel 	mb->ol_flags     = 0;
381af75078fSIntel 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
382af75078fSIntel 	mb->pkt.nb_segs  = 1;
38342d71416SIntel 	mb->pkt.vlan_macip.data = 0;
384af75078fSIntel 	mb->pkt.hash.rss = 0;
385af75078fSIntel }
386af75078fSIntel 
387af75078fSIntel static void
388af75078fSIntel testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
389af75078fSIntel 		       void *opaque_arg)
390af75078fSIntel {
391af75078fSIntel 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
392af75078fSIntel 	struct rte_pktmbuf_pool_private *mbp_priv;
393af75078fSIntel 
394af75078fSIntel 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
395af75078fSIntel 		printf("%s(%s) private_data_size %d < %d\n",
396af75078fSIntel 		       __func__, mp->name, (int) mp->private_data_size,
397af75078fSIntel 		       (int) sizeof(struct rte_pktmbuf_pool_private));
398af75078fSIntel 		return;
399af75078fSIntel 	}
400af75078fSIntel 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
401af75078fSIntel 	mbp_priv = (struct rte_pktmbuf_pool_private *)
402af75078fSIntel 		((char *)mp + sizeof(struct rte_mempool));
403af75078fSIntel 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
404af75078fSIntel }
405af75078fSIntel 
406af75078fSIntel static void
407af75078fSIntel mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
408af75078fSIntel 		 unsigned int socket_id)
409af75078fSIntel {
410af75078fSIntel 	char pool_name[RTE_MEMPOOL_NAMESIZE];
411af75078fSIntel 	struct rte_mempool *rte_mp;
412af75078fSIntel 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
413af75078fSIntel 	struct mbuf_ctor_arg mb_ctor_arg;
414af75078fSIntel 	uint32_t mb_size;
415af75078fSIntel 
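	/*
	 * Each mempool element holds the rte_mbuf header (rounded up to a
	 * cache line) followed by the headroom and the data segment.
	 */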
416af75078fSIntel 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
417af75078fSIntel 						mbuf_seg_size);
418af75078fSIntel 	mb_ctor_arg.seg_buf_offset =
419af75078fSIntel 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
420af75078fSIntel 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
421af75078fSIntel 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
422af75078fSIntel 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
423af75078fSIntel 	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
424af75078fSIntel 				    (unsigned) mb_mempool_cache,
425af75078fSIntel 				    sizeof(struct rte_pktmbuf_pool_private),
426af75078fSIntel 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
427af75078fSIntel 				    testpmd_mbuf_ctor, &mb_ctor_arg,
428af75078fSIntel 				    socket_id, 0);
429af75078fSIntel 	if (rte_mp == NULL) {
430ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
431ce8d5614SIntel 						"failed\n", socket_id);
432af75078fSIntel 	}
433af75078fSIntel }
434af75078fSIntel 
435af75078fSIntel static void
436af75078fSIntel init_config(void)
437af75078fSIntel {
438ce8d5614SIntel 	portid_t pid;
439af75078fSIntel 	struct rte_port *port;
440af75078fSIntel 	struct rte_mempool *mbp;
441af75078fSIntel 	unsigned int nb_mbuf_per_pool;
442af75078fSIntel 	lcoreid_t  lc_id;
443b6ea6408SIntel 	uint8_t port_per_socket[MAX_SOCKET];
444af75078fSIntel 
445b6ea6408SIntel 	memset(port_per_socket, 0, MAX_SOCKET);
446af75078fSIntel 	/* Configuration of logical cores. */
447af75078fSIntel 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
448af75078fSIntel 				sizeof(struct fwd_lcore *) * nb_lcores,
449af75078fSIntel 				CACHE_LINE_SIZE);
450af75078fSIntel 	if (fwd_lcores == NULL) {
451ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
452ce8d5614SIntel 							"failed\n", nb_lcores);
453af75078fSIntel 	}
454af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
455af75078fSIntel 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
456af75078fSIntel 					       sizeof(struct fwd_lcore),
457af75078fSIntel 					       CACHE_LINE_SIZE);
458af75078fSIntel 		if (fwd_lcores[lc_id] == NULL) {
459ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
460ce8d5614SIntel 								"failed\n");
461af75078fSIntel 		}
462af75078fSIntel 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
463af75078fSIntel 	}
464af75078fSIntel 
465af75078fSIntel 	/*
466af75078fSIntel 	 * Create mbuf pools.
467af75078fSIntel 	 * If NUMA support is disabled, create a single mbuf pool in
468b6ea6408SIntel 	 * socket 0 memory by default.
469af75078fSIntel 	 * Otherwise, create one mbuf pool per CPU socket that has probed ports.
470c8798818SIntel 	 *
471c8798818SIntel 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
472c8798818SIntel 	 * nb_txd can be configured at run time.
473af75078fSIntel 	 */
474c8798818SIntel 	if (param_total_num_mbufs)
475c8798818SIntel 		nb_mbuf_per_pool = param_total_num_mbufs;
476c8798818SIntel 	else {
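		/*
		 * Worst-case sizing: enough mbufs to fill the largest RX and
		 * TX rings, plus the per-lcore mempool caches and one burst.
		 */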
477c8798818SIntel 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
478c8798818SIntel 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
479b6ea6408SIntel 
480b6ea6408SIntel 		if (!numa_support)
481c8798818SIntel 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
482c8798818SIntel 	}
483af75078fSIntel 
484b6ea6408SIntel 	if (!numa_support) {
485b6ea6408SIntel 		if (socket_num == UMA_NO_CONFIG)
486b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
487b6ea6408SIntel 		else
488b6ea6408SIntel 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
489b6ea6408SIntel 						 socket_num);
490b6ea6408SIntel 	}
491af75078fSIntel 	/*
492af75078fSIntel 	 * Records which Mbuf pool to use by each logical core, if needed.
493af75078fSIntel 	 */
494af75078fSIntel 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
495af75078fSIntel 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
496af75078fSIntel 		if (mbp == NULL)
497af75078fSIntel 			mbp = mbuf_pool_find(0);
498af75078fSIntel 		fwd_lcores[lc_id]->mbp = mbp;
499af75078fSIntel 	}
500af75078fSIntel 
501af75078fSIntel 	/* Configuration of Ethernet ports. */
502af75078fSIntel 	ports = rte_zmalloc("testpmd: ports",
503af75078fSIntel 			    sizeof(struct rte_port) * nb_ports,
504af75078fSIntel 			    CACHE_LINE_SIZE);
505af75078fSIntel 	if (ports == NULL) {
506ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
507ce8d5614SIntel 							"failed\n", nb_ports);
508af75078fSIntel 	}
509af75078fSIntel 
510ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
511ce8d5614SIntel 		port = &ports[pid];
512ce8d5614SIntel 		rte_eth_dev_info_get(pid, &port->dev_info);
513ce8d5614SIntel 
514b6ea6408SIntel 		if (numa_support) {
515b6ea6408SIntel 			if (port_numa[pid] != NUMA_NO_CONFIG)
516b6ea6408SIntel 				port_per_socket[port_numa[pid]]++;
517b6ea6408SIntel 			else {
518b6ea6408SIntel 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
519b6ea6408SIntel 				port_per_socket[socket_id]++;
520b6ea6408SIntel 			}
521b6ea6408SIntel 		}
522b6ea6408SIntel 
523ce8d5614SIntel 		/* set flag to initialize port/queue */
524ce8d5614SIntel 		port->need_reconfig = 1;
525ce8d5614SIntel 		port->need_reconfig_queues = 1;
526ce8d5614SIntel 	}
527ce8d5614SIntel 
528b6ea6408SIntel 	if (numa_support) {
529b6ea6408SIntel 		uint8_t i;
530b6ea6408SIntel 		unsigned int nb_mbuf;
531ce8d5614SIntel 
532b6ea6408SIntel 		if (param_total_num_mbufs)
533b6ea6408SIntel 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
534b6ea6408SIntel 
535b6ea6408SIntel 		for (i = 0; i < MAX_SOCKET; i++) {
536b6ea6408SIntel 			nb_mbuf = (nb_mbuf_per_pool *
537b6ea6408SIntel 						port_per_socket[i]);
538b6ea6408SIntel 			if (nb_mbuf)
539b6ea6408SIntel 				mbuf_pool_create(mbuf_data_size,
540b6ea6408SIntel 						nb_mbuf,i);
541b6ea6408SIntel 		}
542b6ea6408SIntel 	}
543b6ea6408SIntel 	init_port_config();
544ce8d5614SIntel 	/* Configuration of packet forwarding streams. */
545ce8d5614SIntel 	if (init_fwd_streams() < 0)
546ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
547ce8d5614SIntel }
548ce8d5614SIntel 
549ce8d5614SIntel int
550ce8d5614SIntel init_fwd_streams(void)
551ce8d5614SIntel {
552ce8d5614SIntel 	portid_t pid;
553ce8d5614SIntel 	struct rte_port *port;
554ce8d5614SIntel 	streamid_t sm_id, nb_fwd_streams_new;
555ce8d5614SIntel 
556ce8d5614SIntel 	/* set socket id according to numa or not */
557ce8d5614SIntel 	for (pid = 0; pid < nb_ports; pid++) {
558ce8d5614SIntel 		port = &ports[pid];
559ce8d5614SIntel 		if (nb_rxq > port->dev_info.max_rx_queues) {
560ce8d5614SIntel 			printf("Fail: nb_rxq(%d) is greater than "
561ce8d5614SIntel 				"max_rx_queues(%d)\n", nb_rxq,
562ce8d5614SIntel 				port->dev_info.max_rx_queues);
563ce8d5614SIntel 			return -1;
564ce8d5614SIntel 		}
565ce8d5614SIntel 		if (nb_txq > port->dev_info.max_tx_queues) {
566ce8d5614SIntel 			printf("Fail: nb_txq(%d) is greater than "
567ce8d5614SIntel 				"max_tx_queues(%d)\n", nb_txq,
568ce8d5614SIntel 				port->dev_info.max_tx_queues);
569ce8d5614SIntel 			return -1;
570ce8d5614SIntel 		}
571af75078fSIntel 		if (numa_support)
572b6ea6408SIntel 			port->socket_id = rte_eth_dev_socket_id(pid);
573b6ea6408SIntel 		else {
574b6ea6408SIntel 			if (socket_num == UMA_NO_CONFIG)
575af75078fSIntel 				port->socket_id = 0;
576b6ea6408SIntel 			else
577b6ea6408SIntel 				port->socket_id = socket_num;
578b6ea6408SIntel 		}
579af75078fSIntel 	}
580af75078fSIntel 
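	/* One forwarding stream is needed per RX queue of each port. */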
581ce8d5614SIntel 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
582ce8d5614SIntel 	if (nb_fwd_streams_new == nb_fwd_streams)
583ce8d5614SIntel 		return 0;
584ce8d5614SIntel 	/* clear the old */
585ce8d5614SIntel 	if (fwd_streams != NULL) {
586ce8d5614SIntel 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
587ce8d5614SIntel 			if (fwd_streams[sm_id] == NULL)
588ce8d5614SIntel 				continue;
589ce8d5614SIntel 			rte_free(fwd_streams[sm_id]);
590ce8d5614SIntel 			fwd_streams[sm_id] = NULL;
591af75078fSIntel 		}
592ce8d5614SIntel 		rte_free(fwd_streams);
593ce8d5614SIntel 		fwd_streams = NULL;
594ce8d5614SIntel 	}
595ce8d5614SIntel 
596ce8d5614SIntel 	/* init new */
597ce8d5614SIntel 	nb_fwd_streams = nb_fwd_streams_new;
598ce8d5614SIntel 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
599ce8d5614SIntel 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
600ce8d5614SIntel 	if (fwd_streams == NULL)
601ce8d5614SIntel 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
602ce8d5614SIntel 						"failed\n", nb_fwd_streams);
603ce8d5614SIntel 
604af75078fSIntel 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
605af75078fSIntel 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
606ce8d5614SIntel 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
607ce8d5614SIntel 		if (fwd_streams[sm_id] == NULL)
608ce8d5614SIntel 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
609ce8d5614SIntel 								" failed\n");
610af75078fSIntel 	}
611ce8d5614SIntel 
612ce8d5614SIntel 	return 0;
613af75078fSIntel }
614af75078fSIntel 
615af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
616af75078fSIntel static void
617af75078fSIntel pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
618af75078fSIntel {
619af75078fSIntel 	unsigned int total_burst;
620af75078fSIntel 	unsigned int nb_burst;
621af75078fSIntel 	unsigned int burst_stats[3];
622af75078fSIntel 	uint16_t pktnb_stats[3];
623af75078fSIntel 	uint16_t nb_pkt;
624af75078fSIntel 	int burst_percent[3];
625af75078fSIntel 
626af75078fSIntel 	/*
627af75078fSIntel 	 * First compute the total number of packet bursts and the
628af75078fSIntel 	 * two highest numbers of bursts of the same number of packets.
629af75078fSIntel 	 */
630af75078fSIntel 	total_burst = 0;
631af75078fSIntel 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
632af75078fSIntel 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
633af75078fSIntel 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
634af75078fSIntel 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
635af75078fSIntel 		if (nb_burst == 0)
636af75078fSIntel 			continue;
637af75078fSIntel 		total_burst += nb_burst;
638af75078fSIntel 		if (nb_burst > burst_stats[0]) {
639af75078fSIntel 			burst_stats[1] = burst_stats[0];
640af75078fSIntel 			pktnb_stats[1] = pktnb_stats[0];
641af75078fSIntel 			burst_stats[0] = nb_burst;
642af75078fSIntel 			pktnb_stats[0] = nb_pkt;
643af75078fSIntel 		} else if (nb_burst > burst_stats[1]) {
			/* Also track the second highest burst count. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
644af75078fSIntel 	}
645af75078fSIntel 	if (total_burst == 0)
646af75078fSIntel 		return;
647af75078fSIntel 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
648af75078fSIntel 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
649af75078fSIntel 	       burst_percent[0], (int) pktnb_stats[0]);
650af75078fSIntel 	if (burst_stats[0] == total_burst) {
651af75078fSIntel 		printf("]\n");
652af75078fSIntel 		return;
653af75078fSIntel 	}
654af75078fSIntel 	if (burst_stats[0] + burst_stats[1] == total_burst) {
655af75078fSIntel 		printf(" + %d%% of %d pkts]\n",
656af75078fSIntel 		       100 - burst_percent[0], pktnb_stats[1]);
657af75078fSIntel 		return;
658af75078fSIntel 	}
659af75078fSIntel 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
660af75078fSIntel 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
661af75078fSIntel 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
662af75078fSIntel 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
663af75078fSIntel 		return;
664af75078fSIntel 	}
665af75078fSIntel 	printf(" + %d%% of %d pkts + %d%% of others]\n",
666af75078fSIntel 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
667af75078fSIntel }
668af75078fSIntel #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
669af75078fSIntel 
670af75078fSIntel static void
671af75078fSIntel fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
672af75078fSIntel {
673af75078fSIntel 	struct rte_port *port;
674013af9b6SIntel 	uint8_t i;
675af75078fSIntel 
676af75078fSIntel 	static const char *fwd_stats_border = "----------------------";
677af75078fSIntel 
678af75078fSIntel 	port = &ports[port_id];
679af75078fSIntel 	printf("\n  %s Forward statistics for port %-2d %s\n",
680af75078fSIntel 	       fwd_stats_border, port_id, fwd_stats_border);
681013af9b6SIntel 
682013af9b6SIntel 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
683af75078fSIntel 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
684af75078fSIntel 		       "%-"PRIu64"\n",
685af75078fSIntel 		       stats->ipackets, stats->ierrors,
686af75078fSIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
687af75078fSIntel 
688af75078fSIntel 		if (cur_fwd_eng == &csum_fwd_engine)
689af75078fSIntel 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
690af75078fSIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
691af75078fSIntel 
692af75078fSIntel 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
693af75078fSIntel 		       "%-"PRIu64"\n",
694af75078fSIntel 		       stats->opackets, port->tx_dropped,
695af75078fSIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
696af75078fSIntel 
697af75078fSIntel 		if (stats->rx_nombuf > 0)
698af75078fSIntel 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
699013af9b6SIntel 
700013af9b6SIntel 	}
701013af9b6SIntel 	else {
702013af9b6SIntel 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
703013af9b6SIntel 		       "%14"PRIu64"\n",
704013af9b6SIntel 		       stats->ipackets, stats->ierrors,
705013af9b6SIntel 		       (uint64_t) (stats->ipackets + stats->ierrors));
706013af9b6SIntel 
707013af9b6SIntel 		if (cur_fwd_eng == &csum_fwd_engine)
708013af9b6SIntel 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
709013af9b6SIntel 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
710013af9b6SIntel 
711013af9b6SIntel 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
712013af9b6SIntel 		       "%14"PRIu64"\n",
713013af9b6SIntel 		       stats->opackets, port->tx_dropped,
714013af9b6SIntel 		       (uint64_t) (stats->opackets + port->tx_dropped));
715013af9b6SIntel 
716013af9b6SIntel 		if (stats->rx_nombuf > 0)
717013af9b6SIntel 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
718013af9b6SIntel 	}
719af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
720af75078fSIntel 	if (port->rx_stream)
721013af9b6SIntel 		pkt_burst_stats_display("RX",
722013af9b6SIntel 			&port->rx_stream->rx_burst_stats);
723af75078fSIntel 	if (port->tx_stream)
724013af9b6SIntel 		pkt_burst_stats_display("TX",
725013af9b6SIntel 			&port->tx_stream->tx_burst_stats);
726af75078fSIntel #endif
727af75078fSIntel 	/* stats fdir */
728af75078fSIntel 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
729013af9b6SIntel 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
730af75078fSIntel 		       stats->fdirmiss,
731af75078fSIntel 		       stats->fdirmatch);
732af75078fSIntel 
733013af9b6SIntel 	if (port->rx_queue_stats_mapping_enabled) {
734013af9b6SIntel 		printf("\n");
735013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
736013af9b6SIntel 			printf("  Stats reg %2d RX-packets:%14"PRIu64
737013af9b6SIntel 			       "     RX-errors:%14"PRIu64
738013af9b6SIntel 			       "    RX-bytes:%14"PRIu64"\n",
739013af9b6SIntel 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
740013af9b6SIntel 		}
741013af9b6SIntel 		printf("\n");
742013af9b6SIntel 	}
743013af9b6SIntel 	if (port->tx_queue_stats_mapping_enabled) {
744013af9b6SIntel 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
745013af9b6SIntel 			printf("  Stats reg %2d TX-packets:%14"PRIu64
746013af9b6SIntel 			       "                                 TX-bytes:%14"PRIu64"\n",
747013af9b6SIntel 			       i, stats->q_opackets[i], stats->q_obytes[i]);
748013af9b6SIntel 		}
749013af9b6SIntel 	}
750013af9b6SIntel 
751af75078fSIntel 	printf("  %s--------------------------------%s\n",
752af75078fSIntel 	       fwd_stats_border, fwd_stats_border);
753af75078fSIntel }
754af75078fSIntel 
755af75078fSIntel static void
756af75078fSIntel fwd_stream_stats_display(streamid_t stream_id)
757af75078fSIntel {
758af75078fSIntel 	struct fwd_stream *fs;
759af75078fSIntel 	static const char *fwd_top_stats_border = "-------";
760af75078fSIntel 
761af75078fSIntel 	fs = fwd_streams[stream_id];
762af75078fSIntel 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
763af75078fSIntel 	    (fs->fwd_dropped == 0))
764af75078fSIntel 		return;
765af75078fSIntel 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
766af75078fSIntel 	       "TX Port=%2d/Queue=%2d %s\n",
767af75078fSIntel 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
768af75078fSIntel 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
769af75078fSIntel 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
770af75078fSIntel 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
771af75078fSIntel 
772af75078fSIntel 	/* if checksum mode */
773af75078fSIntel 	if (cur_fwd_eng == &csum_fwd_engine) {
774013af9b6SIntel 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
775013af9b6SIntel 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
776af75078fSIntel 	}
777af75078fSIntel 
778af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
779af75078fSIntel 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
780af75078fSIntel 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
781af75078fSIntel #endif
782af75078fSIntel }
783af75078fSIntel 
784af75078fSIntel static void
785*7741e4cfSIntel flush_fwd_rx_queues(void)
786af75078fSIntel {
787af75078fSIntel 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
788af75078fSIntel 	portid_t  rxp;
789*7741e4cfSIntel 	portid_t port_id;
790af75078fSIntel 	queueid_t rxq;
791af75078fSIntel 	uint16_t  nb_rx;
792af75078fSIntel 	uint16_t  i;
793af75078fSIntel 	uint8_t   j;
794af75078fSIntel 
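	/*
	 * Drain each RX queue of each forwarding port until it returns no
	 * packets, and do a second pass after a short delay so that packets
	 * still in flight do not pollute the forwarding statistics.
	 */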
795af75078fSIntel 	for (j = 0; j < 2; j++) {
796*7741e4cfSIntel 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
797af75078fSIntel 			for (rxq = 0; rxq < nb_rxq; rxq++) {
798*7741e4cfSIntel 				port_id = fwd_ports_ids[rxp];
799af75078fSIntel 				do {
800*7741e4cfSIntel 					nb_rx = rte_eth_rx_burst(port_id, rxq,
801013af9b6SIntel 						pkts_burst, MAX_PKT_BURST);
802af75078fSIntel 					for (i = 0; i < nb_rx; i++)
803af75078fSIntel 						rte_pktmbuf_free(pkts_burst[i]);
804af75078fSIntel 				} while (nb_rx > 0);
805af75078fSIntel 			}
806af75078fSIntel 		}
807af75078fSIntel 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
808af75078fSIntel 	}
809af75078fSIntel }
810af75078fSIntel 
811af75078fSIntel static void
812af75078fSIntel run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
813af75078fSIntel {
814af75078fSIntel 	struct fwd_stream **fsm;
815af75078fSIntel 	streamid_t nb_fs;
816af75078fSIntel 	streamid_t sm_id;
817af75078fSIntel 
818af75078fSIntel 	fsm = &fwd_streams[fc->stream_idx];
819af75078fSIntel 	nb_fs = fc->stream_nb;
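	/*
	 * Repeatedly run the forwarding engine callback on every stream
	 * assigned to this lcore until the master core sets fc->stopped.
	 */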
820af75078fSIntel 	do {
821af75078fSIntel 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
822af75078fSIntel 			(*pkt_fwd)(fsm[sm_id]);
823af75078fSIntel 	} while (! fc->stopped);
824af75078fSIntel }
825af75078fSIntel 
826af75078fSIntel static int
827af75078fSIntel start_pkt_forward_on_core(void *fwd_arg)
828af75078fSIntel {
829af75078fSIntel 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
830af75078fSIntel 			     cur_fwd_config.fwd_eng->packet_fwd);
831af75078fSIntel 	return 0;
832af75078fSIntel }
833af75078fSIntel 
834af75078fSIntel /*
835af75078fSIntel  * Run the TXONLY packet forwarding engine to send a single burst of packets.
836af75078fSIntel  * Used to start communication flows in network loopback test configurations.
837af75078fSIntel  */
838af75078fSIntel static int
839af75078fSIntel run_one_txonly_burst_on_core(void *fwd_arg)
840af75078fSIntel {
841af75078fSIntel 	struct fwd_lcore *fwd_lc;
842af75078fSIntel 	struct fwd_lcore tmp_lcore;
843af75078fSIntel 
844af75078fSIntel 	fwd_lc = (struct fwd_lcore *) fwd_arg;
845af75078fSIntel 	tmp_lcore = *fwd_lc;
846af75078fSIntel 	tmp_lcore.stopped = 1;
847af75078fSIntel 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
848af75078fSIntel 	return 0;
849af75078fSIntel }
850af75078fSIntel 
851af75078fSIntel /*
852af75078fSIntel  * Launch packet forwarding:
853af75078fSIntel  *     - Setup per-port forwarding context.
854af75078fSIntel  *     - launch logical cores with their forwarding configuration.
855af75078fSIntel  */
856af75078fSIntel static void
857af75078fSIntel launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
858af75078fSIntel {
859af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
860af75078fSIntel 	unsigned int i;
861af75078fSIntel 	unsigned int lc_id;
862af75078fSIntel 	int diag;
863af75078fSIntel 
864af75078fSIntel 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
865af75078fSIntel 	if (port_fwd_begin != NULL) {
866af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
867af75078fSIntel 			(*port_fwd_begin)(fwd_ports_ids[i]);
868af75078fSIntel 	}
869af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
870af75078fSIntel 		lc_id = fwd_lcores_cpuids[i];
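		/*
		 * In interactive mode the master lcore keeps running the
		 * command line, so forwarding is only launched on the others.
		 */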
871af75078fSIntel 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
872af75078fSIntel 			fwd_lcores[i]->stopped = 0;
873af75078fSIntel 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
874af75078fSIntel 						     fwd_lcores[i], lc_id);
875af75078fSIntel 			if (diag != 0)
876af75078fSIntel 				printf("launch lcore %u failed - diag=%d\n",
877af75078fSIntel 				       lc_id, diag);
878af75078fSIntel 		}
879af75078fSIntel 	}
880af75078fSIntel }
881af75078fSIntel 
882af75078fSIntel /*
883af75078fSIntel  * Launch packet forwarding configuration.
884af75078fSIntel  */
885af75078fSIntel void
886af75078fSIntel start_packet_forwarding(int with_tx_first)
887af75078fSIntel {
888af75078fSIntel 	port_fwd_begin_t port_fwd_begin;
889af75078fSIntel 	port_fwd_end_t  port_fwd_end;
890af75078fSIntel 	struct rte_port *port;
891af75078fSIntel 	unsigned int i;
892af75078fSIntel 	portid_t   pt_id;
893af75078fSIntel 	streamid_t sm_id;
894af75078fSIntel 
895ce8d5614SIntel 	if (all_ports_started() == 0) {
896ce8d5614SIntel 		printf("Not all ports were started\n");
897ce8d5614SIntel 		return;
898ce8d5614SIntel 	}
899af75078fSIntel 	if (test_done == 0) {
900af75078fSIntel 		printf("Packet forwarding already started\n");
901af75078fSIntel 		return;
902af75078fSIntel 	}
903*7741e4cfSIntel 	if (dcb_test) {
904*7741e4cfSIntel 		for (i = 0; i < nb_fwd_ports; i++) {
905*7741e4cfSIntel 			pt_id = fwd_ports_ids[i];
906*7741e4cfSIntel 			port = &ports[pt_id];
907*7741e4cfSIntel 			if (!port->dcb_flag) {
908*7741e4cfSIntel 				printf("In DCB mode, all forwarding ports must "
909*7741e4cfSIntel                                        "be configured in this mode.\n");
910013af9b6SIntel 				return;
911013af9b6SIntel 			}
912*7741e4cfSIntel 		}
913*7741e4cfSIntel 		if (nb_fwd_lcores == 1) {
914*7741e4cfSIntel 			printf("In DCB mode,the nb forwarding cores "
915*7741e4cfSIntel                                "should be larger than 1.\n");
916*7741e4cfSIntel 			return;
917*7741e4cfSIntel 		}
918*7741e4cfSIntel 	}
919af75078fSIntel 	test_done = 0;
920*7741e4cfSIntel 
921*7741e4cfSIntel 	if (!no_flush_rx)
922*7741e4cfSIntel 		flush_fwd_rx_queues();
923*7741e4cfSIntel 
924af75078fSIntel 	fwd_config_setup();
925af75078fSIntel 	rxtx_config_display();
926af75078fSIntel 
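	/*
	 * Snapshot the current port statistics; stop_packet_forwarding()
	 * subtracts this baseline so only this run's traffic is reported.
	 */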
927af75078fSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
928af75078fSIntel 		pt_id = fwd_ports_ids[i];
929af75078fSIntel 		port = &ports[pt_id];
930af75078fSIntel 		rte_eth_stats_get(pt_id, &port->stats);
931af75078fSIntel 		port->tx_dropped = 0;
932013af9b6SIntel 
933013af9b6SIntel 		map_port_queue_stats_mapping_registers(pt_id, port);
934af75078fSIntel 	}
935af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
936af75078fSIntel 		fwd_streams[sm_id]->rx_packets = 0;
937af75078fSIntel 		fwd_streams[sm_id]->tx_packets = 0;
938af75078fSIntel 		fwd_streams[sm_id]->fwd_dropped = 0;
939af75078fSIntel 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
940af75078fSIntel 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
941af75078fSIntel 
942af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
943af75078fSIntel 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
944af75078fSIntel 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
945af75078fSIntel 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
946af75078fSIntel 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
947af75078fSIntel #endif
948af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
949af75078fSIntel 		fwd_streams[sm_id]->core_cycles = 0;
950af75078fSIntel #endif
951af75078fSIntel 	}
952af75078fSIntel 	if (with_tx_first) {
953af75078fSIntel 		port_fwd_begin = tx_only_engine.port_fwd_begin;
954af75078fSIntel 		if (port_fwd_begin != NULL) {
955af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
956af75078fSIntel 				(*port_fwd_begin)(fwd_ports_ids[i]);
957af75078fSIntel 		}
958af75078fSIntel 		launch_packet_forwarding(run_one_txonly_burst_on_core);
959af75078fSIntel 		rte_eal_mp_wait_lcore();
960af75078fSIntel 		port_fwd_end = tx_only_engine.port_fwd_end;
961af75078fSIntel 		if (port_fwd_end != NULL) {
962af75078fSIntel 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
963af75078fSIntel 				(*port_fwd_end)(fwd_ports_ids[i]);
964af75078fSIntel 		}
965af75078fSIntel 	}
966af75078fSIntel 	launch_packet_forwarding(start_pkt_forward_on_core);
967af75078fSIntel }
968af75078fSIntel 
969af75078fSIntel void
970af75078fSIntel stop_packet_forwarding(void)
971af75078fSIntel {
972af75078fSIntel 	struct rte_eth_stats stats;
973af75078fSIntel 	struct rte_port *port;
974af75078fSIntel 	port_fwd_end_t  port_fwd_end;
975af75078fSIntel 	int i;
976af75078fSIntel 	portid_t   pt_id;
977af75078fSIntel 	streamid_t sm_id;
978af75078fSIntel 	lcoreid_t  lc_id;
979af75078fSIntel 	uint64_t total_recv;
980af75078fSIntel 	uint64_t total_xmit;
981af75078fSIntel 	uint64_t total_rx_dropped;
982af75078fSIntel 	uint64_t total_tx_dropped;
983af75078fSIntel 	uint64_t total_rx_nombuf;
984af75078fSIntel 	uint64_t tx_dropped;
985af75078fSIntel 	uint64_t rx_bad_ip_csum;
986af75078fSIntel 	uint64_t rx_bad_l4_csum;
987af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
988af75078fSIntel 	uint64_t fwd_cycles;
989af75078fSIntel #endif
990af75078fSIntel 	static const char *acc_stats_border = "+++++++++++++++";
991af75078fSIntel 
992ce8d5614SIntel 	if (all_ports_started() == 0) {
993ce8d5614SIntel 		printf("Not all ports were started\n");
994ce8d5614SIntel 		return;
995ce8d5614SIntel 	}
996af75078fSIntel 	if (test_done) {
997af75078fSIntel 		printf("Packet forwarding not started\n");
998af75078fSIntel 		return;
999af75078fSIntel 	}
1000af75078fSIntel 	printf("Telling cores to stop...");
1001af75078fSIntel 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1002af75078fSIntel 		fwd_lcores[lc_id]->stopped = 1;
1003af75078fSIntel 	printf("\nWaiting for lcores to finish...\n");
1004af75078fSIntel 	rte_eal_mp_wait_lcore();
1005af75078fSIntel 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1006af75078fSIntel 	if (port_fwd_end != NULL) {
1007af75078fSIntel 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1008af75078fSIntel 			pt_id = fwd_ports_ids[i];
1009af75078fSIntel 			(*port_fwd_end)(pt_id);
1010af75078fSIntel 		}
1011af75078fSIntel 	}
1012af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1013af75078fSIntel 	fwd_cycles = 0;
1014af75078fSIntel #endif
1015af75078fSIntel 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1016af75078fSIntel 		if (cur_fwd_config.nb_fwd_streams >
1017af75078fSIntel 		    cur_fwd_config.nb_fwd_ports) {
1018af75078fSIntel 			fwd_stream_stats_display(sm_id);
1019af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1020af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1021af75078fSIntel 		} else {
1022af75078fSIntel 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1023af75078fSIntel 				fwd_streams[sm_id];
1024af75078fSIntel 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1025af75078fSIntel 				fwd_streams[sm_id];
1026af75078fSIntel 		}
1027af75078fSIntel 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1028af75078fSIntel 		tx_dropped = (uint64_t) (tx_dropped +
1029af75078fSIntel 					 fwd_streams[sm_id]->fwd_dropped);
1030af75078fSIntel 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1031af75078fSIntel 
1032013af9b6SIntel 		rx_bad_ip_csum =
1033013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1034af75078fSIntel 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1035af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1036013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1037013af9b6SIntel 							rx_bad_ip_csum;
1038af75078fSIntel 
1039013af9b6SIntel 		rx_bad_l4_csum =
1040013af9b6SIntel 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1041af75078fSIntel 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1042af75078fSIntel 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1043013af9b6SIntel 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1044013af9b6SIntel 							rx_bad_l4_csum;
1045af75078fSIntel 
1046af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1047af75078fSIntel 		fwd_cycles = (uint64_t) (fwd_cycles +
1048af75078fSIntel 					 fwd_streams[sm_id]->core_cycles);
1049af75078fSIntel #endif
1050af75078fSIntel 	}
1051af75078fSIntel 	total_recv = 0;
1052af75078fSIntel 	total_xmit = 0;
1053af75078fSIntel 	total_rx_dropped = 0;
1054af75078fSIntel 	total_tx_dropped = 0;
1055af75078fSIntel 	total_rx_nombuf  = 0;
1056*7741e4cfSIntel 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1057af75078fSIntel 		pt_id = fwd_ports_ids[i];
1058af75078fSIntel 
1059af75078fSIntel 		port = &ports[pt_id];
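		/* Subtract the baseline recorded when forwarding started. */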
1060af75078fSIntel 		rte_eth_stats_get(pt_id, &stats);
1061af75078fSIntel 		stats.ipackets -= port->stats.ipackets;
1062af75078fSIntel 		port->stats.ipackets = 0;
1063af75078fSIntel 		stats.opackets -= port->stats.opackets;
1064af75078fSIntel 		port->stats.opackets = 0;
1065af75078fSIntel 		stats.ibytes   -= port->stats.ibytes;
1066af75078fSIntel 		port->stats.ibytes = 0;
1067af75078fSIntel 		stats.obytes   -= port->stats.obytes;
1068af75078fSIntel 		port->stats.obytes = 0;
1069af75078fSIntel 		stats.ierrors  -= port->stats.ierrors;
1070af75078fSIntel 		port->stats.ierrors = 0;
1071af75078fSIntel 		stats.oerrors  -= port->stats.oerrors;
1072af75078fSIntel 		port->stats.oerrors = 0;
1073af75078fSIntel 		stats.rx_nombuf -= port->stats.rx_nombuf;
1074af75078fSIntel 		port->stats.rx_nombuf = 0;
1075af75078fSIntel 		stats.fdirmatch -= port->stats.fdirmatch;
1076af75078fSIntel 		port->stats.fdirmatch = 0;
1077af75078fSIntel 		stats.fdirmiss -= port->stats.fdirmiss;
1078af75078fSIntel 		port->stats.fdirmiss = 0;
1079af75078fSIntel 
1080af75078fSIntel 		total_recv += stats.ipackets;
1081af75078fSIntel 		total_xmit += stats.opackets;
1082af75078fSIntel 		total_rx_dropped += stats.ierrors;
1083af75078fSIntel 		total_tx_dropped += port->tx_dropped;
1084af75078fSIntel 		total_rx_nombuf  += stats.rx_nombuf;
1085af75078fSIntel 
1086af75078fSIntel 		fwd_port_stats_display(pt_id, &stats);
1087af75078fSIntel 	}
1088af75078fSIntel 	printf("\n  %s Accumulated forward statistics for all ports"
1089af75078fSIntel 	       "%s\n",
1090af75078fSIntel 	       acc_stats_border, acc_stats_border);
1091af75078fSIntel 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1092af75078fSIntel 	       "%-"PRIu64"\n"
1093af75078fSIntel 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1094af75078fSIntel 	       "%-"PRIu64"\n",
1095af75078fSIntel 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1096af75078fSIntel 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1097af75078fSIntel 	if (total_rx_nombuf > 0)
1098af75078fSIntel 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1099af75078fSIntel 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1100af75078fSIntel 	       "%s\n",
1101af75078fSIntel 	       acc_stats_border, acc_stats_border);
1102af75078fSIntel #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1103af75078fSIntel 	if (total_recv > 0)
1104af75078fSIntel 		printf("\n  CPU cycles/packet=%u (total cycles="
1105af75078fSIntel 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1106af75078fSIntel 		       (unsigned int)(fwd_cycles / total_recv),
1107af75078fSIntel 		       fwd_cycles, total_recv);
1108af75078fSIntel #endif
1109af75078fSIntel 	printf("\nDone.\n");
1110af75078fSIntel 	test_done = 1;
1111af75078fSIntel }
1112af75078fSIntel 
1113ce8d5614SIntel static int
1114ce8d5614SIntel all_ports_started(void)
1115ce8d5614SIntel {
1116ce8d5614SIntel 	portid_t pi;
1117ce8d5614SIntel 	struct rte_port *port;
1118ce8d5614SIntel 
1119ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1120ce8d5614SIntel 		port = &ports[pi];
1121ce8d5614SIntel 		/* Check if there is a port which is not started */
1122ce8d5614SIntel 		if (port->port_status != RTE_PORT_STARTED)
1123ce8d5614SIntel 			return 0;
1124ce8d5614SIntel 	}
1125ce8d5614SIntel 
1126ce8d5614SIntel 	/* All ports are started. */
1127ce8d5614SIntel 	return 1;
1128ce8d5614SIntel }
1129ce8d5614SIntel 
1130ce8d5614SIntel void
1131ce8d5614SIntel start_port(portid_t pid)
1132ce8d5614SIntel {
1133ce8d5614SIntel 	int diag, need_check_link_status = 0;
1134ce8d5614SIntel 	portid_t pi;
1135ce8d5614SIntel 	queueid_t qi;
1136ce8d5614SIntel 	struct rte_port *port;
1137ce8d5614SIntel 
1138ce8d5614SIntel 	if (test_done == 0) {
1139ce8d5614SIntel 		printf("Please stop forwarding first\n");
1140ce8d5614SIntel 		return;
1141ce8d5614SIntel 	}
1142ce8d5614SIntel 
1143ce8d5614SIntel 	if (init_fwd_streams() < 0) {
1144ce8d5614SIntel 		printf("Fail from init_fwd_streams()\n");
1145ce8d5614SIntel 		return;
1146ce8d5614SIntel 	}
1147ce8d5614SIntel 
1148ce8d5614SIntel 	if (dcb_config)
1149ce8d5614SIntel 		dcb_test = 1;
1150ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1151ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1152ce8d5614SIntel 			continue;
1153ce8d5614SIntel 
1154ce8d5614SIntel 		port = &ports[pi];
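		/*
		 * Atomically move the port from STOPPED to HANDLING so that
		 * concurrent start/stop requests cannot race on the same port.
		 */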
1155ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1156ce8d5614SIntel 						 RTE_PORT_HANDLING) == 0) {
1157ce8d5614SIntel 			printf("Port %d is now not stopped\n", pi);
1158ce8d5614SIntel 			continue;
1159ce8d5614SIntel 		}
1160ce8d5614SIntel 
1161ce8d5614SIntel 		if (port->need_reconfig > 0) {
1162ce8d5614SIntel 			port->need_reconfig = 0;
1163ce8d5614SIntel 
1164b6ea6408SIntel 			printf("Configuring Port %d (socket %d)\n", pi,
1165b6ea6408SIntel 					rte_eth_dev_socket_id(pi));
1166ce8d5614SIntel 			/* configure port */
1167ce8d5614SIntel 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1168ce8d5614SIntel 						&(port->dev_conf));
1169ce8d5614SIntel 			if (diag != 0) {
1170ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1171ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1172ce8d5614SIntel 					printf("Port %d can not be set back "
1173ce8d5614SIntel 							"to stopped\n", pi);
1174ce8d5614SIntel 				printf("Fail to configure port %d\n", pi);
1175ce8d5614SIntel 				/* try to reconfigure port next time */
1176ce8d5614SIntel 				port->need_reconfig = 1;
1177ce8d5614SIntel 				return;
1178ce8d5614SIntel 			}
1179ce8d5614SIntel 		}
1180ce8d5614SIntel 		if (port->need_reconfig_queues > 0) {
1181ce8d5614SIntel 			port->need_reconfig_queues = 0;
1182ce8d5614SIntel 			/* setup tx queues */
1183ce8d5614SIntel 			for (qi = 0; qi < nb_txq; qi++) {
1184b6ea6408SIntel 				if ((numa_support) &&
1185b6ea6408SIntel 					(txring_numa[pi] != NUMA_NO_CONFIG))
1186b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1187b6ea6408SIntel 						nb_txd,txring_numa[pi],
1188b6ea6408SIntel 						&(port->tx_conf));
1189b6ea6408SIntel 				else
1190b6ea6408SIntel 					diag = rte_eth_tx_queue_setup(pi, qi,
1191b6ea6408SIntel 						nb_txd,port->socket_id,
1192b6ea6408SIntel 						&(port->tx_conf));
1193b6ea6408SIntel 
1194ce8d5614SIntel 				if (diag == 0)
1195ce8d5614SIntel 					continue;
1196ce8d5614SIntel 
1197ce8d5614SIntel 				/* Fail to setup tx queue, return */
1198ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1199ce8d5614SIntel 							RTE_PORT_HANDLING,
1200ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1201ce8d5614SIntel 					printf("Port %d can not be set back "
1202ce8d5614SIntel 							"to stopped\n", pi);
1203ce8d5614SIntel 				printf("Fail to configure port %d tx queues\n", pi);
1204ce8d5614SIntel 				/* try to reconfigure queues next time */
1205ce8d5614SIntel 				port->need_reconfig_queues = 1;
1206ce8d5614SIntel 				return;
1207ce8d5614SIntel 			}
1208ce8d5614SIntel 			/* setup rx queues */
1209ce8d5614SIntel 			for (qi = 0; qi < nb_rxq; qi++) {
1210b6ea6408SIntel 				if ((numa_support) &&
1211b6ea6408SIntel 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1212b6ea6408SIntel 					struct rte_mempool * mp =
1213b6ea6408SIntel 						mbuf_pool_find(rxring_numa[pi]);
1214b6ea6408SIntel 					if (mp == NULL) {
1215b6ea6408SIntel 						printf("Failed to set up RX queue: "
1216b6ea6408SIntel 							"no mempool allocated "
1217b6ea6408SIntel 							"on socket %d\n",
1218b6ea6408SIntel 							rxring_numa[pi]);
1219b6ea6408SIntel 						return;
1220b6ea6408SIntel 					}
1221b6ea6408SIntel 
1222b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1223b6ea6408SIntel 					     nb_rxd, rxring_numa[pi],
1224b6ea6408SIntel 					     &(port->rx_conf), mp);
1225b6ea6408SIntel 				}
1226b6ea6408SIntel 				else
1227b6ea6408SIntel 					diag = rte_eth_rx_queue_setup(pi, qi,
1228b6ea6408SIntel 					     nb_rxd, port->socket_id,
1229b6ea6408SIntel 					     &(port->rx_conf),
1230ce8d5614SIntel 					     mbuf_pool_find(port->socket_id));
1231b6ea6408SIntel 
1232ce8d5614SIntel 				if (diag == 0)
1233ce8d5614SIntel 					continue;
1234ce8d5614SIntel 
1236ce8d5614SIntel 				/* Failed to set up an Rx queue, return */
1237ce8d5614SIntel 				if (rte_atomic16_cmpset(&(port->port_status),
1238ce8d5614SIntel 							RTE_PORT_HANDLING,
1239ce8d5614SIntel 							RTE_PORT_STOPPED) == 0)
1240ce8d5614SIntel 					printf("Port %d cannot be set back "
1241ce8d5614SIntel 							"to stopped\n", pi);
1242ce8d5614SIntel 				printf("Failed to configure port %d Rx queues\n", pi);
1243ce8d5614SIntel 				/* try to reconfigure queues next time */
1244ce8d5614SIntel 				port->need_reconfig_queues = 1;
1245ce8d5614SIntel 				return;
1246ce8d5614SIntel 			}
1247ce8d5614SIntel 		}
1248ce8d5614SIntel 		/* start port */
1249ce8d5614SIntel 		if (rte_eth_dev_start(pi) < 0) {
1250ce8d5614SIntel 			printf("Failed to start port %d\n", pi);
1251ce8d5614SIntel 
1252ce8d5614SIntel 			/* Failed to start the port, roll its state back */
1253ce8d5614SIntel 			if (rte_atomic16_cmpset(&(port->port_status),
1254ce8d5614SIntel 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1255ce8d5614SIntel 				printf("Port %d cannot be set back to "
1256ce8d5614SIntel 							"stopped\n", pi);
1257ce8d5614SIntel 			continue;
1258ce8d5614SIntel 		}
1259ce8d5614SIntel 
1260ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1261ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1262ce8d5614SIntel 			printf("Port %d cannot be set to the started state\n", pi);
1263ce8d5614SIntel 
1264ce8d5614SIntel 		/* at least one port started, so the link status must be checked */
1265ce8d5614SIntel 		need_check_link_status = 1;
1266ce8d5614SIntel 	}
1267ce8d5614SIntel 
1268ce8d5614SIntel 	if (need_check_link_status)
1269ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1270ce8d5614SIntel 	else
1271ce8d5614SIntel 		printf("Please stop the ports first\n");
1272ce8d5614SIntel 
1273ce8d5614SIntel 	printf("Done\n");
1274ce8d5614SIntel }
1275ce8d5614SIntel 
1276ce8d5614SIntel void
1277ce8d5614SIntel stop_port(portid_t pid)
1278ce8d5614SIntel {
1279ce8d5614SIntel 	portid_t pi;
1280ce8d5614SIntel 	struct rte_port *port;
1281ce8d5614SIntel 	int need_check_link_status = 0;
1282ce8d5614SIntel 
1283ce8d5614SIntel 	if (test_done == 0) {
1284ce8d5614SIntel 		printf("Please stop forwarding first\n");
1285ce8d5614SIntel 		return;
1286ce8d5614SIntel 	}
1287ce8d5614SIntel 	if (dcb_test) {
1288ce8d5614SIntel 		dcb_test = 0;
1289ce8d5614SIntel 		dcb_config = 0;
1290ce8d5614SIntel 	}
1291ce8d5614SIntel 	printf("Stopping ports...\n");
1292ce8d5614SIntel 
1293ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1294ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1295ce8d5614SIntel 			continue;
1296ce8d5614SIntel 
1297ce8d5614SIntel 		port = &ports[pi];
1298ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1299ce8d5614SIntel 						RTE_PORT_HANDLING) == 0)
1300ce8d5614SIntel 			continue;
1301ce8d5614SIntel 
1302ce8d5614SIntel 		rte_eth_dev_stop(pi);
1303ce8d5614SIntel 
1304ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1305ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1306ce8d5614SIntel 			printf("Port %d cannot be set to the stopped state\n", pi);
1307ce8d5614SIntel 		need_check_link_status = 1;
1308ce8d5614SIntel 	}
1309ce8d5614SIntel 	if (need_check_link_status)
1310ce8d5614SIntel 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1311ce8d5614SIntel 
1312ce8d5614SIntel 	printf("Done\n");
1313ce8d5614SIntel }
1314ce8d5614SIntel 
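/*
 * Stop the port given by pid, or every port when pid is out of range.
 * Only ports currently in the RTE_PORT_STARTED state are stopped; the
 * DCB test flags are cleared first.
 */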
1315ce8d5614SIntel void
1316ce8d5614SIntel close_port(portid_t pid)
1317ce8d5614SIntel {
1318ce8d5614SIntel 	portid_t pi;
1319ce8d5614SIntel 	struct rte_port *port;
1320ce8d5614SIntel 
1321ce8d5614SIntel 	if (test_done == 0) {
1322ce8d5614SIntel 		printf("Please stop forwarding first\n");
1323ce8d5614SIntel 		return;
1324ce8d5614SIntel 	}
1325ce8d5614SIntel 
1326ce8d5614SIntel 	printf("Closing ports...\n");
1327ce8d5614SIntel 
1328ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1329ce8d5614SIntel 		if (pid < nb_ports && pid != pi)
1330ce8d5614SIntel 			continue;
1331ce8d5614SIntel 
1332ce8d5614SIntel 		port = &ports[pi];
1333ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1334ce8d5614SIntel 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1335ce8d5614SIntel 			printf("Port %d is not stopped, skipping it\n", pi);
1336ce8d5614SIntel 			continue;
1337ce8d5614SIntel 		}
1338ce8d5614SIntel 
1339ce8d5614SIntel 		rte_eth_dev_close(pi);
1340ce8d5614SIntel 
1341ce8d5614SIntel 		if (rte_atomic16_cmpset(&(port->port_status),
1342ce8d5614SIntel 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1343ce8d5614SIntel 			printf("Port %d cannot be set to the closed state\n", pi);
1344ce8d5614SIntel 	}
1345ce8d5614SIntel 
1346ce8d5614SIntel 	printf("Done\n");
1347ce8d5614SIntel }
1348ce8d5614SIntel 
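/* Return 1 when every port is in the RTE_PORT_STOPPED state, 0 otherwise. */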
1349ce8d5614SIntel int
1350ce8d5614SIntel all_ports_stopped(void)
1351ce8d5614SIntel {
1352ce8d5614SIntel 	portid_t pi;
1353ce8d5614SIntel 	struct rte_port *port;
1354ce8d5614SIntel 
1355ce8d5614SIntel 	for (pi = 0; pi < nb_ports; pi++) {
1356ce8d5614SIntel 		port = &ports[pi];
1357ce8d5614SIntel 		if (port->port_status != RTE_PORT_STOPPED)
1358ce8d5614SIntel 			return 0;
1359ce8d5614SIntel 	}
1360ce8d5614SIntel 
1361ce8d5614SIntel 	return 1;
1362ce8d5614SIntel }
1363ce8d5614SIntel 
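/* Close every port before exiting the application. */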
1364af75078fSIntel void
1365af75078fSIntel pmd_test_exit(void)
1366af75078fSIntel {
1367af75078fSIntel 	portid_t pt_id;
1368af75078fSIntel 
1369af75078fSIntel 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1370af75078fSIntel 		printf("Closing port %d...", pt_id);
1371af75078fSIntel 		fflush(stdout);
1372af75078fSIntel 		rte_eth_dev_close(pt_id);
1373af75078fSIntel 		printf("done\n");
1374af75078fSIntel 	}
1375af75078fSIntel 	printf("bye...\n");
1376af75078fSIntel }
1377af75078fSIntel 
1378af75078fSIntel typedef void (*cmd_func_t)(void);
1379af75078fSIntel struct pmd_test_command {
1380af75078fSIntel 	const char *cmd_name;
1381af75078fSIntel 	cmd_func_t cmd_func;
1382af75078fSIntel };
1383af75078fSIntel 
1384af75078fSIntel #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1385af75078fSIntel 
1386ce8d5614SIntel /* Check the link status of all ports for up to 9 seconds, then print the final status */
1387af75078fSIntel static void
1388ce8d5614SIntel check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1389af75078fSIntel {
1390ce8d5614SIntel #define CHECK_INTERVAL 100 /* 100ms */
1391ce8d5614SIntel #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1392ce8d5614SIntel 	uint8_t portid, count, all_ports_up, print_flag = 0;
1393ce8d5614SIntel 	struct rte_eth_link link;
1394ce8d5614SIntel 
1395ce8d5614SIntel 	printf("Checking link statuses...\n");
1396ce8d5614SIntel 	fflush(stdout);
1397ce8d5614SIntel 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1398ce8d5614SIntel 		all_ports_up = 1;
1399ce8d5614SIntel 		for (portid = 0; portid < port_num; portid++) {
1400ce8d5614SIntel 			if ((port_mask & (1 << portid)) == 0)
1401ce8d5614SIntel 				continue;
1402ce8d5614SIntel 			memset(&link, 0, sizeof(link));
1403ce8d5614SIntel 			rte_eth_link_get_nowait(portid, &link);
1404ce8d5614SIntel 			/* print link status if flag set */
1405ce8d5614SIntel 			if (print_flag == 1) {
1406ce8d5614SIntel 				if (link.link_status)
1407ce8d5614SIntel 					printf("Port %d Link Up - speed %u "
1408ce8d5614SIntel 						"Mbps - %s\n", (uint8_t)portid,
1409ce8d5614SIntel 						(unsigned)link.link_speed,
1410ce8d5614SIntel 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1411ce8d5614SIntel 					("full-duplex") : ("half-duplex"));
1412ce8d5614SIntel 				else
1413ce8d5614SIntel 					printf("Port %d Link Down\n",
1414ce8d5614SIntel 						(uint8_t)portid);
1415ce8d5614SIntel 				continue;
1416ce8d5614SIntel 			}
1417ce8d5614SIntel 			/* clear all_ports_up flag if any link down */
1418ce8d5614SIntel 			if (link.link_status == 0) {
1419ce8d5614SIntel 				all_ports_up = 0;
1420ce8d5614SIntel 				break;
1421ce8d5614SIntel 			}
1422ce8d5614SIntel 		}
1423ce8d5614SIntel 		/* after finally printing all link status, get out */
1424ce8d5614SIntel 		if (print_flag == 1)
1425ce8d5614SIntel 			break;
1426ce8d5614SIntel 
1427ce8d5614SIntel 		if (all_ports_up == 0) {
1428ce8d5614SIntel 			fflush(stdout);
1429ce8d5614SIntel 			rte_delay_ms(CHECK_INTERVAL);
1430ce8d5614SIntel 		}
1431ce8d5614SIntel 
1432ce8d5614SIntel 		/* set the print_flag if all ports up or timeout */
1433ce8d5614SIntel 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1434ce8d5614SIntel 			print_flag = 1;
1435ce8d5614SIntel 		}
1436ce8d5614SIntel 	}
1437af75078fSIntel }
1438af75078fSIntel 
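/*
 * Program the user-supplied Tx queue to stats-counter mappings for this
 * port and mark the port when at least one mapping was applied.
 */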
1439013af9b6SIntel static int
1440013af9b6SIntel set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1441af75078fSIntel {
1442013af9b6SIntel 	uint16_t i;
1443af75078fSIntel 	int diag;
1444013af9b6SIntel 	uint8_t mapping_found = 0;
1445af75078fSIntel 
1446013af9b6SIntel 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1447013af9b6SIntel 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1448013af9b6SIntel 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1449013af9b6SIntel 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1450013af9b6SIntel 					tx_queue_stats_mappings[i].queue_id,
1451013af9b6SIntel 					tx_queue_stats_mappings[i].stats_counter_id);
1452013af9b6SIntel 			if (diag != 0)
1453013af9b6SIntel 				return diag;
1454013af9b6SIntel 			mapping_found = 1;
1455af75078fSIntel 		}
1456013af9b6SIntel 	}
1457013af9b6SIntel 	if (mapping_found)
1458013af9b6SIntel 		port->tx_queue_stats_mapping_enabled = 1;
1459013af9b6SIntel 	return 0;
1460013af9b6SIntel }
1461013af9b6SIntel 
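/* Same as above, but for the Rx queue to stats-counter mappings. */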
1462013af9b6SIntel static int
1463013af9b6SIntel set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1464013af9b6SIntel {
1465013af9b6SIntel 	uint16_t i;
1466013af9b6SIntel 	int diag;
1467013af9b6SIntel 	uint8_t mapping_found = 0;
1468013af9b6SIntel 
1469013af9b6SIntel 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1470013af9b6SIntel 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1471013af9b6SIntel 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1472013af9b6SIntel 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1473013af9b6SIntel 					rx_queue_stats_mappings[i].queue_id,
1474013af9b6SIntel 					rx_queue_stats_mappings[i].stats_counter_id);
1475013af9b6SIntel 			if (diag != 0)
1476013af9b6SIntel 				return diag;
1477013af9b6SIntel 			mapping_found = 1;
1478013af9b6SIntel 		}
1479013af9b6SIntel 	}
1480013af9b6SIntel 	if (mapping_found)
1481013af9b6SIntel 		port->rx_queue_stats_mapping_enabled = 1;
1482013af9b6SIntel 	return 0;
1483013af9b6SIntel }
1484013af9b6SIntel 
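/*
 * Apply both Tx and Rx queue stats mappings to a port. A -ENOTSUP return
 * from the PMD only disables the feature for that port; any other error
 * is treated as fatal.
 */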
1485013af9b6SIntel static void
1486013af9b6SIntel map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1487013af9b6SIntel {
1488013af9b6SIntel 	int diag = 0;
1489013af9b6SIntel 
1490013af9b6SIntel 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1491af75078fSIntel 	if (diag != 0) {
1492013af9b6SIntel 		if (diag == -ENOTSUP) {
1493013af9b6SIntel 			port->tx_queue_stats_mapping_enabled = 0;
1494013af9b6SIntel 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
1495013af9b6SIntel 		}
1496013af9b6SIntel 		else
1497013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1498013af9b6SIntel 					"set_tx_queue_stats_mapping_registers "
1499013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1500af75078fSIntel 					pi, diag);
1501af75078fSIntel 	}
1502013af9b6SIntel 
1503013af9b6SIntel 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1504af75078fSIntel 	if (diag != 0) {
1505013af9b6SIntel 		if (diag == -ENOTSUP) {
1506013af9b6SIntel 			port->rx_queue_stats_mapping_enabled = 0;
1507013af9b6SIntel 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
1508013af9b6SIntel 		}
1509013af9b6SIntel 		else
1510013af9b6SIntel 			rte_exit(EXIT_FAILURE,
1511013af9b6SIntel 					"set_rx_queue_stats_mapping_registers "
1512013af9b6SIntel 					"failed for port id=%d diag=%d\n",
1513af75078fSIntel 					pi, diag);
1514af75078fSIntel 	}
1515af75078fSIntel }
1516af75078fSIntel 
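/*
 * Apply the default testpmd configuration (Rx/Tx modes, thresholds, RSS
 * hash functions and queue stats mappings) to every probed port.
 */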
1517013af9b6SIntel void
1518013af9b6SIntel init_port_config(void)
1519013af9b6SIntel {
1520013af9b6SIntel 	portid_t pid;
1521013af9b6SIntel 	struct rte_port *port;
1522013af9b6SIntel 
1523013af9b6SIntel 	for (pid = 0; pid < nb_ports; pid++) {
1524013af9b6SIntel 		port = &ports[pid];
1525013af9b6SIntel 		port->dev_conf.rxmode = rx_mode;
1526013af9b6SIntel 		port->dev_conf.fdir_conf = fdir_conf;
1527013af9b6SIntel 		if (nb_rxq > 0) {
1528013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1529013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1530af75078fSIntel 		} else {
1531013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1532013af9b6SIntel 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1533af75078fSIntel 		}
1534013af9b6SIntel 		port->rx_conf.rx_thresh = rx_thresh;
1535013af9b6SIntel 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1536013af9b6SIntel 		port->rx_conf.rx_drop_en = rx_drop_en;
1537013af9b6SIntel 		port->tx_conf.tx_thresh = tx_thresh;
1538013af9b6SIntel 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1539013af9b6SIntel 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1540013af9b6SIntel 		port->tx_conf.txq_flags = txq_flags;
1541013af9b6SIntel 
1542013af9b6SIntel 		rte_eth_macaddr_get(pid, &port->eth_addr);
1543013af9b6SIntel 
1544013af9b6SIntel 		map_port_queue_stats_mapping_registers(pid, port);
1545013af9b6SIntel 	}
1546013af9b6SIntel }
1547013af9b6SIntel 
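/* VLAN tags used for the DCB/VMDQ pool mappings and VLAN filters below. */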
1548013af9b6SIntel const uint16_t vlan_tags[] = {
1549013af9b6SIntel 		0,  1,  2,  3,  4,  5,  6,  7,
1550013af9b6SIntel 		8,  9, 10, 11,  12, 13, 14, 15,
1551013af9b6SIntel 		16, 17, 18, 19, 20, 21, 22, 23,
1552013af9b6SIntel 		24, 25, 26, 27, 28, 29, 30, 31
1553013af9b6SIntel };
1554013af9b6SIntel 
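/*
 * Translate a testpmd dcb_config into an rte_eth_conf, selecting either
 * the VMDQ+DCB or the plain DCB Rx/Tx multi-queue modes.
 */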
1555013af9b6SIntel static  int
1556013af9b6SIntel get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1557013af9b6SIntel {
1558013af9b6SIntel 	uint8_t i;
1559af75078fSIntel 
1560af75078fSIntel 	/*
1561013af9b6SIntel 	 * Build up the correct configuration for DCB+VT based on the VLAN tags
1562013af9b6SIntel 	 * array given above and the number of traffic classes available for use.
1563af75078fSIntel 	 */
1564013af9b6SIntel 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1565013af9b6SIntel 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1566013af9b6SIntel 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1567013af9b6SIntel 
1568013af9b6SIntel 		/* VMDQ+DCB RX and TX configurations */
1569013af9b6SIntel 		vmdq_rx_conf.enable_default_pool = 0;
1570013af9b6SIntel 		vmdq_rx_conf.default_pool = 0;
1571013af9b6SIntel 		vmdq_rx_conf.nb_queue_pools =
1572013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1573013af9b6SIntel 		vmdq_tx_conf.nb_queue_pools =
1574013af9b6SIntel 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1575013af9b6SIntel 
1576013af9b6SIntel 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1577013af9b6SIntel 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1578013af9b6SIntel 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
1579013af9b6SIntel 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1580af75078fSIntel 		}
1581013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1582013af9b6SIntel 			vmdq_rx_conf.dcb_queue[i] = i;
1583013af9b6SIntel 			vmdq_tx_conf.dcb_queue[i] = i;
1584013af9b6SIntel 		}
1585013af9b6SIntel 
1586013af9b6SIntel 		/* set DCB mode for RX and TX multi-queue operation */
158732e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
158832e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1589013af9b6SIntel 		if (dcb_conf->pfc_en)
1590013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1591013af9b6SIntel 		else
1592013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1593013af9b6SIntel 
1594013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1595013af9b6SIntel 				sizeof(struct rte_eth_vmdq_dcb_conf)));
1596013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1597013af9b6SIntel 				sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1598013af9b6SIntel 	}
1599013af9b6SIntel 	else {
1600013af9b6SIntel 		struct rte_eth_dcb_rx_conf rx_conf;
1601013af9b6SIntel 		struct rte_eth_dcb_tx_conf tx_conf;
1602013af9b6SIntel 
1603013af9b6SIntel 		/* queue mapping configuration of DCB RX and TX */
1604013af9b6SIntel 		if (dcb_conf->num_tcs == ETH_4_TCS)
1605013af9b6SIntel 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1606013af9b6SIntel 		else
1607013af9b6SIntel 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1608013af9b6SIntel 
1609013af9b6SIntel 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1610013af9b6SIntel 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1611013af9b6SIntel 
1612013af9b6SIntel 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1613013af9b6SIntel 			rx_conf.dcb_queue[i] = i;
1614013af9b6SIntel 			tx_conf.dcb_queue[i] = i;
1615013af9b6SIntel 		}
161632e7aa0bSIntel 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
161732e7aa0bSIntel 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1618013af9b6SIntel 		if (dcb_conf->pfc_en)
1619013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1620013af9b6SIntel 		else
1621013af9b6SIntel 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1622013af9b6SIntel 
1623013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1624013af9b6SIntel 				sizeof(struct rte_eth_dcb_rx_conf)));
1625013af9b6SIntel 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1626013af9b6SIntel 				sizeof(struct rte_eth_dcb_tx_conf)));
1627013af9b6SIntel 	}
1628013af9b6SIntel 
1629013af9b6SIntel 	return 0;
1630013af9b6SIntel }
1631013af9b6SIntel 
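/*
 * Put a port into DCB mode: switch to 128 Rx/Tx queues, build the DCB (or
 * VMDQ+DCB) device configuration and enable hardware VLAN filtering for
 * the test VLAN tags listed above.
 */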
1632013af9b6SIntel int
1633013af9b6SIntel init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
1634013af9b6SIntel {
1635013af9b6SIntel 	struct rte_eth_conf port_conf;
1636013af9b6SIntel 	struct rte_port *rte_port;
1637013af9b6SIntel 	int retval;
1638013af9b6SIntel 	uint16_t nb_vlan;
1639013af9b6SIntel 	uint16_t i;
1640013af9b6SIntel 
1641013af9b6SIntel 	/* rxq and txq configuration in dcb mode */
1642013af9b6SIntel 	nb_rxq = 128;
1643013af9b6SIntel 	nb_txq = 128;
1644013af9b6SIntel 	rx_free_thresh = 64;
1645013af9b6SIntel 
1646013af9b6SIntel 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1647013af9b6SIntel 	/* Enter DCB configuration status */
1648013af9b6SIntel 	dcb_config = 1;
1649013af9b6SIntel 
1650013af9b6SIntel 	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
1651013af9b6SIntel 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1652013af9b6SIntel 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1653013af9b6SIntel 	if (retval < 0)
1654013af9b6SIntel 		return retval;
1655013af9b6SIntel 
1656013af9b6SIntel 	rte_port = &ports[pid];
1657013af9b6SIntel 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1658013af9b6SIntel 
1659013af9b6SIntel 	rte_port->rx_conf.rx_thresh = rx_thresh;
1660013af9b6SIntel 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1661013af9b6SIntel 	rte_port->tx_conf.tx_thresh = tx_thresh;
1662013af9b6SIntel 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1663013af9b6SIntel 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1664013af9b6SIntel 	/* VLAN filter */
1665013af9b6SIntel 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1666013af9b6SIntel 	for (i = 0; i < nb_vlan; i++) {
1667013af9b6SIntel 		rx_vft_set(pid, vlan_tags[i], 1);
1668013af9b6SIntel 	}
1669013af9b6SIntel 
1670013af9b6SIntel 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1671013af9b6SIntel 	map_port_queue_stats_mapping_registers(pid, rte_port);
1672013af9b6SIntel 
1673*7741e4cfSIntel 	rte_port->dcb_flag = 1;
1674*7741e4cfSIntel 
1675013af9b6SIntel 	return 0;
1676af75078fSIntel }
1677af75078fSIntel 
1678af75078fSIntel #ifdef RTE_EXEC_ENV_BAREMETAL
1679af75078fSIntel #define main _main
1680af75078fSIntel #endif
1681af75078fSIntel 
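/*
 * Entry point: initialize the EAL and the PMDs, probe the PCI devices,
 * parse the testpmd arguments, configure and start all ports, then either
 * enter the interactive prompt or start packet forwarding until the user
 * presses enter.
 */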
1682af75078fSIntel int
1683af75078fSIntel main(int argc, char** argv)
1684af75078fSIntel {
1685af75078fSIntel 	int  diag;
1686013af9b6SIntel 	uint8_t port_id;
1687af75078fSIntel 
1688af75078fSIntel 	diag = rte_eal_init(argc, argv);
1689af75078fSIntel 	if (diag < 0)
1690af75078fSIntel 		rte_panic("Cannot init EAL\n");
1691af75078fSIntel 
169269d22b8eSIntel 	if (rte_pmd_init_all())
169369d22b8eSIntel 		rte_panic("Cannot init PMD\n");
1694af75078fSIntel 
1695af75078fSIntel 	if (rte_eal_pci_probe())
1696af75078fSIntel 		rte_panic("Cannot probe PCI\n");
1697af75078fSIntel 
1698af75078fSIntel 	nb_ports = (portid_t) rte_eth_dev_count();
1699af75078fSIntel 	if (nb_ports == 0)
1700013af9b6SIntel 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1701013af9b6SIntel 			  "check that "
1702af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y, "
170369d22b8eSIntel 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and "
1704af75078fSIntel 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
1705af75078fSIntel 			  "configuration file\n");
1706af75078fSIntel 
1707af75078fSIntel 	set_def_fwd_config();
1708af75078fSIntel 	if (nb_lcores == 0)
1709af75078fSIntel 		rte_panic("Empty set of forwarding logical cores - check the "
1710af75078fSIntel 			  "core mask supplied in the command parameters\n");
1711af75078fSIntel 
1712af75078fSIntel 	argc -= diag;
1713af75078fSIntel 	argv += diag;
1714af75078fSIntel 	if (argc > 1)
1715af75078fSIntel 		launch_args_parse(argc, argv);
1716af75078fSIntel 
1717af75078fSIntel 	if (nb_rxq > nb_txq)
1718af75078fSIntel 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1719af75078fSIntel 		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
1720af75078fSIntel 		       nb_rxq, nb_txq);
1721af75078fSIntel 
1722af75078fSIntel 	init_config();
1723ce8d5614SIntel 	start_port(RTE_PORT_ALL);
1724af75078fSIntel 
1725ce8d5614SIntel 	/* set all ports to promiscuous mode by default */
1726ce8d5614SIntel 	for (port_id = 0; port_id < nb_ports; port_id++)
1727ce8d5614SIntel 		rte_eth_promiscuous_enable(port_id);
1728af75078fSIntel 
1729af75078fSIntel 	if (interactive == 1)
1730af75078fSIntel 		prompt();
1731af75078fSIntel 	else {
1732af75078fSIntel 		char c;
1733af75078fSIntel 		int rc;
1734af75078fSIntel 
1735af75078fSIntel 		printf("No command-line core given, starting packet forwarding\n");
1736af75078fSIntel 		start_packet_forwarding(0);
1737af75078fSIntel 		printf("Press enter to exit\n");
1738af75078fSIntel 		rc = read(0, &c, 1);
1739af75078fSIntel 		if (rc < 0)
1740af75078fSIntel 			return 1;
1741af75078fSIntel 	}
1742af75078fSIntel 
1743af75078fSIntel 	return 0;
1744af75078fSIntel }
1745