xref: /dpdk/app/test-pmd/testpmd.c (revision 0d56cb81d54a49d516f55cf771ff93450b8dc310)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master lcore for the command line? */
85 uint8_t interactive = 0;
86 
87 /*
88  * NUMA support configuration.
89  * When set, the NUMA support attempts to dispatch the allocation of the
90  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
91  * probed ports among the CPU sockets 0 and 1.
92  * Otherwise, all memory is allocated from CPU socket 0.
93  */
94 uint8_t numa_support = 0; /**< No numa support by default */
95 
96 /*
97  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
98  * not configured.
99  */
100 uint8_t socket_num = UMA_NO_CONFIG;
101 
102 /*
103  * Use ANONYMOUS mapped memory (which may not be physically contiguous) for mbufs.
104  */
105 uint8_t mp_anon = 0;
106 
107 /*
108  * Record the Ethernet address of peer target ports to which packets are
109  * forwarded.
110  * Must be instantiated with the Ethernet addresses of peer traffic generator
111  * ports.
112  */
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
115 
116 /*
117  * Probed Target Environment.
118  */
119 struct rte_port *ports;	       /**< For all probed ethernet ports. */
120 portid_t nb_ports;             /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
123 
124 /*
125  * Test Forwarding Configuration.
126  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
128  */
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
132 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
133 
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
136 
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
139 
140 /*
141  * Forwarding engines.
142  */
143 struct fwd_engine * fwd_engines[] = {
144 	&io_fwd_engine,
145 	&mac_fwd_engine,
146 	&mac_retry_fwd_engine,
147 	&rx_only_engine,
148 	&tx_only_engine,
149 	&csum_fwd_engine,
150 #ifdef RTE_LIBRTE_IEEE1588
151 	&ieee1588_fwd_engine,
152 #endif
153 	NULL,
154 };
155 
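/*
 * The forwarding engine in use is selected at run time (cur_fwd_eng
 * below); for example, the interactive "set fwd <mode>" command picks
 * one of the entries above by name.
 */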
156 struct fwd_config cur_fwd_config;
157 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
158 
159 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
160 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
161                                       * specified on command-line. */
162 
163 /*
164  * Configuration of packet segments used by the "txonly" processing engine.
165  */
166 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
167 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
168 	TXONLY_DEF_PACKET_LEN,
169 };
170 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
171 
172 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
173 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
174 
175 /* Whether the current configuration is in DCB mode; 0 means it is not */
176 uint8_t dcb_config = 0;
177 
178 /* Whether DCB is in testing status */
179 uint8_t dcb_test = 0;
180 
181 /* DCB on and VT on mapping is default */
182 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
183 
184 /*
185  * Configurable number of RX/TX queues.
186  */
187 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
188 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
189 
190 /*
191  * Configurable number of RX/TX ring descriptors.
192  */
193 #define RTE_TEST_RX_DESC_DEFAULT 128
194 #define RTE_TEST_TX_DESC_DEFAULT 512
195 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
196 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
197 
198 /*
199  * Configurable values of RX and TX ring threshold registers.
200  */
201 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
202 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
203 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
204 
205 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
206 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
207 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
208 
209 struct rte_eth_thresh rx_thresh = {
210 	.pthresh = RX_PTHRESH,
211 	.hthresh = RX_HTHRESH,
212 	.wthresh = RX_WTHRESH,
213 };
214 
215 struct rte_eth_thresh tx_thresh = {
216 	.pthresh = TX_PTHRESH,
217 	.hthresh = TX_HTHRESH,
218 	.wthresh = TX_WTHRESH,
219 };
220 
221 /*
222  * Configurable value of RX free threshold.
223  */
224 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
225 
226 /*
227  * Configurable value of RX drop enable.
228  */
229 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
230 
231 /*
232  * Configurable value of TX free threshold.
233  */
234 uint16_t tx_free_thresh = 0; /* Use default values. */
235 
236 /*
237  * Configurable value of TX RS bit threshold.
238  */
239 uint16_t tx_rs_thresh = 0; /* Use default values. */
240 
241 /*
242  * Configurable value of TX queue flags.
243  */
244 uint32_t txq_flags = 0; /* No flags set. */
245 
246 /*
247  * Receive Side Scaling (RSS) configuration.
248  */
249 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
250 
251 /*
252  * Port topology configuration
253  */
254 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
255 
256 /*
257  * Avoid flushing all the RX streams before starting forwarding.
258  */
259 uint8_t no_flush_rx = 0; /* flush by default */
260 
261 /*
262  * NIC bypass mode configuration options.
263  */
264 #ifdef RTE_NIC_BYPASS
265 
266 /* The NIC bypass watchdog timeout. */
267 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
268 
269 #endif
270 
271 /*
272  * Ethernet device configuration.
273  */
274 struct rte_eth_rxmode rx_mode = {
275 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 	.split_hdr_size = 0,
277 	.header_split   = 0, /**< Header Split disabled. */
278 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
279 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
280 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
281 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
282 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
283 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
284 };
285 
286 struct rte_fdir_conf fdir_conf = {
287 	.mode = RTE_FDIR_MODE_NONE,
288 	.pballoc = RTE_FDIR_PBALLOC_64K,
289 	.status = RTE_FDIR_REPORT_STATUS,
290 	.flexbytes_offset = 0x6,
291 	.drop_queue = 127,
292 };
293 
294 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
295 
296 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
297 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
298 
299 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
300 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
301 
302 uint16_t nb_tx_queue_stats_mappings = 0;
303 uint16_t nb_rx_queue_stats_mappings = 0;
304 
305 /* Forward function declarations */
306 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
307 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
308 
309 /*
310  * Check if all the ports are started.
311  * If yes, return positive value. If not, return zero.
312  */
313 static int all_ports_started(void);
314 
315 /*
316  * Setup default configuration.
317  */
318 static void
319 set_default_fwd_lcores_config(void)
320 {
321 	unsigned int i;
322 	unsigned int nb_lc;
323 
324 	nb_lc = 0;
325 	for (i = 0; i < RTE_MAX_LCORE; i++) {
326 		if (! rte_lcore_is_enabled(i))
327 			continue;
328 		if (i == rte_get_master_lcore())
329 			continue;
330 		fwd_lcores_cpuids[nb_lc++] = i;
331 	}
332 	nb_lcores = (lcoreid_t) nb_lc;
333 	nb_cfg_lcores = nb_lcores;
334 	nb_fwd_lcores = 1;
335 }
336 
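/*
 * Give each possible peer port a default locally administered MAC
 * address of the form 02:00:00:00:00:<port_id> (bytes 1-4 stay zero
 * from the static initialization of peer_eth_addrs).
 */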
337 static void
338 set_def_peer_eth_addrs(void)
339 {
340 	portid_t i;
341 
342 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
343 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
344 		peer_eth_addrs[i].addr_bytes[5] = i;
345 	}
346 }
347 
348 static void
349 set_default_fwd_ports_config(void)
350 {
351 	portid_t pt_id;
352 
353 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
354 		fwd_ports_ids[pt_id] = pt_id;
355 
356 	nb_cfg_ports = nb_ports;
357 	nb_fwd_ports = nb_ports;
358 }
359 
360 void
361 set_def_fwd_config(void)
362 {
363 	set_default_fwd_lcores_config();
364 	set_def_peer_eth_addrs();
365 	set_default_fwd_ports_config();
366 }
367 
368 /*
369  * Configuration initialisation done once at init time.
370  */
371 struct mbuf_ctor_arg {
372 	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
373 	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
374 };
375 
376 struct mbuf_pool_ctor_arg {
377 	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
378 };
379 
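/*
 * Per-mbuf constructor, invoked once per element when the pool is
 * populated. Points buf_addr just past the (cache-line rounded)
 * rte_mbuf header, computes the matching physical address and resets
 * the packet metadata.
 */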
380 static void
381 testpmd_mbuf_ctor(struct rte_mempool *mp,
382 		  void *opaque_arg,
383 		  void *raw_mbuf,
384 		  __attribute__((unused)) unsigned i)
385 {
386 	struct mbuf_ctor_arg *mb_ctor_arg;
387 	struct rte_mbuf    *mb;
388 
389 	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
390 	mb = (struct rte_mbuf *) raw_mbuf;
391 
392 	mb->type         = RTE_MBUF_PKT;
393 	mb->pool         = mp;
394 	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
395 	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
396 			mb_ctor_arg->seg_buf_offset);
397 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
399 	mb->ol_flags     = 0;
400 	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
401 	mb->pkt.nb_segs  = 1;
402 	mb->pkt.vlan_macip.data = 0;
403 	mb->pkt.hash.rss = 0;
404 }
405 
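/*
 * Pool constructor: records the data room size of the mbufs in the
 * private area of the mempool so that the pktmbuf code can retrieve
 * it later.
 */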
406 static void
407 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
408 		       void *opaque_arg)
409 {
410 	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
411 	struct rte_pktmbuf_pool_private *mbp_priv;
412 
413 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
414 		printf("%s(%s) private_data_size %d < %d\n",
415 		       __func__, mp->name, (int) mp->private_data_size,
416 		       (int) sizeof(struct rte_pktmbuf_pool_private));
417 		return;
418 	}
419 	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
420 	mbp_priv = rte_mempool_get_priv(mp);
421 	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
422 }
423 
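/*
 * Create a pool of mbufs on the given socket. Each element is laid out
 * as [rte_mbuf header, rounded up to a cache line | headroom | data],
 * which is why mb_size below is the sum of the header offset and the
 * buffer size.
 */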
424 static void
425 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
426 		 unsigned int socket_id)
427 {
428 	char pool_name[RTE_MEMPOOL_NAMESIZE];
429 	struct rte_mempool *rte_mp;
430 	struct mbuf_pool_ctor_arg mbp_ctor_arg;
431 	struct mbuf_ctor_arg mb_ctor_arg;
432 	uint32_t mb_size;
433 
434 	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
435 						mbuf_seg_size);
436 	mb_ctor_arg.seg_buf_offset =
437 		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
438 	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
439 	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
440 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
441 
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444                                    (unsigned) mb_mempool_cache,
445                                    sizeof(struct rte_pktmbuf_pool_private),
446                                    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
447                                    testpmd_mbuf_ctor, &mb_ctor_arg,
448                                    socket_id, 0);
449 
450 
451 
452 #else
453 	if (mp_anon != 0)
454 		rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
455 				    (unsigned) mb_mempool_cache,
456 				    sizeof(struct rte_pktmbuf_pool_private),
457 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
458 				    testpmd_mbuf_ctor, &mb_ctor_arg,
459 				    socket_id, 0);
460 	else
461 		rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
462 				    (unsigned) mb_mempool_cache,
463 				    sizeof(struct rte_pktmbuf_pool_private),
464 				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
465 				    testpmd_mbuf_ctor, &mb_ctor_arg,
466 				    socket_id, 0);
467 
468 #endif
469 
470 	if (rte_mp == NULL) {
471 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
472 						"failed\n", socket_id);
473 	} else if (verbose_level > 0) {
474 		rte_mempool_dump(rte_mp);
475 	}
476 }
477 
478 static void
479 init_config(void)
480 {
481 	portid_t pid;
482 	struct rte_port *port;
483 	struct rte_mempool *mbp;
484 	unsigned int nb_mbuf_per_pool;
485 	lcoreid_t  lc_id;
486 	uint8_t port_per_socket[MAX_SOCKET];
487 
488 	memset(port_per_socket,0,MAX_SOCKET);
489 	/* Configuration of logical cores. */
490 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
491 				sizeof(struct fwd_lcore *) * nb_lcores,
492 				CACHE_LINE_SIZE);
493 	if (fwd_lcores == NULL) {
494 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
495 							"failed\n", nb_lcores);
496 	}
497 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
498 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
499 					       sizeof(struct fwd_lcore),
500 					       CACHE_LINE_SIZE);
501 		if (fwd_lcores[lc_id] == NULL) {
502 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
503 								"failed\n");
504 		}
505 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
506 	}
507 
508 	/*
509 	 * Create pools of mbuf.
510 	 * If NUMA support is disabled, create a single pool of mbuf in
511 	 * socket 0 memory by default.
512 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
513 	 *
514 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
515 	 * nb_txd can be configured at run time.
516 	 */
517 	if (param_total_num_mbufs)
518 		nb_mbuf_per_pool = param_total_num_mbufs;
519 	else {
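		/*
		 * Worst-case sizing: enough mbufs to fill the RX and TX
		 * descriptor rings, every per-lcore mempool cache, plus one
		 * extra burst in flight.
		 */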
520 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
521 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
522 
523 		if (!numa_support)
524 			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
525 	}
526 
527 	if (!numa_support) {
528 		if (socket_num == UMA_NO_CONFIG)
529 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
530 		else
531 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
532 						 socket_num);
533 	}
534 	/*
535 	 * Record which mbuf pool each logical core should use, if needed.
536 	 */
537 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
538 		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
539 		if (mbp == NULL)
540 			mbp = mbuf_pool_find(0);
541 		fwd_lcores[lc_id]->mbp = mbp;
542 	}
543 
544 	/* Configuration of Ethernet ports. */
545 	ports = rte_zmalloc("testpmd: ports",
546 			    sizeof(struct rte_port) * nb_ports,
547 			    CACHE_LINE_SIZE);
548 	if (ports == NULL) {
549 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
550 							"failed\n", nb_ports);
551 	}
552 
553 	for (pid = 0; pid < nb_ports; pid++) {
554 		port = &ports[pid];
555 		rte_eth_dev_info_get(pid, &port->dev_info);
556 
557 		if (numa_support) {
558 			if (port_numa[pid] != NUMA_NO_CONFIG)
559 				port_per_socket[port_numa[pid]]++;
560 			else {
561 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
562 				port_per_socket[socket_id]++;
563 			}
564 		}
565 
566 		/* set flag to initialize port/queue */
567 		port->need_reconfig = 1;
568 		port->need_reconfig_queues = 1;
569 	}
570 
571 	if (numa_support) {
572 		uint8_t i;
573 		unsigned int nb_mbuf;
574 
575 		if (param_total_num_mbufs)
576 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
577 
578 		for (i = 0; i < MAX_SOCKET; i++) {
579 			nb_mbuf = (nb_mbuf_per_pool *
580 						port_per_socket[i]);
581 			if (nb_mbuf)
582 				mbuf_pool_create(mbuf_data_size,
583 						nb_mbuf,i);
584 		}
585 	}
586 	init_port_config();
587 	/* Configuration of packet forwarding streams. */
588 	if (init_fwd_streams() < 0)
589 		rte_exit(EXIT_FAILURE, "init_fwd_streams() failed\n");
590 }
591 
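/*
 * (Re)allocate one forwarding stream per RX queue of each port and
 * record the NUMA socket each port belongs to. Returns 0 on success,
 * -1 when nb_rxq or nb_txq exceeds a port's capabilities.
 */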
592 int
593 init_fwd_streams(void)
594 {
595 	portid_t pid;
596 	struct rte_port *port;
597 	streamid_t sm_id, nb_fwd_streams_new;
598 
599 	/* Set the socket id of each port, depending on NUMA support */
600 	for (pid = 0; pid < nb_ports; pid++) {
601 		port = &ports[pid];
602 		if (nb_rxq > port->dev_info.max_rx_queues) {
603 			printf("Fail: nb_rxq(%d) is greater than "
604 				"max_rx_queues(%d)\n", nb_rxq,
605 				port->dev_info.max_rx_queues);
606 			return -1;
607 		}
608 		if (nb_txq > port->dev_info.max_tx_queues) {
609 			printf("Fail: nb_txq(%d) is greater than "
610 				"max_tx_queues(%d)\n", nb_txq,
611 				port->dev_info.max_tx_queues);
612 			return -1;
613 		}
614 		if (numa_support)
615 			port->socket_id = rte_eth_dev_socket_id(pid);
616 		else {
617 			if (socket_num == UMA_NO_CONFIG)
618 				port->socket_id = 0;
619 			else
620 				port->socket_id = socket_num;
621 		}
622 	}
623 
624 	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
625 	if (nb_fwd_streams_new == nb_fwd_streams)
626 		return 0;
627 	/* clear the old */
628 	if (fwd_streams != NULL) {
629 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
630 			if (fwd_streams[sm_id] == NULL)
631 				continue;
632 			rte_free(fwd_streams[sm_id]);
633 			fwd_streams[sm_id] = NULL;
634 		}
635 		rte_free(fwd_streams);
636 		fwd_streams = NULL;
637 	}
638 
639 	/* init new */
640 	nb_fwd_streams = nb_fwd_streams_new;
641 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
642 		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
643 	if (fwd_streams == NULL)
644 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
645 						"failed\n", nb_fwd_streams);
646 
647 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
648 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
649 				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
650 		if (fwd_streams[sm_id] == NULL)
651 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
652 								" failed\n");
653 	}
654 
655 	return 0;
656 }
657 
658 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
659 static void
660 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
661 {
662 	unsigned int total_burst;
663 	unsigned int nb_burst;
664 	unsigned int burst_stats[3];
665 	uint16_t pktnb_stats[3];
666 	uint16_t nb_pkt;
667 	int burst_percent[3];
668 
669 	/*
670 	 * First compute the total number of packet bursts and the
671 	 * two highest numbers of bursts of the same number of packets.
672 	 */
673 	total_burst = 0;
674 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
675 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
676 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
677 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
678 		if (nb_burst == 0)
679 			continue;
680 		total_burst += nb_burst;
681 		if (nb_burst > burst_stats[0]) {
682 			burst_stats[1] = burst_stats[0];
683 			pktnb_stats[1] = pktnb_stats[0];
684 			burst_stats[0] = nb_burst;
685 			pktnb_stats[0] = nb_pkt;
686 		} else if (nb_burst > burst_stats[1]) {
			/*
			 * Also track a burst that beats the current runner-up;
			 * without this branch burst_stats[1] only ever holds a
			 * demoted maximum, not the true second-highest count.
			 */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
687 	}
688 	if (total_burst == 0)
689 		return;
690 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
691 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
692 	       burst_percent[0], (int) pktnb_stats[0]);
693 	if (burst_stats[0] == total_burst) {
694 		printf("]\n");
695 		return;
696 	}
697 	if (burst_stats[0] + burst_stats[1] == total_burst) {
698 		printf(" + %d%% of %d pkts]\n",
699 		       100 - burst_percent[0], pktnb_stats[1]);
700 		return;
701 	}
702 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
703 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
704 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
705 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
706 		return;
707 	}
708 	printf(" + %d%% of %d pkts + %d%% of others]\n",
709 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
710 }
711 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
712 
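/*
 * Display the per-port forward statistics accumulated during a run,
 * with a different layout depending on whether per-queue stats
 * register mappings are enabled on the port.
 */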
713 static void
714 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
715 {
716 	struct rte_port *port;
717 	uint8_t i;
718 
719 	static const char *fwd_stats_border = "----------------------";
720 
721 	port = &ports[port_id];
722 	printf("\n  %s Forward statistics for port %-2d %s\n",
723 	       fwd_stats_border, port_id, fwd_stats_border);
724 
725 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
726 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
727 		       "%-"PRIu64"\n",
728 		       stats->ipackets, stats->ierrors,
729 		       (uint64_t) (stats->ipackets + stats->ierrors));
730 
731 		if (cur_fwd_eng == &csum_fwd_engine)
732 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
733 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
734 
735 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
736 		       "%-"PRIu64"\n",
737 		       stats->opackets, port->tx_dropped,
738 		       (uint64_t) (stats->opackets + port->tx_dropped));
739 
740 		if (stats->rx_nombuf > 0)
741 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
742 
743 	}
744 	else {
745 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
746 		       "%14"PRIu64"\n",
747 		       stats->ipackets, stats->ierrors,
748 		       (uint64_t) (stats->ipackets + stats->ierrors));
749 
750 		if (cur_fwd_eng == &csum_fwd_engine)
751 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
752 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
753 
754 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
755 		       "%14"PRIu64"\n",
756 		       stats->opackets, port->tx_dropped,
757 		       (uint64_t) (stats->opackets + port->tx_dropped));
758 
759 		if (stats->rx_nombuf > 0)
760 			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
761 	}
762 
763 	/* Display statistics of XON/XOFF pause frames, if any. */
764 	if ((stats->tx_pause_xon  | stats->rx_pause_xon |
765 	     stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
766 		printf("  RX-XOFF:    %-14"PRIu64" RX-XON:     %-14"PRIu64"\n",
767 		       stats->rx_pause_xoff, stats->rx_pause_xon);
768 		printf("  TX-XOFF:    %-14"PRIu64" TX-XON:     %-14"PRIu64"\n",
769 		       stats->tx_pause_xoff, stats->tx_pause_xon);
770 	}
771 
772 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
773 	if (port->rx_stream)
774 		pkt_burst_stats_display("RX",
775 			&port->rx_stream->rx_burst_stats);
776 	if (port->tx_stream)
777 		pkt_burst_stats_display("TX",
778 			&port->tx_stream->tx_burst_stats);
779 #endif
780 	/* stats fdir */
781 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
782 		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
783 		       stats->fdirmiss,
784 		       stats->fdirmatch);
785 
786 	if (port->rx_queue_stats_mapping_enabled) {
787 		printf("\n");
788 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
789 			printf("  Stats reg %2d RX-packets:%14"PRIu64
790 			       "     RX-errors:%14"PRIu64
791 			       "    RX-bytes:%14"PRIu64"\n",
792 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
793 		}
794 		printf("\n");
795 	}
796 	if (port->tx_queue_stats_mapping_enabled) {
797 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
798 			printf("  Stats reg %2d TX-packets:%14"PRIu64
799 			       "                                 TX-bytes:%14"PRIu64"\n",
800 			       i, stats->q_opackets[i], stats->q_obytes[i]);
801 		}
802 	}
803 
804 	printf("  %s--------------------------------%s\n",
805 	       fwd_stats_border, fwd_stats_border);
806 }
807 
808 static void
809 fwd_stream_stats_display(streamid_t stream_id)
810 {
811 	struct fwd_stream *fs;
812 	static const char *fwd_top_stats_border = "-------";
813 
814 	fs = fwd_streams[stream_id];
815 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
816 	    (fs->fwd_dropped == 0))
817 		return;
818 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
819 	       "TX Port=%2d/Queue=%2d %s\n",
820 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
821 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
822 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
823 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
824 
825 	/* if checksum mode */
826 	if (cur_fwd_eng == &csum_fwd_engine) {
827 	       printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
828 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
829 	}
830 
831 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
832 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
833 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
834 #endif
835 }
836 
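/*
 * Drain packets still sitting in the RX queues of the forwarding ports
 * so that a run starts from a clean state. A second pass after a short
 * delay catches packets that were in flight during the first one.
 */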
837 static void
838 flush_fwd_rx_queues(void)
839 {
840 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
841 	portid_t  rxp;
842 	portid_t port_id;
843 	queueid_t rxq;
844 	uint16_t  nb_rx;
845 	uint16_t  i;
846 	uint8_t   j;
847 
848 	for (j = 0; j < 2; j++) {
849 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
850 			for (rxq = 0; rxq < nb_rxq; rxq++) {
851 				port_id = fwd_ports_ids[rxp];
852 				do {
853 					nb_rx = rte_eth_rx_burst(port_id, rxq,
854 						pkts_burst, MAX_PKT_BURST);
855 					for (i = 0; i < nb_rx; i++)
856 						rte_pktmbuf_free(pkts_burst[i]);
857 				} while (nb_rx > 0);
858 			}
859 		}
860 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
861 	}
862 }
863 
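/*
 * Main per-lcore loop: repeatedly invoke the packet forwarding callback
 * on every stream assigned to this lcore until it is told to stop.
 */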
864 static void
865 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
866 {
867 	struct fwd_stream **fsm;
868 	streamid_t nb_fs;
869 	streamid_t sm_id;
870 
871 	fsm = &fwd_streams[fc->stream_idx];
872 	nb_fs = fc->stream_nb;
873 	do {
874 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
875 			(*pkt_fwd)(fsm[sm_id]);
876 	} while (! fc->stopped);
877 }
878 
879 static int
880 start_pkt_forward_on_core(void *fwd_arg)
881 {
882 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
883 			     cur_fwd_config.fwd_eng->packet_fwd);
884 	return 0;
885 }
886 
887 /*
888  * Run the TXONLY packet forwarding engine to send a single burst of packets.
889  * Used to start communication flows in network loopback test configurations.
890  */
891 static int
892 run_one_txonly_burst_on_core(void *fwd_arg)
893 {
894 	struct fwd_lcore *fwd_lc;
895 	struct fwd_lcore tmp_lcore;
896 
897 	fwd_lc = (struct fwd_lcore *) fwd_arg;
898 	tmp_lcore = *fwd_lc;
899 	tmp_lcore.stopped = 1;
900 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
901 	return 0;
902 }
903 
904 /*
905  * Launch packet forwarding:
906  *     - Setup per-port forwarding context.
907  *     - launch logical cores with their forwarding configuration.
908  */
909 static void
910 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
911 {
912 	port_fwd_begin_t port_fwd_begin;
913 	unsigned int i;
914 	unsigned int lc_id;
915 	int diag;
916 
917 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
918 	if (port_fwd_begin != NULL) {
919 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
920 			(*port_fwd_begin)(fwd_ports_ids[i]);
921 	}
922 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
923 		lc_id = fwd_lcores_cpuids[i];
924 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
925 			fwd_lcores[i]->stopped = 0;
926 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
927 						     fwd_lcores[i], lc_id);
928 			if (diag != 0)
929 				printf("launch lcore %u failed - diag=%d\n",
930 				       lc_id, diag);
931 		}
932 	}
933 }
934 
935 /*
936  * Launch packet forwarding configuration.
937  */
938 void
939 start_packet_forwarding(int with_tx_first)
940 {
941 	port_fwd_begin_t port_fwd_begin;
942 	port_fwd_end_t  port_fwd_end;
943 	struct rte_port *port;
944 	unsigned int i;
945 	portid_t   pt_id;
946 	streamid_t sm_id;
947 
948 	if (all_ports_started() == 0) {
949 		printf("Not all ports were started\n");
950 		return;
951 	}
952 	if (test_done == 0) {
953 		printf("Packet forwarding already started\n");
954 		return;
955 	}
956 	if(dcb_test) {
957 		for (i = 0; i < nb_fwd_ports; i++) {
958 			pt_id = fwd_ports_ids[i];
959 			port = &ports[pt_id];
960 			if (!port->dcb_flag) {
961 				printf("In DCB mode, all forwarding ports must "
962                                        "be configured in this mode.\n");
963 				return;
964 			}
965 		}
966 		if (nb_fwd_lcores == 1) {
967 			printf("In DCB mode, the number of forwarding cores "
968                                "should be larger than 1.\n");
969 			return;
970 		}
971 	}
972 	test_done = 0;
973 
974 	if(!no_flush_rx)
975 		flush_fwd_rx_queues();
976 
977 	fwd_config_setup();
978 	rxtx_config_display();
979 
980 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
981 		pt_id = fwd_ports_ids[i];
982 		port = &ports[pt_id];
983 		rte_eth_stats_get(pt_id, &port->stats);
984 		port->tx_dropped = 0;
985 
986 		map_port_queue_stats_mapping_registers(pt_id, port);
987 	}
988 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
989 		fwd_streams[sm_id]->rx_packets = 0;
990 		fwd_streams[sm_id]->tx_packets = 0;
991 		fwd_streams[sm_id]->fwd_dropped = 0;
992 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
993 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
994 
995 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
996 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
997 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
998 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
999 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1000 #endif
1001 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1002 		fwd_streams[sm_id]->core_cycles = 0;
1003 #endif
1004 	}
1005 	if (with_tx_first) {
1006 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1007 		if (port_fwd_begin != NULL) {
1008 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1009 				(*port_fwd_begin)(fwd_ports_ids[i]);
1010 		}
1011 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1012 		rte_eal_mp_wait_lcore();
1013 		port_fwd_end = tx_only_engine.port_fwd_end;
1014 		if (port_fwd_end != NULL) {
1015 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1016 				(*port_fwd_end)(fwd_ports_ids[i]);
1017 		}
1018 	}
1019 	launch_packet_forwarding(start_pkt_forward_on_core);
1020 }
1021 
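/*
 * Stop packet forwarding: signal the forwarding lcores to stop, wait
 * for them to finish, run the engine's per-port teardown hook, then
 * aggregate and display per-stream and per-port statistics.
 */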
1022 void
1023 stop_packet_forwarding(void)
1024 {
1025 	struct rte_eth_stats stats;
1026 	struct rte_port *port;
1027 	port_fwd_end_t  port_fwd_end;
1028 	int i;
1029 	portid_t   pt_id;
1030 	streamid_t sm_id;
1031 	lcoreid_t  lc_id;
1032 	uint64_t total_recv;
1033 	uint64_t total_xmit;
1034 	uint64_t total_rx_dropped;
1035 	uint64_t total_tx_dropped;
1036 	uint64_t total_rx_nombuf;
1037 	uint64_t tx_dropped;
1038 	uint64_t rx_bad_ip_csum;
1039 	uint64_t rx_bad_l4_csum;
1040 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1041 	uint64_t fwd_cycles;
1042 #endif
1043 	static const char *acc_stats_border = "+++++++++++++++";
1044 
1045 	if (all_ports_started() == 0) {
1046 		printf("Not all ports were started\n");
1047 		return;
1048 	}
1049 	if (test_done) {
1050 		printf("Packet forwarding not started\n");
1051 		return;
1052 	}
1053 	printf("Telling cores to stop...");
1054 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1055 		fwd_lcores[lc_id]->stopped = 1;
1056 	printf("\nWaiting for lcores to finish...\n");
1057 	rte_eal_mp_wait_lcore();
1058 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1059 	if (port_fwd_end != NULL) {
1060 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1061 			pt_id = fwd_ports_ids[i];
1062 			(*port_fwd_end)(pt_id);
1063 		}
1064 	}
1065 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1066 	fwd_cycles = 0;
1067 #endif
1068 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1069 		if (cur_fwd_config.nb_fwd_streams >
1070 		    cur_fwd_config.nb_fwd_ports) {
1071 			fwd_stream_stats_display(sm_id);
1072 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1073 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1074 		} else {
1075 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1076 				fwd_streams[sm_id];
1077 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1078 				fwd_streams[sm_id];
1079 		}
1080 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1081 		tx_dropped = (uint64_t) (tx_dropped +
1082 					 fwd_streams[sm_id]->fwd_dropped);
1083 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1084 
1085 		rx_bad_ip_csum =
1086 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1087 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1088 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1089 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1090 							rx_bad_ip_csum;
1091 
1092 		rx_bad_l4_csum =
1093 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1094 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1095 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1096 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1097 							rx_bad_l4_csum;
1098 
1099 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1100 		fwd_cycles = (uint64_t) (fwd_cycles +
1101 					 fwd_streams[sm_id]->core_cycles);
1102 #endif
1103 	}
1104 	total_recv = 0;
1105 	total_xmit = 0;
1106 	total_rx_dropped = 0;
1107 	total_tx_dropped = 0;
1108 	total_rx_nombuf  = 0;
1109 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1110 		pt_id = fwd_ports_ids[i];
1111 
1112 		port = &ports[pt_id];
1113 		rte_eth_stats_get(pt_id, &stats);
1114 		stats.ipackets -= port->stats.ipackets;
1115 		port->stats.ipackets = 0;
1116 		stats.opackets -= port->stats.opackets;
1117 		port->stats.opackets = 0;
1118 		stats.ibytes   -= port->stats.ibytes;
1119 		port->stats.ibytes = 0;
1120 		stats.obytes   -= port->stats.obytes;
1121 		port->stats.obytes = 0;
1122 		stats.ierrors  -= port->stats.ierrors;
1123 		port->stats.ierrors = 0;
1124 		stats.oerrors  -= port->stats.oerrors;
1125 		port->stats.oerrors = 0;
1126 		stats.rx_nombuf -= port->stats.rx_nombuf;
1127 		port->stats.rx_nombuf = 0;
1128 		stats.fdirmatch -= port->stats.fdirmatch;
1129 		port->stats.fdirmatch = 0;
1130 		stats.fdirmiss -= port->stats.fdirmiss;
1131 		port->stats.fdirmiss = 0;
1132 
1133 		total_recv += stats.ipackets;
1134 		total_xmit += stats.opackets;
1135 		total_rx_dropped += stats.ierrors;
1136 		total_tx_dropped += port->tx_dropped;
1137 		total_rx_nombuf  += stats.rx_nombuf;
1138 
1139 		fwd_port_stats_display(pt_id, &stats);
1140 	}
1141 	printf("\n  %s Accumulated forward statistics for all ports"
1142 	       "%s\n",
1143 	       acc_stats_border, acc_stats_border);
1144 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1145 	       "%-"PRIu64"\n"
1146 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1147 	       "%-"PRIu64"\n",
1148 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1149 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1150 	if (total_rx_nombuf > 0)
1151 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1152 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1153 	       "%s\n",
1154 	       acc_stats_border, acc_stats_border);
1155 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1156 	if (total_recv > 0)
1157 		printf("\n  CPU cycles/packet=%u (total cycles="
1158 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1159 		       (unsigned int)(fwd_cycles / total_recv),
1160 		       fwd_cycles, total_recv);
1161 #endif
1162 	printf("\nDone.\n");
1163 	test_done = 1;
1164 }
1165 
1166 static int
1167 all_ports_started(void)
1168 {
1169 	portid_t pi;
1170 	struct rte_port *port;
1171 
1172 	for (pi = 0; pi < nb_ports; pi++) {
1173 		port = &ports[pi];
1174 		/* Check if there is a port which is not started */
1175 		if (port->port_status != RTE_PORT_STARTED)
1176 			return 0;
1177 	}
1178 
1179 	/* All ports are started */
1180 	return 1;
1181 }
1182 
1183 int
1184 start_port(portid_t pid)
1185 {
1186 	int diag, need_check_link_status = 0;
1187 	portid_t pi;
1188 	queueid_t qi;
1189 	struct rte_port *port;
1190 
1191 	if (test_done == 0) {
1192 		printf("Please stop forwarding first\n");
1193 		return -1;
1194 	}
1195 
1196 	if (init_fwd_streams() < 0) {
1197 		printf("init_fwd_streams() failed\n");
1198 		return -1;
1199 	}
1200 
1201 	if(dcb_config)
1202 		dcb_test = 1;
1203 	for (pi = 0; pi < nb_ports; pi++) {
1204 		if (pid < nb_ports && pid != pi)
1205 			continue;
1206 
1207 		port = &ports[pi];
1208 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1209 						 RTE_PORT_HANDLING) == 0) {
1210 			printf("Port %d is not stopped\n", pi);
1211 			continue;
1212 		}
1213 
1214 		if (port->need_reconfig > 0) {
1215 			port->need_reconfig = 0;
1216 
1217 			printf("Configuring Port %d (socket %d)\n", pi,
1218 					rte_eth_dev_socket_id(pi));
1219 			/* configure port */
1220 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1221 						&(port->dev_conf));
1222 			if (diag != 0) {
1223 				if (rte_atomic16_cmpset(&(port->port_status),
1224 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1225 					printf("Port %d can not be set back "
1226 							"to stopped\n", pi);
1227 				printf("Failed to configure port %d\n", pi);
1228 				/* try to reconfigure port next time */
1229 				port->need_reconfig = 1;
1230 				return -1;
1231 			}
1232 		}
1233 		if (port->need_reconfig_queues > 0) {
1234 			port->need_reconfig_queues = 0;
1235 			/* setup tx queues */
1236 			for (qi = 0; qi < nb_txq; qi++) {
1237 				if ((numa_support) &&
1238 					(txring_numa[pi] != NUMA_NO_CONFIG))
1239 					diag = rte_eth_tx_queue_setup(pi, qi,
1240 						nb_txd,txring_numa[pi],
1241 						&(port->tx_conf));
1242 				else
1243 					diag = rte_eth_tx_queue_setup(pi, qi,
1244 						nb_txd,port->socket_id,
1245 						&(port->tx_conf));
1246 
1247 				if (diag == 0)
1248 					continue;
1249 
1250 				/* Failed to set up a TX queue; return */
1251 				if (rte_atomic16_cmpset(&(port->port_status),
1252 							RTE_PORT_HANDLING,
1253 							RTE_PORT_STOPPED) == 0)
1254 					printf("Port %d can not be set back "
1255 							"to stopped\n", pi);
1256 				printf("Failed to configure port %d TX queues\n", pi);
1257 				/* try to reconfigure queues next time */
1258 				port->need_reconfig_queues = 1;
1259 				return -1;
1260 			}
1261 			/* setup rx queues */
1262 			for (qi = 0; qi < nb_rxq; qi++) {
1263 				if ((numa_support) &&
1264 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1265 					struct rte_mempool * mp =
1266 						mbuf_pool_find(rxring_numa[pi]);
1267 					if (mp == NULL) {
1268 						printf("Failed to setup RX queue:"
1269 							"No mempool allocation"
1270 							"on the socket %d\n",
1271 							rxring_numa[pi]);
1272 						return -1;
1273 					}
1274 
1275 					diag = rte_eth_rx_queue_setup(pi, qi,
1276 					     nb_rxd,rxring_numa[pi],
1277 					     &(port->rx_conf),mp);
1278 				}
1279 				else
1280 					diag = rte_eth_rx_queue_setup(pi, qi,
1281 					     nb_rxd,port->socket_id,
1282 					     &(port->rx_conf),
1283 				             mbuf_pool_find(port->socket_id));
1284 
1285 				if (diag == 0)
1286 					continue;
1287 
1288 
1289 				/* Failed to set up an RX queue; return */
1290 				if (rte_atomic16_cmpset(&(port->port_status),
1291 							RTE_PORT_HANDLING,
1292 							RTE_PORT_STOPPED) == 0)
1293 					printf("Port %d can not be set back "
1294 							"to stopped\n", pi);
1295 				printf("Failed to configure port %d RX queues\n", pi);
1296 				/* try to reconfigure queues next time */
1297 				port->need_reconfig_queues = 1;
1298 				return -1;
1299 			}
1300 		}
1301 		/* start port */
1302 		if (rte_eth_dev_start(pi) < 0) {
1303 			printf("Failed to start port %d\n", pi);
1304 
1305 		/* Failed to start the port; try to set it back to stopped */
1306 			if (rte_atomic16_cmpset(&(port->port_status),
1307 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1308 				printf("Port %d can not be set back to "
1309 							"stopped\n", pi);
1310 			continue;
1311 		}
1312 
1313 		if (rte_atomic16_cmpset(&(port->port_status),
1314 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1315 			printf("Port %d can not be set into started\n", pi);
1316 
1317 		/* at least one port started, need checking link status */
1318 		need_check_link_status = 1;
1319 	}
1320 
1321 	if (need_check_link_status)
1322 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1323 	else
1324 		printf("Please stop the ports first\n");
1325 
1326 	printf("Done\n");
1327 	return 0;
1328 }
1329 
1330 void
1331 stop_port(portid_t pid)
1332 {
1333 	portid_t pi;
1334 	struct rte_port *port;
1335 	int need_check_link_status = 0;
1336 
1337 	if (test_done == 0) {
1338 		printf("Please stop forwarding first\n");
1339 		return;
1340 	}
1341 	if (dcb_test) {
1342 		dcb_test = 0;
1343 		dcb_config = 0;
1344 	}
1345 	printf("Stopping ports...\n");
1346 
1347 	for (pi = 0; pi < nb_ports; pi++) {
1348 		if (pid < nb_ports && pid != pi)
1349 			continue;
1350 
1351 		port = &ports[pi];
1352 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1353 						RTE_PORT_HANDLING) == 0)
1354 			continue;
1355 
1356 		rte_eth_dev_stop(pi);
1357 
1358 		if (rte_atomic16_cmpset(&(port->port_status),
1359 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1360 			printf("Port %d can not be set into stopped\n", pi);
1361 		need_check_link_status = 1;
1362 	}
1363 	if (need_check_link_status)
1364 		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1365 
1366 	printf("Done\n");
1367 }
1368 
1369 void
1370 close_port(portid_t pid)
1371 {
1372 	portid_t pi;
1373 	struct rte_port *port;
1374 
1375 	if (test_done == 0) {
1376 		printf("Please stop forwarding first\n");
1377 		return;
1378 	}
1379 
1380 	printf("Closing ports...\n");
1381 
1382 	for (pi = 0; pi < nb_ports; pi++) {
1383 		if (pid < nb_ports && pid != pi)
1384 			continue;
1385 
1386 		port = &ports[pi];
1387 		if (rte_atomic16_cmpset(&(port->port_status),
1388 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1389 			printf("Port %d is not stopped\n", pi);
1390 			continue;
1391 		}
1392 
1393 		rte_eth_dev_close(pi);
1394 
1395 		if (rte_atomic16_cmpset(&(port->port_status),
1396 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1397 			printf("Port %d cannot be set to closed\n", pi);
1398 	}
1399 
1400 	printf("Done\n");
1401 }
1402 
1403 int
1404 all_ports_stopped(void)
1405 {
1406 	portid_t pi;
1407 	struct rte_port *port;
1408 
1409 	for (pi = 0; pi < nb_ports; pi++) {
1410 		port = &ports[pi];
1411 		if (port->port_status != RTE_PORT_STOPPED)
1412 			return 0;
1413 	}
1414 
1415 	return 1;
1416 }
1417 
1418 void
1419 pmd_test_exit(void)
1420 {
1421 	portid_t pt_id;
1422 
1423 	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1424 		printf("Stopping port %d...", pt_id);
1425 		fflush(stdout);
1426 		rte_eth_dev_close(pt_id);
1427 		printf("done\n");
1428 	}
1429 	printf("bye...\n");
1430 }
1431 
1432 typedef void (*cmd_func_t)(void);
1433 struct pmd_test_command {
1434 	const char *cmd_name;
1435 	cmd_func_t cmd_func;
1436 };
1437 
1438 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1439 
1440 /* Check the link status of all ports; wait up to 9 s in total and print the final status */
1441 static void
1442 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1443 {
1444 #define CHECK_INTERVAL 100 /* 100ms */
1445 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1446 	uint8_t portid, count, all_ports_up, print_flag = 0;
1447 	struct rte_eth_link link;
1448 
1449 	printf("Checking link statuses...\n");
1450 	fflush(stdout);
1451 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1452 		all_ports_up = 1;
1453 		for (portid = 0; portid < port_num; portid++) {
1454 			if ((port_mask & (1 << portid)) == 0)
1455 				continue;
1456 			memset(&link, 0, sizeof(link));
1457 			rte_eth_link_get_nowait(portid, &link);
1458 			/* print link status if flag set */
1459 			if (print_flag == 1) {
1460 				if (link.link_status)
1461 					printf("Port %d Link Up - speed %u "
1462 						"Mbps - %s\n", (uint8_t)portid,
1463 						(unsigned)link.link_speed,
1464 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1465 					("full-duplex") : ("half-duplex"));
1466 				else
1467 					printf("Port %d Link Down\n",
1468 						(uint8_t)portid);
1469 				continue;
1470 			}
1471 			/* clear all_ports_up flag if any link down */
1472 			if (link.link_status == 0) {
1473 				all_ports_up = 0;
1474 				break;
1475 			}
1476 		}
1477 		/* after finally printing all link status, get out */
1478 		if (print_flag == 1)
1479 			break;
1480 
1481 		if (all_ports_up == 0) {
1482 			fflush(stdout);
1483 			rte_delay_ms(CHECK_INTERVAL);
1484 		}
1485 
1486 		/* set the print_flag if all ports up or timeout */
1487 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1488 			print_flag = 1;
1489 		}
1490 	}
1491 }
1492 
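/*
 * Program the user-supplied TX queue <-> stats-register mappings into
 * the port. Returns 0 on success or the PMD error code on failure.
 */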
1493 static int
1494 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1495 {
1496 	uint16_t i;
1497 	int diag;
1498 	uint8_t mapping_found = 0;
1499 
1500 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1501 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1502 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1503 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1504 					tx_queue_stats_mappings[i].queue_id,
1505 					tx_queue_stats_mappings[i].stats_counter_id);
1506 			if (diag != 0)
1507 				return diag;
1508 			mapping_found = 1;
1509 		}
1510 	}
1511 	if (mapping_found)
1512 		port->tx_queue_stats_mapping_enabled = 1;
1513 	return 0;
1514 }
1515 
1516 static int
1517 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1518 {
1519 	uint16_t i;
1520 	int diag;
1521 	uint8_t mapping_found = 0;
1522 
1523 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1524 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1525 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1526 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1527 					rx_queue_stats_mappings[i].queue_id,
1528 					rx_queue_stats_mappings[i].stats_counter_id);
1529 			if (diag != 0)
1530 				return diag;
1531 			mapping_found = 1;
1532 		}
1533 	}
1534 	if (mapping_found)
1535 		port->rx_queue_stats_mapping_enabled = 1;
1536 	return 0;
1537 }
1538 
1539 static void
1540 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1541 {
1542 	int diag = 0;
1543 
1544 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1545 	if (diag != 0) {
1546 		if (diag == -ENOTSUP) {
1547 			port->tx_queue_stats_mapping_enabled = 0;
1548 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1549 		}
1550 		else
1551 			rte_exit(EXIT_FAILURE,
1552 					"set_tx_queue_stats_mapping_registers "
1553 					"failed for port id=%d diag=%d\n",
1554 					pi, diag);
1555 	}
1556 
1557 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1558 	if (diag != 0) {
1559 		if (diag == -ENOTSUP) {
1560 			port->rx_queue_stats_mapping_enabled = 0;
1561 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1562 		}
1563 		else
1564 			rte_exit(EXIT_FAILURE,
1565 					"set_rx_queue_stats_mapping_registers "
1566 					"failed for port id=%d diag=%d\n",
1567 					pi, diag);
1568 	}
1569 }
1570 
1571 void
1572 init_port_config(void)
1573 {
1574 	portid_t pid;
1575 	struct rte_port *port;
1576 
1577 	for (pid = 0; pid < nb_ports; pid++) {
1578 		port = &ports[pid];
1579 		port->dev_conf.rxmode = rx_mode;
1580 		port->dev_conf.fdir_conf = fdir_conf;
1581 		if (nb_rxq > 1) {
1582 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1583 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1584 		} else {
1585 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1586 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1587 		}
1588 
1589 		/* In SR-IOV mode, RSS mode is not available */
1590 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1591 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1592 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1593 			else
1594 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1595 		}
1596 
1597 		port->rx_conf.rx_thresh = rx_thresh;
1598 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1599 		port->rx_conf.rx_drop_en = rx_drop_en;
1600 		port->tx_conf.tx_thresh = tx_thresh;
1601 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1602 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1603 		port->tx_conf.txq_flags = txq_flags;
1604 
1605 		rte_eth_macaddr_get(pid, &port->eth_addr);
1606 
1607 		map_port_queue_stats_mapping_registers(pid, port);
1608 #ifdef RTE_NIC_BYPASS
1609 		rte_eth_dev_bypass_init(pid);
1610 #endif
1611 	}
1612 }
1613 
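/* VLAN IDs used to map incoming traffic to VMDQ pools in the DCB tests. */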
1614 const uint16_t vlan_tags[] = {
1615 		0,  1,  2,  3,  4,  5,  6,  7,
1616 		8,  9, 10, 11,  12, 13, 14, 15,
1617 		16, 17, 18, 19, 20, 21, 22, 23,
1618 		24, 25, 26, 27, 28, 29, 30, 31
1619 };
1620 
1621 static  int
1622 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1623 {
1624 	uint8_t i;
1625 
1626 	/*
1627 	 * Build up the correct configuration for DCB+VT based on the VLAN tags
1628 	 * array given above and the number of traffic classes available for use.
1629 	 */
1630 	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1631 		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1632 		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1633 
1634 		/* VMDQ+DCB RX and TX configurations */
1635 		vmdq_rx_conf.enable_default_pool = 0;
1636 		vmdq_rx_conf.default_pool = 0;
1637 		vmdq_rx_conf.nb_queue_pools =
1638 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1639 		vmdq_tx_conf.nb_queue_pools =
1640 			(dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1641 
1642 		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
1643 		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1644 			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1645 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1646 		}
1647 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1648 			vmdq_rx_conf.dcb_queue[i] = i;
1649 			vmdq_tx_conf.dcb_queue[i] = i;
1650 		}
1651 
1652 		/*set DCB mode of RX and TX of multiple queues*/
1653 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1654 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1655 		if (dcb_conf->pfc_en)
1656 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1657 		else
1658 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1659 
1660 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1661                                 sizeof(struct rte_eth_vmdq_dcb_conf)));
1662 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1663                                 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1664 	}
1665 	else {
1666 		struct rte_eth_dcb_rx_conf rx_conf;
1667 		struct rte_eth_dcb_tx_conf tx_conf;
1668 
1669 		/* queue mapping configuration of DCB RX and TX */
1670 		if (dcb_conf->num_tcs == ETH_4_TCS)
1671 			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1672 		else
1673 			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1674 
1675 		rx_conf.nb_tcs = dcb_conf->num_tcs;
1676 		tx_conf.nb_tcs = dcb_conf->num_tcs;
1677 
1678 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1679 			rx_conf.dcb_queue[i] = i;
1680 			tx_conf.dcb_queue[i] = i;
1681 		}
1682 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1683 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1684 		if (dcb_conf->pfc_en)
1685 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1686 		else
1687 			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1688 
1689 		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1690                                 sizeof(struct rte_eth_dcb_rx_conf)));
1691 		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1692                                 sizeof(struct rte_eth_dcb_tx_conf)));
1693 	}
1694 
1695 	return 0;
1696 }
1697 
1698 int
1699 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1700 {
1701 	struct rte_eth_conf port_conf;
1702 	struct rte_port *rte_port;
1703 	int retval;
1704 	uint16_t nb_vlan;
1705 	uint16_t i;
1706 
1707 	/* rxq and txq configuration in dcb mode */
1708 	nb_rxq = 128;
1709 	nb_txq = 128;
1710 	rx_free_thresh = 64;
1711 
1712 	memset(&port_conf,0,sizeof(struct rte_eth_conf));
1713 	/* Enter DCB configuration status */
1714 	dcb_config = 1;
1715 
1716 	nb_vlan = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
1717 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
1718 	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1719 	if (retval < 0)
1720 		return retval;
1721 
1722 	rte_port = &ports[pid];
1723 	memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1724 
1725 	rte_port->rx_conf.rx_thresh = rx_thresh;
1726 	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1727 	rte_port->tx_conf.tx_thresh = tx_thresh;
1728 	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1729 	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1730 	/* VLAN filter */
1731 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1732 	for (i = 0; i < nb_vlan; i++){
1733 		rx_vft_set(pid, vlan_tags[i], 1);
1734 	}
1735 
1736 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1737 	map_port_queue_stats_mapping_registers(pid, rte_port);
1738 
1739 	rte_port->dcb_flag = 1;
1740 
1741 	return 0;
1742 }
1743 
1744 #ifdef RTE_EXEC_ENV_BAREMETAL
1745 #define main _main
1746 #endif
1747 
1748 int
1749 main(int argc, char** argv)
1750 {
1751 	int  diag;
1752 	uint8_t port_id;
1753 
1754 	diag = rte_eal_init(argc, argv);
1755 	if (diag < 0)
1756 		rte_panic("Cannot init EAL\n");
1757 
1758 	if (rte_pmd_init_all())
1759 		rte_panic("Cannot init PMD\n");
1760 
1761 	if (rte_eal_pci_probe())
1762 		rte_panic("Cannot probe PCI\n");
1763 
1764 	nb_ports = (portid_t) rte_eth_dev_count();
1765 	if (nb_ports == 0)
1766 		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1767 							"check that "
1768 			  "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1769 			  "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1770 			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1771 			  "configuration file\n");
1772 
1773 	set_def_fwd_config();
1774 	if (nb_lcores == 0)
1775 		rte_panic("Empty set of forwarding logical cores - check the "
1776 			  "core mask supplied in the command parameters\n");
1777 
1778 	argc -= diag;
1779 	argv += diag;
1780 	if (argc > 1)
1781 		launch_args_parse(argc, argv);
1782 
1783 	if (nb_rxq > nb_txq)
1784 		printf("Warning: nb_rxq=%d enables RSS configuration, "
1785 		       "but nb_txq=%d will prevent to fully test it.\n",
1786 		       nb_rxq, nb_txq);
1787 
1788 	init_config();
1789 	if (start_port(RTE_PORT_ALL) != 0)
1790 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
1791 
1792 	/* set all ports to promiscuous mode by default */
1793 	for (port_id = 0; port_id < nb_ports; port_id++)
1794 		rte_eth_promiscuous_enable(port_id);
1795 
1796 #ifdef RTE_LIBRTE_CMDLINE
1797 	if (interactive == 1)
1798 		prompt();
1799 	else
1800 #endif
1801 	{
1802 		char c;
1803 		int rc;
1804 		printf("No command-line core given, starting packet forwarding\n");
1805 		printf("No commandline core given, start packet forwarding\n");
1806 		start_packet_forwarding(0);
1807 		printf("Press enter to exit\n");
1808 		rc = read(0, &c, 1);
1809 		if (rc < 0)
1810 			return 1;
1811 	}
1812 
1813 	return 0;
1814 }
1815