/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No NUMA support by default. */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
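
/*
 * Each engine supplies the packet_fwd callback run on every forwarding
 * stream, plus optional per-port begin/end hooks (struct fwd_engine is
 * declared in testpmd.h).  As a sketch only, a hypothetical new mode
 * would be registered by adding a descriptor to the NULL-terminated
 * array above:
 *
 *	static struct fwd_engine my_fwd_engine = {
 *		.fwd_mode_name  = "my_mode",     name used by "set fwd"
 *		.port_fwd_begin = NULL,          optional per-port setup
 *		.port_fwd_end   = NULL,          optional per-port teardown
 *		.packet_fwd     = my_pkt_fwd,    per-stream forwarding loop
 *	};
 *
 * "my_mode" and my_pkt_fwd are illustrative names, not part of this file.
 */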

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools - if
                                      * specified on the command line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/* DCB with VT enabled is the default queue mapping. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
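
/*
 * A note on the threshold semantics, assuming Intel 1/10 GbE devices where
 * these values program the PTHRESH/HTHRESH/WTHRESH fields of the per-queue
 * RXDCTL/TXDCTL registers: a descriptor prefetch is considered once the
 * NIC's on-chip descriptor buffer falls below pthresh, it is performed only
 * if at least hthresh valid descriptors are available in host memory, and
 * wthresh controls how many processed descriptors are accumulated before
 * their status is written back to host memory.  The defaults above are a
 * conservative starting point; optimal values are device-specific.
 */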

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS on IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* Flush RX queues by default. */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};

static volatile int test_done = 1; /* Packet forwarding is stopped when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Set up the default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in the data segment of the mbuf. */
	uint16_t seg_buf_size;   /**< size of the data segment in the mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of the data segment in the mbuf. */
};

static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf    *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}
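
/*
 * Resulting per-object layout (a sketch; the exact offset comes from
 * CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)) and the size from the
 * configured mbuf data size, see mbuf_pool_create() below):
 *
 *	+-----------------+----------------------+------------------+
 *	| struct rte_mbuf | RTE_PKTMBUF_HEADROOM |   packet data    |
 *	+-----------------+----------------------+------------------+
 *	^ raw_mbuf        ^ buf_addr             ^ pkt.data
 */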

static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}

static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	}
}
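
/*
 * The pool constructor runs once per pool and records the data room size in
 * the pool private area; the object constructor then runs once per mbuf.
 * init_config() below invokes this helper once per socket that needs a
 * pool, for example:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_id);
 */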

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single mbuf pool in
	 * socket 0 memory by default.
	 * Otherwise, create an mbuf pool in the memory of each of
	 * sockets 0 and 1.
	 *
	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
	 * and nb_txd can be configured at run time.
	 */
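	/*
	 * A sketch of the worst case behind this sizing: each port can pin
	 * up to RTE_TEST_RX_DESC_MAX mbufs in its RX rings and up to
	 * RTE_TEST_TX_DESC_MAX in its TX rings, each lcore can hold up to
	 * mb_mempool_cache mbufs in its per-lcore mempool cache, and one
	 * burst of MAX_PKT_BURST mbufs can be in flight; without NUMA
	 * support, the single pool is scaled by nb_ports below.
	 */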
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
							"failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);
				port_per_socket[socket_id]++;
			}
		}

		/* Set flags to initialize the port and its queues. */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool *
						port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
		}
	}
	init_port_config();
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* Set the socket id of each port according to NUMA support. */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = rte_eth_dev_socket_id(pid);
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* Clear the old streams. */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* Initialize the new streams. */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
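
/*
 * Note: one stream is allocated per (port, RX queue) pair, hence
 * nb_fwd_streams = nb_ports * nb_rxq.  The actual RX port/queue to
 * TX port/queue assignment of each stream is filled in later by
 * fwd_config_setup() (see config.c), which start_packet_forwarding()
 * calls before launching the forwarding cores.
 */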

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Track the second-highest count as well, as the
			 * comment above promises. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);

	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif
	/* Flow Director statistics. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64"	  Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss,
		       stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* In checksum mode, also display the checksum error counters. */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* Wait 10 milliseconds before retrying. */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}
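
/*
 * pkt_fwd points at the selected engine's per-stream callback.  As an
 * illustration of the contract only (the real engines live in iofwd.c,
 * macfwd.c, rxonly.c, txonly.c and csumonly.c), an IO-style callback
 * looks roughly like:
 *
 *	static void
 *	pkt_burst_io_forward(struct fwd_stream *fs)
 *	{
 *		struct rte_mbuf *pkts[MAX_PKT_BURST];
 *		uint16_t nb_rx, nb_tx;
 *
 *		nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
 *					 pkts, nb_pkt_per_burst);
 *		if (nb_rx == 0)
 *			return;
 *		fs->rx_packets += nb_rx;
 *		nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
 *					 pkts, nb_rx);
 *		fs->tx_packets += nb_tx;
 *		while (nb_tx < nb_rx) {
 *			fs->fwd_dropped++;
 *			rte_pktmbuf_free(pkts[nb_tx++]);
 *		}
 *	}
 *
 * This sketch omits the optional burst and cycle statistics kept by the
 * real engines.
 */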

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Set up the per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch the packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports "
					"must be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors  -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started. */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started. */
	return 1;
}

void
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
					rte_eth_dev_socket_id(pi));
			/* Configure the port. */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* Try to reconfigure the port next time. */
				port->need_reconfig = 1;
				return;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* Set up the TX queues. */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Failed to set up a TX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d TX queues\n", pi);
				/* Try to reconfigure the queues next time. */
				port->need_reconfig_queues = 1;
				return;
			}
			/* Set up the RX queues. */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							rxring_numa[pi]);
						return;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Failed to set up an RX queue; return. */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d RX queues\n", pi);
				/* Try to reconfigure the queues next time. */
				port->need_reconfig_queues = 1;
				return;
			}
		}
		/* Start the port. */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; revert its status. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		/* At least one port was started; the link status needs checking. */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports for up to 9 s, and print the final status. */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* Poll every 100 ms. */
#define MAX_CHECK_TIME 90 /* 9 s (90 * 100 ms) in total. */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* Print the link status if the print flag is set. */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* Clear the all_ports_up flag if any link is down. */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* After finally printing all link statuses, get out. */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* Set the print flag if all ports are up or on timeout. */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
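
/*
 * The mapping arrays consulted above are filled from the command line,
 * roughly as follows (illustrative; see parameters.c for the exact
 * option syntax):
 *
 *	--tx-queue-stats-mapping (port,queue,counter)[,(port,queue,counter)]
 *	--rx-queue-stats-mapping (port,queue,counter)[,(port,queue,counter)]
 *
 * Each entry directs the given queue's statistics into one of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-port counter registers.
 */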

void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 0) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

const uint16_t vlan_tags[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
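
/*
 * In the VMDQ+DCB configuration built below, these 32 VLAN tags are
 * spread round-robin over the queue pools: tag i goes to pool
 * (i % nb_queue_pools), so with 16 pools each pool receives two tags
 * and with 32 pools each tag gets its own pool.
 */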

static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for DCB+VT based on the VLAN
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations. */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* Set the multi-queue DCB mode for RX and TX. */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	} else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* Queue mapping configuration of DCB RX and TX. */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}

int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* RX and TX queue configuration in DCB mode. */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status. */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* Set the DCB configuration for both VT and non-VT modes. */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}

#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed Ethernet devices - check "
			  "that CONFIG_RTE_LIBRTE_IGB_PMD=y, "
			  "CONFIG_RTE_LIBRTE_EM_PMD=y and "
			  "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
			  "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	start_port(RTE_PORT_ALL);

	/* Set all ports to promiscuous mode by default. */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		printf("No command-line core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}