/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq pools.
 * The mbuf pool must hold (RX/TX queue count * RX/TX ring descriptor count)
 * mbufs per port.
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
						RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (if the user does not specify any, 32 by default) */
static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
static enum rte_eth_nb_tcs   num_tcs   = ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* empty vmdq+dcb configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_DCB,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = ETH_4_TCS,
			/** Traffic class each UP mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf  vmdq_conf;
	struct rte_eth_dcb_rx_conf   dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

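	/*
	 * Map each vlan tag to its own pool: traffic tagged with
	 * vlan_tags[i] is steered to pool i (bit i of the pools mask).
	 */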
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
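	/* Map each user priority to traffic class (priority % num_tcs), for both RX and TX. */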
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			  sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			  sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			  sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			  sizeof(tx_conf)));
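	/* Optionally layer RSS (IP/UDP/TCP/SCTP hashing) on top of VMDQ+DCB. */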
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
							ETH_RSS_UDP |
							ETH_RSS_TCP |
							ETH_RSS_SCTP;
	}
	return 0;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The maximum pool count reported in dev_info is used to validate the
	 * pool count given on the command line.
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * Only a subset of the VMDQ pools may be used, as specified by
	 * num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration.
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
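	/*
	 * When the VMDQ pool region starts at pool 0, treat every rx queue
	 * on the port as a VMDQ queue.
	 */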
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid: with nb_pools %d,"
				" nb_tcs * nb_pools must equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));

		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * In this example all queues, including the pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

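	/* Set up every rx queue on the port; all queues draw mbufs from the shared pool. */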
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

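	/* tx queues use the device default config plus the offloads selected above. */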
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
		       rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool. */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}

/* Check num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = ETH_16_POOLS;
	else
		num_pools = ETH_32_POOLS;

	return 0;
}

/* Check num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = ETH_4_TCS;
	else
		num_tcs = ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
	"  --nb-pools NP: number of pools (32 default, 16)\n"
	"  --nb-tcs NP: number of TCs (4 default, 8)\n"
	"  --enable-rss: enable RSS (disabled by default)\n",
	       prgname);
}

/* Parse the arguments (portmask, pools, TCs, RSS) given on the command line */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
		&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint16_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
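	/*
	 * Note: the 8-byte store below also overwrites the first two bytes of
	 * the source MAC; they are rewritten by rte_ether_addr_copy() just after.
	 */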
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
					      (num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}

/*
 * Main per-lcore loop: read bursts from each enabled port and forward
 * them to its paired port.
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

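	/*
	 * Split the VMDQ queue range evenly across the cores; the first
	 * 'remainder' cores each take one extra queue.
	 */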
	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}

	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue   += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
	       rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global num_ports and the ports[] array according to the number
 * of ports on the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a"
				" power-of-two number of cores (1-%d)\n\n",
				RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global num_ports and the ports[] array, and get the
	 * number of valid ports according to the system port count.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on master too */
	(void) lcore_main((void *)i);

	return 0;
}