/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

/* basic constants used in application */
#define MAX_QUEUES 1024
/*
 * 1024 queues are required to meet the needs of a large number of vmdq pools.
 * The mbuf pool is sized per port as
 * (number of RX/TX queues) * (number of RX/TX ring descriptors).
 */
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RX_DESC_DEFAULT, \
						TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64

#define MAX_PKT_BURST 32

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RX_DESC_DEFAULT 1024
#define TX_DESC_DEFAULT 1024
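
/*
 * With the defaults above, NUM_MBUFS_PER_PORT works out to
 * 1024 queues * 1024 descriptors = 1,048,576 mbufs per port; main() then
 * multiplies this by the number of detected ports when creating the pool.
 */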

#define INVALID_PORT_ID 0xFF

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;

/* number of pools (32 by default, if the user does not specify any) */
static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
static enum rte_eth_nb_tcs num_tcs = RTE_ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;

/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
	},
	/*
	 * should be overridden separately in code with
	 * appropriate values
	 */
	.rx_adv_conf = {
		.vmdq_dcb_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
			.dcb_tc = {0},
		},
		.dcb_rx_conf = {
			.nb_tcs = RTE_ETH_4_TCS,
			/** Traffic class each user priority (UP) is mapped to. */
			.dcb_tc = {0},
		},
		.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
	.tx_adv_conf = {
		.vmdq_dcb_tx_conf = {
			.nb_queue_pools = RTE_ETH_32_POOLS,
			.dcb_tc = {0},
		},
	},
};
/* >8 End of empty vmdq+dcb configuration structure. */

/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};

/* Dividing up the possible user priority values. 8< */
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* Pool MAC address template; a pool MAC address looks like 52:54:00:12:<port#>:<pool#>. */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
};
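/*
 * port_init() below overwrites the last two template bytes with the port and
 * pool numbers, so e.g. pool 3 on port 1 is assigned MAC 52:54:00:12:01:03.
 */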

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
 * given above, and the number of traffic classes available for use. */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf)
{
	struct rte_eth_vmdq_dcb_conf conf;
	struct rte_eth_vmdq_rx_conf vmdq_conf;
	struct rte_eth_dcb_rx_conf dcb_conf;
	struct rte_eth_vmdq_dcb_tx_conf tx_conf;
	uint8_t i;

	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	vmdq_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	tx_conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	vmdq_conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	vmdq_conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
	vmdq_conf.default_pool = 0;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		vmdq_conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = 1UL << i;
		vmdq_conf.pool_map[i].pools = 1UL << i;
	}
	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.dcb_tc[i] = i % num_tcs;
		dcb_conf.dcb_tc[i] = i % num_tcs;
		tx_conf.dcb_tc[i] = i % num_tcs;
	}
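	/*
	 * With the default num_tcs of RTE_ETH_4_TCS, the loop above maps the
	 * eight DCB user priorities 0..7 to traffic classes 0,1,2,3,0,1,2,3.
	 */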
	dcb_conf.nb_tcs = (enum rte_eth_nb_tcs)num_tcs;
	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
			sizeof(conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &dcb_conf,
			sizeof(dcb_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &vmdq_conf,
			sizeof(vmdq_conf)));
	(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
			sizeof(tx_conf)));
	if (rss_enable) {
		eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
							RTE_ETH_RSS_UDP |
							RTE_ETH_RSS_TCP |
							RTE_ETH_RSS_SCTP;
	}
	return 0;
}
/* >8 End of dividing up the possible user priority values. */

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RX_DESC_DEFAULT;
	uint16_t txRingSize = TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	struct rte_eth_txconf txq_conf;
	uint64_t rss_hf_tmp;

	/*
	 * The max pool number from dev_info is used to validate the pool
	 * number specified on the command line.
	 */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * Allow processing only a subset of the VMDQ pools, as specified by
	 * num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into PF queues and VMDQ queues.
	 * The assumption here is that all ports have the same configuration.
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid with nb_pools %d:"
				" nb_tcs * nb_pools must equal %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}
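
	/*
	 * For example, a device exposing 128 VMDQ queues over 32 pools gives
	 * queues_per_pool = 4, so running with --nb-pools 16 configures
	 * 16 * 4 = 64 VMDQ queues starting at vmdq_queue_base.
	 */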

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
		printf("Port %u modified RSS hash function based on hardware support,"
			" requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port, rss_hf_tmp,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	/*
	 * In this example all queues, including the PF queues, are set up.
	 * This is because VMDQ queues do not always start from zero, and the
	 * PMD layer does not support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
			&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
			RTE_MAX(RX_DESC_DEFAULT, TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}
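
	/*
	 * rte_eth_dev_adjust_nb_rx_tx_desc() may round the descriptor counts
	 * up or down to the device's limits, so the check above guards
	 * against the adjusted sizes outgrowing the mbuf pool, which was
	 * sized from RX_DESC_DEFAULT/TX_DESC_DEFAULT.
	 */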

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					&txq_conf);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		printf("port %d MAC address get failed: %s\n", port,
			rte_strerror(-retval));
		return retval;
	}
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	/* Set mac for each pool. 8< */
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n",
			port, q, RTE_ETHER_ADDR_BYTES(&mac));
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}
	/* >8 End of set mac for each pool. */

	return 0;
}

/* Check the num_pools parameter and set it if OK */
static int
vmdq_parse_num_pools(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n != 16 && n != 32)
		return -1;
	if (n == 16)
		num_pools = RTE_ETH_16_POOLS;
	else
		num_pools = RTE_ETH_32_POOLS;

	return 0;
}

/* Check the num_tcs parameter and set it if OK */
static int
vmdq_parse_num_tcs(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (n != 4 && n != 8)
		return -1;
	if (n == 4)
		num_tcs = RTE_ETH_4_TCS;
	else
		num_tcs = RTE_ETH_8_TCS;

	return 0;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  --nb-pools NP: number of pools (default 32, or 16)\n"
		"  --nb-tcs NT: number of TCs (default 4, or 8)\n"
		"  --enable-rss: enable RSS (disabled by default)\n",
		prgname);
}
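
/*
 * Example invocation (the binary name and the EAL core/memory options depend
 * on how the examples were built and on the target platform):
 *   ./dpdk-vmdq_dcb -l 0-3 -n 4 -- -p 0x3 --nb-pools 32 --nb-tcs 4 --enable-rss
 */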

/* Parse the arguments given on the command line of the application */
static int
vmdq_parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"nb-tcs", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
		{NULL, 0, 0, 0}
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				vmdq_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strcmp(long_option[option_index].name, "nb-pools")) {
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "nb-tcs")) {
				if (vmdq_parse_num_tcs(optarg) == -1) {
					printf("invalid number of tcs\n");
					return -1;
				}
			}

			if (!strcmp(long_option[option_index].name, "enable-rss"))
				rss_enable = 1;
			break;

		default:
			vmdq_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
		return -1;
	}

	return 0;
}

static void
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
{
	struct rte_ether_hdr *eth;
	void *tmp;

	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->dst_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
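	/*
	 * Note: the 64-bit store above relies on a little-endian host to put
	 * 0x02 in the first destination-address byte and dst_port in the
	 * sixth; the two bytes it spills into the source address are
	 * overwritten by the copy below.
	 */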

	/* src addr */
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->src_addr);
}

/* When we receive a HUP signal, print out our stats */
static void
sighup_handler(int signum)
{
	unsigned q = vmdq_queue_base;

	for (; q < num_queues; q++) {
		if (q % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
				(num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	}
	printf("\nFinished handling signal %d\n", signum);
}
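
/*
 * The per-queue counters can thus be inspected at runtime by sending SIGHUP
 * to the running process, e.g. "kill -HUP <pid>" from another terminal.
 */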

/*
 * Main per-lcore worker: reads packets from its share of the VMDQ queues on
 * each enabled port and forwards them to the paired port (0 <-> 1, 2 <-> 3, ...).
 */
static int
lcore_main(void *arg)
{
	const uintptr_t core_num = (uintptr_t)arg;
	const unsigned num_cores = rte_lcore_count();
	uint16_t startQueue, endQueue;
	uint16_t q, i, p;
	const uint16_t quot = (uint16_t)(num_vmdq_queues / num_cores);
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);

	if (remainder) {
		if (core_num < remainder) {
			startQueue = (uint16_t)(core_num * (quot + 1));
			endQueue = (uint16_t)(startQueue + quot + 1);
		} else {
			startQueue = (uint16_t)(core_num * quot + remainder);
			endQueue = (uint16_t)(startQueue + quot);
		}
	} else {
		startQueue = (uint16_t)(core_num * quot);
		endQueue = (uint16_t)(startQueue + quot);
	}
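
	/*
	 * For example, 10 VMDQ queues shared by 4 cores gives quot = 2 and
	 * remainder = 2: cores 0 and 1 poll three queues each (0-2 and 3-5),
	 * cores 2 and 3 poll two each (6-7 and 8-9), before the
	 * vmdq_queue_base offset is applied below.
	 */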

	/* VMDQ queue indices do not always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
		rte_lcore_id(), startQueue, endQueue - 1);

	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", (unsigned)core_num);
		return 0;
	}

	for (;;) {
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t src = ports[p];
			const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

			if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
				continue;

			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(src,
					q, buf, buf_size);

				if (unlikely(rxCount == 0))
					continue;

				rxPackets[q] += rxCount;

				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dst);

				const uint16_t txCount = rte_eth_tx_burst(dst,
					q, buf, rxCount);
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
				}
			}
		}
	}
}

/*
 * Update the global variable num_ports and the array ports[] according to the
 * number of ports in the system, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}


/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE, "This program can only run on a"
			" power-of-two number of cores (1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global variable num_ports and the global array ports[],
	 * and get the value of valid_num_ports according to the number of
	 * ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every worker lcore */
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void *)i++, lcore_id);
	}
	/* call on main too */
	(void)lcore_main((void *)i);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}