13998e2a0SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 23998e2a0SBruce Richardson * Copyright(c) 2010-2014 Intel Corporation 36bb97df5SIntel */ 46bb97df5SIntel 56bb97df5SIntel #include <stdint.h> 66bb97df5SIntel #include <sys/queue.h> 76bb97df5SIntel #include <stdlib.h> 86bb97df5SIntel #include <string.h> 96bb97df5SIntel #include <stdio.h> 106bb97df5SIntel #include <assert.h> 116bb97df5SIntel #include <errno.h> 126bb97df5SIntel #include <signal.h> 136bb97df5SIntel #include <stdarg.h> 146bb97df5SIntel #include <inttypes.h> 156bb97df5SIntel #include <getopt.h> 166bb97df5SIntel 176bb97df5SIntel #include <rte_common.h> 186bb97df5SIntel #include <rte_log.h> 196bb97df5SIntel #include <rte_memory.h> 206bb97df5SIntel #include <rte_memcpy.h> 216bb97df5SIntel #include <rte_eal.h> 226bb97df5SIntel #include <rte_launch.h> 236bb97df5SIntel #include <rte_cycles.h> 246bb97df5SIntel #include <rte_prefetch.h> 256bb97df5SIntel #include <rte_lcore.h> 266bb97df5SIntel #include <rte_per_lcore.h> 276bb97df5SIntel #include <rte_branch_prediction.h> 286bb97df5SIntel #include <rte_interrupts.h> 296bb97df5SIntel #include <rte_random.h> 306bb97df5SIntel #include <rte_debug.h> 316bb97df5SIntel #include <rte_ether.h> 326bb97df5SIntel #include <rte_ethdev.h> 336bb97df5SIntel #include <rte_mempool.h> 346bb97df5SIntel #include <rte_mbuf.h> 356bb97df5SIntel 36e4363e81SXutao Sun #define MAX_QUEUES 1024 376bb97df5SIntel /* 38e4363e81SXutao Sun * 1024 queues require to meet the needs of a large number of vmdq_pools. 39e4363e81SXutao Sun * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port. 406bb97df5SIntel */ 41e4363e81SXutao Sun #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \ 42e4363e81SXutao Sun RTE_TEST_TX_DESC_DEFAULT)) 436bb97df5SIntel #define MBUF_CACHE_SIZE 64 446bb97df5SIntel 456bb97df5SIntel #define MAX_PKT_BURST 32 466bb97df5SIntel 476bb97df5SIntel /* 486bb97df5SIntel * Configurable number of RX/TX ring descriptors 496bb97df5SIntel */ 50867a6c66SKevin Laatz #define RTE_TEST_RX_DESC_DEFAULT 1024 51867a6c66SKevin Laatz #define RTE_TEST_TX_DESC_DEFAULT 1024 526bb97df5SIntel 536bb97df5SIntel #define INVALID_PORT_ID 0xFF 546bb97df5SIntel 556bb97df5SIntel /* mask of enabled ports */ 56b30eb1d2SHuawei Xie static uint32_t enabled_port_mask; 576bb97df5SIntel 586bb97df5SIntel /* number of pools (if user does not specify any, 8 by default */ 596bb97df5SIntel static uint32_t num_queues = 8; 606bb97df5SIntel static uint32_t num_pools = 8; 618f5b4af7SJunyu Jiang static uint8_t rss_enable; 626bb97df5SIntel 639a212dc0SConor Fogarty /* Default structure for VMDq. 8< */ 649a212dc0SConor Fogarty 656bb97df5SIntel /* empty vmdq configuration structure. Filled in programatically */ 666bb97df5SIntel static const struct rte_eth_conf vmdq_conf_default = { 676bb97df5SIntel .rxmode = { 68295968d1SFerruh Yigit .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY, 696bb97df5SIntel .split_hdr_size = 0, 706bb97df5SIntel }, 716bb97df5SIntel 726bb97df5SIntel .txmode = { 73295968d1SFerruh Yigit .mq_mode = RTE_ETH_MQ_TX_NONE, 746bb97df5SIntel }, 756bb97df5SIntel .rx_adv_conf = { 766bb97df5SIntel /* 776bb97df5SIntel * should be overridden separately in code with 786bb97df5SIntel * appropriate values 796bb97df5SIntel */ 806bb97df5SIntel .vmdq_rx_conf = { 81295968d1SFerruh Yigit .nb_queue_pools = RTE_ETH_8_POOLS, 826bb97df5SIntel .enable_default_pool = 0, 836bb97df5SIntel .default_pool = 0, 846bb97df5SIntel .nb_pool_maps = 0, 856bb97df5SIntel .pool_map = {{0, 0},}, 866bb97df5SIntel }, 876bb97df5SIntel }, 886bb97df5SIntel }; 899a212dc0SConor Fogarty /* >8 End of Empty vdmq configuration structure. */ 906bb97df5SIntel 916bb97df5SIntel static unsigned lcore_ids[RTE_MAX_LCORE]; 9247523597SZhiyong Yang static uint16_t ports[RTE_MAX_ETHPORTS]; 93b30eb1d2SHuawei Xie static unsigned num_ports; /**< The number of ports specified in command line */ 946bb97df5SIntel 956bb97df5SIntel /* array used for printing out statistics */ 966bb97df5SIntel volatile unsigned long rxPackets[MAX_QUEUES] = {0}; 976bb97df5SIntel 989a212dc0SConor Fogarty /* vlan_tags 8< */ 996bb97df5SIntel const uint16_t vlan_tags[] = { 1006bb97df5SIntel 0, 1, 2, 3, 4, 5, 6, 7, 1016bb97df5SIntel 8, 9, 10, 11, 12, 13, 14, 15, 1026bb97df5SIntel 16, 17, 18, 19, 20, 21, 22, 23, 1036bb97df5SIntel 24, 25, 26, 27, 28, 29, 30, 31, 1046bb97df5SIntel 32, 33, 34, 35, 36, 37, 38, 39, 1056bb97df5SIntel 40, 41, 42, 43, 44, 45, 46, 47, 1066bb97df5SIntel 48, 49, 50, 51, 52, 53, 54, 55, 1076bb97df5SIntel 56, 57, 58, 59, 60, 61, 62, 63, 1086bb97df5SIntel }; 1099a212dc0SConor Fogarty /* >8 End of vlan_tags. */ 1109a212dc0SConor Fogarty 1112a13a5a0SHuawei Xie const uint16_t num_vlans = RTE_DIM(vlan_tags); 1122a13a5a0SHuawei Xie static uint16_t num_pf_queues, num_vmdq_queues; 1132a13a5a0SHuawei Xie static uint16_t vmdq_pool_base, vmdq_queue_base; 1149a212dc0SConor Fogarty 1159a212dc0SConor Fogarty /* Pool mac address template. 8< */ 1169a212dc0SConor Fogarty 1172a13a5a0SHuawei Xie /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */ 1186d13ea8eSOlivier Matz static struct rte_ether_addr pool_addr_template = { 1192a13a5a0SHuawei Xie .addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00} 1202a13a5a0SHuawei Xie }; 1219a212dc0SConor Fogarty /* >8 End of mac addr template. */ 1226bb97df5SIntel 1236bb97df5SIntel /* ethernet addresses of ports */ 1246d13ea8eSOlivier Matz static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS]; 1256bb97df5SIntel 1266bb97df5SIntel #define MAX_QUEUE_NUM_10G 128 1276bb97df5SIntel #define MAX_QUEUE_NUM_1G 8 1286bb97df5SIntel #define MAX_POOL_MAP_NUM_10G 64 1296bb97df5SIntel #define MAX_POOL_MAP_NUM_1G 32 1306bb97df5SIntel #define MAX_POOL_NUM_10G 64 1316bb97df5SIntel #define MAX_POOL_NUM_1G 8 132b30eb1d2SHuawei Xie /* 133b30eb1d2SHuawei Xie * Builds up the correct configuration for vmdq based on the vlan tags array 134b30eb1d2SHuawei Xie * given above, and determine the queue number and pool map number according to 135b30eb1d2SHuawei Xie * valid pool number 136b30eb1d2SHuawei Xie */ 1379a212dc0SConor Fogarty 138*a2a43d3aSRay Kinsella /* Building correct configuration for vdmq. 8< */ 1396bb97df5SIntel static inline int 1406bb97df5SIntel get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools) 1416bb97df5SIntel { 1426bb97df5SIntel struct rte_eth_vmdq_rx_conf conf; 1436bb97df5SIntel unsigned i; 1446bb97df5SIntel 1456bb97df5SIntel conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools; 1462a13a5a0SHuawei Xie conf.nb_pool_maps = num_pools; 1476bb97df5SIntel conf.enable_default_pool = 0; 1486bb97df5SIntel conf.default_pool = 0; /* set explicit value, even if not used */ 1496bb97df5SIntel 1506bb97df5SIntel for (i = 0; i < conf.nb_pool_maps; i++) { 1516bb97df5SIntel conf.pool_map[i].vlan_id = vlan_tags[i]; 1526bb97df5SIntel conf.pool_map[i].pools = (1UL << (i % num_pools)); 1536bb97df5SIntel } 1546bb97df5SIntel 1556bb97df5SIntel (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf))); 1566bb97df5SIntel (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf, 1576bb97df5SIntel sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf))); 1588f5b4af7SJunyu Jiang if (rss_enable) { 159295968d1SFerruh Yigit eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; 160295968d1SFerruh Yigit eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP | 161295968d1SFerruh Yigit RTE_ETH_RSS_UDP | 162295968d1SFerruh Yigit RTE_ETH_RSS_TCP | 163295968d1SFerruh Yigit RTE_ETH_RSS_SCTP; 1648f5b4af7SJunyu Jiang } 1656bb97df5SIntel return 0; 1666bb97df5SIntel } 1676bb97df5SIntel 1686bb97df5SIntel /* 1696bb97df5SIntel * Initialises a given port using global settings and with the rx buffers 1706bb97df5SIntel * coming from the mbuf_pool passed as parameter 1716bb97df5SIntel */ 1726bb97df5SIntel static inline int 17347523597SZhiyong Yang port_init(uint16_t port, struct rte_mempool *mbuf_pool) 1746bb97df5SIntel { 1756bb97df5SIntel struct rte_eth_dev_info dev_info; 17681f7ecd9SPablo de Lara struct rte_eth_rxconf *rxconf; 1778dd0befeSShahaf Shuler struct rte_eth_txconf *txconf; 1786bb97df5SIntel struct rte_eth_conf port_conf; 1792a13a5a0SHuawei Xie uint16_t rxRings, txRings; 18060efb44fSRoman Zhukov uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT; 18160efb44fSRoman Zhukov uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT; 1826bb97df5SIntel int retval; 1836bb97df5SIntel uint16_t q; 1842a13a5a0SHuawei Xie uint16_t queues_per_pool; 1856bb97df5SIntel uint32_t max_nb_pools; 1868f5b4af7SJunyu Jiang uint64_t rss_hf_tmp; 1876bb97df5SIntel 188b30eb1d2SHuawei Xie /* 189b30eb1d2SHuawei Xie * The max pool number from dev_info will be used to validate the pool 190b30eb1d2SHuawei Xie * number specified in cmd line 191b30eb1d2SHuawei Xie */ 192089e5ed7SIvan Ilchenko retval = rte_eth_dev_info_get(port, &dev_info); 193089e5ed7SIvan Ilchenko if (retval != 0) { 194089e5ed7SIvan Ilchenko printf("Error during getting device (port %u) info: %s\n", 195089e5ed7SIvan Ilchenko port, strerror(-retval)); 196089e5ed7SIvan Ilchenko return retval; 197089e5ed7SIvan Ilchenko } 198089e5ed7SIvan Ilchenko 1996bb97df5SIntel max_nb_pools = (uint32_t)dev_info.max_vmdq_pools; 2002a13a5a0SHuawei Xie /* 2012a13a5a0SHuawei Xie * We allow to process part of VMDQ pools specified by num_pools in 2022a13a5a0SHuawei Xie * command line. 2032a13a5a0SHuawei Xie */ 2042a13a5a0SHuawei Xie if (num_pools > max_nb_pools) { 2052a13a5a0SHuawei Xie printf("num_pools %d >max_nb_pools %d\n", 2062a13a5a0SHuawei Xie num_pools, max_nb_pools); 2072a13a5a0SHuawei Xie return -1; 2082a13a5a0SHuawei Xie } 2092a13a5a0SHuawei Xie retval = get_eth_conf(&port_conf, max_nb_pools); 2106bb97df5SIntel if (retval < 0) 2116bb97df5SIntel return retval; 2126bb97df5SIntel 2132a13a5a0SHuawei Xie /* 2142a13a5a0SHuawei Xie * NIC queues are divided into pf queues and vmdq queues. 2152a13a5a0SHuawei Xie */ 2162a13a5a0SHuawei Xie /* There is assumption here all ports have the same configuration! */ 2172a13a5a0SHuawei Xie num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num; 2182a13a5a0SHuawei Xie queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools; 2192a13a5a0SHuawei Xie num_vmdq_queues = num_pools * queues_per_pool; 2202a13a5a0SHuawei Xie num_queues = num_pf_queues + num_vmdq_queues; 2212a13a5a0SHuawei Xie vmdq_queue_base = dev_info.vmdq_queue_base; 2222a13a5a0SHuawei Xie vmdq_pool_base = dev_info.vmdq_pool_base; 2236bb97df5SIntel 2242a13a5a0SHuawei Xie printf("pf queue num: %u, configured vmdq pool num: %u," 2252a13a5a0SHuawei Xie " each vmdq pool has %u queues\n", 2262a13a5a0SHuawei Xie num_pf_queues, num_pools, queues_per_pool); 2272a13a5a0SHuawei Xie printf("vmdq queue base: %d pool base %d\n", 2282a13a5a0SHuawei Xie vmdq_queue_base, vmdq_pool_base); 229a9dbe180SThomas Monjalon if (!rte_eth_dev_is_valid_port(port)) 230b30eb1d2SHuawei Xie return -1; 2316bb97df5SIntel 2328f5b4af7SJunyu Jiang rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf; 2338f5b4af7SJunyu Jiang port_conf.rx_adv_conf.rss_conf.rss_hf &= 2348f5b4af7SJunyu Jiang dev_info.flow_type_rss_offloads; 2358f5b4af7SJunyu Jiang if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) { 2368f5b4af7SJunyu Jiang printf("Port %u modified RSS hash function based on hardware support," 2378f5b4af7SJunyu Jiang "requested:%#"PRIx64" configured:%#"PRIx64"\n", 2388f5b4af7SJunyu Jiang port, 2398f5b4af7SJunyu Jiang rss_hf_tmp, 2408f5b4af7SJunyu Jiang port_conf.rx_adv_conf.rss_conf.rss_hf); 2418f5b4af7SJunyu Jiang } 2428f5b4af7SJunyu Jiang 2432a13a5a0SHuawei Xie /* 2442a13a5a0SHuawei Xie * Though in this example, we only receive packets from the first queue 2452a13a5a0SHuawei Xie * of each pool and send packets through first rte_lcore_count() tx 2462a13a5a0SHuawei Xie * queues of vmdq queues, all queues including pf queues are setup. 2472a13a5a0SHuawei Xie * This is because VMDQ queues doesn't always start from zero, and the 2482a13a5a0SHuawei Xie * PMD layer doesn't support selectively initialising part of rx/tx 2492a13a5a0SHuawei Xie * queues. 2502a13a5a0SHuawei Xie */ 2512a13a5a0SHuawei Xie rxRings = (uint16_t)dev_info.max_rx_queues; 2522a13a5a0SHuawei Xie txRings = (uint16_t)dev_info.max_tx_queues; 2538dd0befeSShahaf Shuler 254089e5ed7SIvan Ilchenko retval = rte_eth_dev_info_get(port, &dev_info); 255089e5ed7SIvan Ilchenko if (retval != 0) { 256089e5ed7SIvan Ilchenko printf("Error during getting device (port %u) info: %s\n", 257089e5ed7SIvan Ilchenko port, strerror(-retval)); 258089e5ed7SIvan Ilchenko return retval; 259089e5ed7SIvan Ilchenko } 260089e5ed7SIvan Ilchenko 261295968d1SFerruh Yigit if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 2628dd0befeSShahaf Shuler port_conf.txmode.offloads |= 263295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 2646bb97df5SIntel retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf); 2656bb97df5SIntel if (retval != 0) 2666bb97df5SIntel return retval; 2676bb97df5SIntel 26860efb44fSRoman Zhukov retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize, 26960efb44fSRoman Zhukov &txRingSize); 27060efb44fSRoman Zhukov if (retval != 0) 27160efb44fSRoman Zhukov return retval; 27260efb44fSRoman Zhukov if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, 27360efb44fSRoman Zhukov RTE_TEST_TX_DESC_DEFAULT)) { 27460efb44fSRoman Zhukov printf("Mbuf pool has an insufficient size for port %u.\n", 27560efb44fSRoman Zhukov port); 27660efb44fSRoman Zhukov return -1; 27760efb44fSRoman Zhukov } 27860efb44fSRoman Zhukov 27981f7ecd9SPablo de Lara rxconf = &dev_info.default_rxconf; 28081f7ecd9SPablo de Lara rxconf->rx_drop_en = 1; 2818dd0befeSShahaf Shuler txconf = &dev_info.default_txconf; 2828dd0befeSShahaf Shuler txconf->offloads = port_conf.txmode.offloads; 2836bb97df5SIntel for (q = 0; q < rxRings; q++) { 2846bb97df5SIntel retval = rte_eth_rx_queue_setup(port, q, rxRingSize, 28581f7ecd9SPablo de Lara rte_eth_dev_socket_id(port), 28681f7ecd9SPablo de Lara rxconf, 2876bb97df5SIntel mbuf_pool); 2882a13a5a0SHuawei Xie if (retval < 0) { 2892a13a5a0SHuawei Xie printf("initialise rx queue %d failed\n", q); 2906bb97df5SIntel return retval; 2916bb97df5SIntel } 2922a13a5a0SHuawei Xie } 2936bb97df5SIntel 2946bb97df5SIntel for (q = 0; q < txRings; q++) { 2956bb97df5SIntel retval = rte_eth_tx_queue_setup(port, q, txRingSize, 29681f7ecd9SPablo de Lara rte_eth_dev_socket_id(port), 2978dd0befeSShahaf Shuler txconf); 2982a13a5a0SHuawei Xie if (retval < 0) { 2992a13a5a0SHuawei Xie printf("initialise tx queue %d failed\n", q); 3006bb97df5SIntel return retval; 3016bb97df5SIntel } 3022a13a5a0SHuawei Xie } 3036bb97df5SIntel 3046bb97df5SIntel retval = rte_eth_dev_start(port); 3052a13a5a0SHuawei Xie if (retval < 0) { 3062a13a5a0SHuawei Xie printf("port %d start failed\n", port); 3076bb97df5SIntel return retval; 3082a13a5a0SHuawei Xie } 3096bb97df5SIntel 31070febdcfSIgor Romanov retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]); 31170febdcfSIgor Romanov if (retval < 0) { 31270febdcfSIgor Romanov printf("port %d MAC address get failed: %s\n", port, 31370febdcfSIgor Romanov rte_strerror(-retval)); 31470febdcfSIgor Romanov return retval; 31570febdcfSIgor Romanov } 3166bb97df5SIntel printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 3176bb97df5SIntel " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", 3186bb97df5SIntel (unsigned)port, 319a7db3afcSAman Deep Singh RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port])); 3206bb97df5SIntel 3212a13a5a0SHuawei Xie /* 3222a13a5a0SHuawei Xie * Set mac for each pool. 3232a13a5a0SHuawei Xie * There is no default mac for the pools in i40. 3242a13a5a0SHuawei Xie * Removes this after i40e fixes this issue. 3252a13a5a0SHuawei Xie */ 3262a13a5a0SHuawei Xie for (q = 0; q < num_pools; q++) { 3276d13ea8eSOlivier Matz struct rte_ether_addr mac; 3282a13a5a0SHuawei Xie mac = pool_addr_template; 3292a13a5a0SHuawei Xie mac.addr_bytes[4] = port; 3302a13a5a0SHuawei Xie mac.addr_bytes[5] = q; 331c2c4f87bSAman Deep Singh printf("Port %u vmdq pool %u set mac " RTE_ETHER_ADDR_PRT_FMT "\n", 332a7db3afcSAman Deep Singh port, q, RTE_ETHER_ADDR_BYTES(&mac)); 3332a13a5a0SHuawei Xie retval = rte_eth_dev_mac_addr_add(port, &mac, 3342a13a5a0SHuawei Xie q + vmdq_pool_base); 3352a13a5a0SHuawei Xie if (retval) { 3362a13a5a0SHuawei Xie printf("mac addr add failed at pool %d\n", q); 3372a13a5a0SHuawei Xie return retval; 3382a13a5a0SHuawei Xie } 3392a13a5a0SHuawei Xie } 3402a13a5a0SHuawei Xie 3416bb97df5SIntel return 0; 3426bb97df5SIntel } 3439a212dc0SConor Fogarty /* >8 End of get_eth_conf. */ 3446bb97df5SIntel 3456bb97df5SIntel /* Check num_pools parameter and set it if OK*/ 3466bb97df5SIntel static int 3476bb97df5SIntel vmdq_parse_num_pools(const char *q_arg) 3486bb97df5SIntel { 3496bb97df5SIntel char *end = NULL; 3506bb97df5SIntel int n; 3516bb97df5SIntel 3526bb97df5SIntel /* parse number string */ 3536bb97df5SIntel n = strtol(q_arg, &end, 10); 3546bb97df5SIntel if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) 3556bb97df5SIntel return -1; 3566bb97df5SIntel 3572a13a5a0SHuawei Xie if (num_pools > num_vlans) { 3582a13a5a0SHuawei Xie printf("num_pools %d > num_vlans %d\n", num_pools, num_vlans); 3592a13a5a0SHuawei Xie return -1; 3602a13a5a0SHuawei Xie } 3612a13a5a0SHuawei Xie 3626bb97df5SIntel num_pools = n; 3636bb97df5SIntel 3646bb97df5SIntel return 0; 3656bb97df5SIntel } 3666bb97df5SIntel 3676bb97df5SIntel 3686bb97df5SIntel static int 3696bb97df5SIntel parse_portmask(const char *portmask) 3706bb97df5SIntel { 3716bb97df5SIntel char *end = NULL; 3726bb97df5SIntel unsigned long pm; 3736bb97df5SIntel 3746bb97df5SIntel /* parse hexadecimal string */ 3756bb97df5SIntel pm = strtoul(portmask, &end, 16); 3766bb97df5SIntel if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) 377ce6b8c31SSarosh Arif return 0; 3786bb97df5SIntel 3796bb97df5SIntel return pm; 3806bb97df5SIntel } 3816bb97df5SIntel 3826bb97df5SIntel /* Display usage */ 3836bb97df5SIntel static void 3846bb97df5SIntel vmdq_usage(const char *prgname) 3856bb97df5SIntel { 3866bb97df5SIntel printf("%s [EAL options] -- -p PORTMASK]\n" 3878f5b4af7SJunyu Jiang " --nb-pools NP: number of pools\n" 3888f5b4af7SJunyu Jiang " --enable-rss: enable RSS (disabled by default)\n", 3896bb97df5SIntel prgname); 3906bb97df5SIntel } 3916bb97df5SIntel 3926bb97df5SIntel /* Parse the argument (num_pools) given in the command line of the application */ 3936bb97df5SIntel static int 3946bb97df5SIntel vmdq_parse_args(int argc, char **argv) 3956bb97df5SIntel { 3966bb97df5SIntel int opt; 3976bb97df5SIntel int option_index; 3986bb97df5SIntel unsigned i; 3996bb97df5SIntel const char *prgname = argv[0]; 4006bb97df5SIntel static struct option long_option[] = { 4016bb97df5SIntel {"nb-pools", required_argument, NULL, 0}, 4028f5b4af7SJunyu Jiang {"enable-rss", 0, NULL, 0}, 4036bb97df5SIntel {NULL, 0, 0, 0} 4046bb97df5SIntel }; 4056bb97df5SIntel 4066bb97df5SIntel /* Parse command line */ 407b30eb1d2SHuawei Xie while ((opt = getopt_long(argc, argv, "p:", long_option, 408b30eb1d2SHuawei Xie &option_index)) != EOF) { 4096bb97df5SIntel switch (opt) { 4106bb97df5SIntel /* portmask */ 4116bb97df5SIntel case 'p': 4126bb97df5SIntel enabled_port_mask = parse_portmask(optarg); 4136bb97df5SIntel if (enabled_port_mask == 0) { 4146bb97df5SIntel printf("invalid portmask\n"); 4156bb97df5SIntel vmdq_usage(prgname); 4166bb97df5SIntel return -1; 4176bb97df5SIntel } 4186bb97df5SIntel break; 4196bb97df5SIntel case 0: 4208f5b4af7SJunyu Jiang if (!strcmp(long_option[option_index].name, 4218f5b4af7SJunyu Jiang "nb-pools")) { 4226bb97df5SIntel if (vmdq_parse_num_pools(optarg) == -1) { 4236bb97df5SIntel printf("invalid number of pools\n"); 4246bb97df5SIntel vmdq_usage(prgname); 4256bb97df5SIntel return -1; 4266bb97df5SIntel } 4278f5b4af7SJunyu Jiang } 4288f5b4af7SJunyu Jiang 4298f5b4af7SJunyu Jiang if (!strcmp(long_option[option_index].name, 4308f5b4af7SJunyu Jiang "enable-rss")) 4318f5b4af7SJunyu Jiang rss_enable = 1; 4326bb97df5SIntel break; 4336bb97df5SIntel 4346bb97df5SIntel default: 4356bb97df5SIntel vmdq_usage(prgname); 4366bb97df5SIntel return -1; 4376bb97df5SIntel } 4386bb97df5SIntel } 4396bb97df5SIntel 4406bb97df5SIntel for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 4416bb97df5SIntel if (enabled_port_mask & (1 << i)) 4426bb97df5SIntel ports[num_ports++] = (uint8_t)i; 4436bb97df5SIntel } 4446bb97df5SIntel 4456bb97df5SIntel if (num_ports < 2 || num_ports % 2) { 4466bb97df5SIntel printf("Current enabled port number is %u," 4476bb97df5SIntel "but it should be even and at least 2\n", num_ports); 4486bb97df5SIntel return -1; 4496bb97df5SIntel } 4506bb97df5SIntel 4516bb97df5SIntel return 0; 4526bb97df5SIntel } 4536bb97df5SIntel 4546bb97df5SIntel static void 4556bb97df5SIntel update_mac_address(struct rte_mbuf *m, unsigned dst_port) 4566bb97df5SIntel { 4576d13ea8eSOlivier Matz struct rte_ether_hdr *eth; 4586bb97df5SIntel void *tmp; 4596bb97df5SIntel 4606d13ea8eSOlivier Matz eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 4616bb97df5SIntel 4626bb97df5SIntel /* 02:00:00:00:00:xx */ 46304d43857SDmitry Kozlyuk tmp = ð->dst_addr.addr_bytes[0]; 4646bb97df5SIntel *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 4656bb97df5SIntel 4666bb97df5SIntel /* src addr */ 46704d43857SDmitry Kozlyuk rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], ð->src_addr); 4686bb97df5SIntel } 4696bb97df5SIntel 4706bb97df5SIntel /* When we receive a HUP signal, print out our stats */ 4716bb97df5SIntel static void 4726bb97df5SIntel sighup_handler(int signum) 4736bb97df5SIntel { 47470c37e32SJunyu Jiang unsigned int q = vmdq_queue_base; 47570c37e32SJunyu Jiang for (; q < num_queues; q++) { 47670c37e32SJunyu Jiang if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0) 47770c37e32SJunyu Jiang printf("\nPool %u: ", (q - vmdq_queue_base) / 47870c37e32SJunyu Jiang (num_vmdq_queues / num_pools)); 4796bb97df5SIntel printf("%lu ", rxPackets[q]); 4806bb97df5SIntel } 4816bb97df5SIntel printf("\nFinished handling signal %d\n", signum); 4826bb97df5SIntel } 4836bb97df5SIntel 4846bb97df5SIntel /* 4856bb97df5SIntel * Main thread that does the work, reading from INPUT_PORT 4866bb97df5SIntel * and writing to OUTPUT_PORT 4876bb97df5SIntel */ 48813c4ebd6SBruce Richardson static int 489f2fc83b4SThomas Monjalon lcore_main(__rte_unused void *dummy) 4906bb97df5SIntel { 4916bb97df5SIntel const uint16_t lcore_id = (uint16_t)rte_lcore_id(); 4926bb97df5SIntel const uint16_t num_cores = (uint16_t)rte_lcore_count(); 4936bb97df5SIntel uint16_t core_id = 0; 4946bb97df5SIntel uint16_t startQueue, endQueue; 4956bb97df5SIntel uint16_t q, i, p; 4962a13a5a0SHuawei Xie const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores); 4976bb97df5SIntel 4986bb97df5SIntel for (i = 0; i < num_cores; i++) 4996bb97df5SIntel if (lcore_ids[i] == lcore_id) { 5006bb97df5SIntel core_id = i; 5016bb97df5SIntel break; 5026bb97df5SIntel } 5036bb97df5SIntel 5046bb97df5SIntel if (remainder != 0) { 5056bb97df5SIntel if (core_id < remainder) { 5062a13a5a0SHuawei Xie startQueue = (uint16_t)(core_id * 5072a13a5a0SHuawei Xie (num_vmdq_queues / num_cores + 1)); 5082a13a5a0SHuawei Xie endQueue = (uint16_t)(startQueue + 5092a13a5a0SHuawei Xie (num_vmdq_queues / num_cores) + 1); 5106bb97df5SIntel } else { 5112a13a5a0SHuawei Xie startQueue = (uint16_t)(core_id * 5122a13a5a0SHuawei Xie (num_vmdq_queues / num_cores) + 5132a13a5a0SHuawei Xie remainder); 5142a13a5a0SHuawei Xie endQueue = (uint16_t)(startQueue + 5152a13a5a0SHuawei Xie (num_vmdq_queues / num_cores)); 5166bb97df5SIntel } 5176bb97df5SIntel } else { 5182a13a5a0SHuawei Xie startQueue = (uint16_t)(core_id * 5192a13a5a0SHuawei Xie (num_vmdq_queues / num_cores)); 5202a13a5a0SHuawei Xie endQueue = (uint16_t)(startQueue + 5212a13a5a0SHuawei Xie (num_vmdq_queues / num_cores)); 5226bb97df5SIntel } 5236bb97df5SIntel 5242a13a5a0SHuawei Xie /* vmdq queue idx doesn't always start from zero.*/ 5252a13a5a0SHuawei Xie startQueue += vmdq_queue_base; 5262a13a5a0SHuawei Xie endQueue += vmdq_queue_base; 5276bb97df5SIntel printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id, 5286bb97df5SIntel (unsigned)lcore_id, startQueue, endQueue - 1); 5296bb97df5SIntel 53013c4ebd6SBruce Richardson if (startQueue == endQueue) { 53113c4ebd6SBruce Richardson printf("lcore %u has nothing to do\n", lcore_id); 532b30eb1d2SHuawei Xie return 0; 53313c4ebd6SBruce Richardson } 53413c4ebd6SBruce Richardson 5356bb97df5SIntel for (;;) { 5366bb97df5SIntel struct rte_mbuf *buf[MAX_PKT_BURST]; 5377efe28bdSPavan Nikhilesh const uint16_t buf_size = RTE_DIM(buf); 5386bb97df5SIntel 5396bb97df5SIntel for (p = 0; p < num_ports; p++) { 5406bb97df5SIntel const uint8_t sport = ports[p]; 541b30eb1d2SHuawei Xie /* 0 <-> 1, 2 <-> 3 etc */ 542b30eb1d2SHuawei Xie const uint8_t dport = ports[p ^ 1]; 5436bb97df5SIntel if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID)) 5446bb97df5SIntel continue; 5456bb97df5SIntel 5466bb97df5SIntel for (q = startQueue; q < endQueue; q++) { 5476bb97df5SIntel const uint16_t rxCount = rte_eth_rx_burst(sport, 5486bb97df5SIntel q, buf, buf_size); 5496bb97df5SIntel 5506bb97df5SIntel if (unlikely(rxCount == 0)) 5516bb97df5SIntel continue; 5526bb97df5SIntel 5536bb97df5SIntel rxPackets[q] += rxCount; 5546bb97df5SIntel 5556bb97df5SIntel for (i = 0; i < rxCount; i++) 5566bb97df5SIntel update_mac_address(buf[i], dport); 5576bb97df5SIntel 5586bb97df5SIntel const uint16_t txCount = rte_eth_tx_burst(dport, 5592a13a5a0SHuawei Xie vmdq_queue_base + core_id, 5602a13a5a0SHuawei Xie buf, 5612a13a5a0SHuawei Xie rxCount); 5626bb97df5SIntel 5636bb97df5SIntel if (txCount != rxCount) { 5646bb97df5SIntel for (i = txCount; i < rxCount; i++) 5656bb97df5SIntel rte_pktmbuf_free(buf[i]); 5666bb97df5SIntel } 5676bb97df5SIntel } 5686bb97df5SIntel } 5696bb97df5SIntel } 5706bb97df5SIntel } 5716bb97df5SIntel 5726bb97df5SIntel /* 5736bb97df5SIntel * Update the global var NUM_PORTS and array PORTS according to system ports number 5746bb97df5SIntel * and return valid ports number 5756bb97df5SIntel */ 5766bb97df5SIntel static unsigned check_ports_num(unsigned nb_ports) 5776bb97df5SIntel { 5786bb97df5SIntel unsigned valid_num_ports = num_ports; 5796bb97df5SIntel unsigned portid; 5806bb97df5SIntel 5816bb97df5SIntel if (num_ports > nb_ports) { 5826bb97df5SIntel printf("\nSpecified port number(%u) exceeds total system port number(%u)\n", 5836bb97df5SIntel num_ports, nb_ports); 5846bb97df5SIntel num_ports = nb_ports; 5856bb97df5SIntel } 5866bb97df5SIntel 5876bb97df5SIntel for (portid = 0; portid < num_ports; portid++) { 588a9dbe180SThomas Monjalon if (!rte_eth_dev_is_valid_port(ports[portid])) { 589a9dbe180SThomas Monjalon printf("\nSpecified port ID(%u) is not valid\n", 590a9dbe180SThomas Monjalon ports[portid]); 5916bb97df5SIntel ports[portid] = INVALID_PORT_ID; 5926bb97df5SIntel valid_num_ports--; 5936bb97df5SIntel } 5946bb97df5SIntel } 5956bb97df5SIntel return valid_num_ports; 5966bb97df5SIntel } 5976bb97df5SIntel 5986bb97df5SIntel /* Main function, does initialisation and calls the per-lcore functions */ 5996bb97df5SIntel int 60098a16481SDavid Marchand main(int argc, char *argv[]) 6016bb97df5SIntel { 6026bb97df5SIntel struct rte_mempool *mbuf_pool; 6036bb97df5SIntel unsigned lcore_id, core_id = 0; 6046bb97df5SIntel int ret; 6056bb97df5SIntel unsigned nb_ports, valid_num_ports; 60647523597SZhiyong Yang uint16_t portid; 6076bb97df5SIntel 6086bb97df5SIntel signal(SIGHUP, sighup_handler); 6096bb97df5SIntel 6106bb97df5SIntel /* init EAL */ 6116bb97df5SIntel ret = rte_eal_init(argc, argv); 6126bb97df5SIntel if (ret < 0) 6136bb97df5SIntel rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); 6146bb97df5SIntel argc -= ret; 6156bb97df5SIntel argv += ret; 6166bb97df5SIntel 6176bb97df5SIntel /* parse app arguments */ 6186bb97df5SIntel ret = vmdq_parse_args(argc, argv); 6196bb97df5SIntel if (ret < 0) 6206bb97df5SIntel rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n"); 6216bb97df5SIntel 6226bb97df5SIntel for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 6236bb97df5SIntel if (rte_lcore_is_enabled(lcore_id)) 6246bb97df5SIntel lcore_ids[core_id++] = lcore_id; 6256bb97df5SIntel 6266bb97df5SIntel if (rte_lcore_count() > RTE_MAX_LCORE) 6276bb97df5SIntel rte_exit(EXIT_FAILURE, "Not enough cores\n"); 6286bb97df5SIntel 629d9a42a69SThomas Monjalon nb_ports = rte_eth_dev_count_avail(); 6306bb97df5SIntel 6316bb97df5SIntel /* 6326bb97df5SIntel * Update the global var NUM_PORTS and global array PORTS 6336bb97df5SIntel * and get value of var VALID_NUM_PORTS according to system ports number 6346bb97df5SIntel */ 6356bb97df5SIntel valid_num_ports = check_ports_num(nb_ports); 6366bb97df5SIntel 6376bb97df5SIntel if (valid_num_ports < 2 || valid_num_ports % 2) { 6386bb97df5SIntel printf("Current valid ports number is %u\n", valid_num_ports); 6396bb97df5SIntel rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n"); 6406bb97df5SIntel } 6416bb97df5SIntel 642ea0c20eaSOlivier Matz mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", 643ea0c20eaSOlivier Matz NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE, 644824cb29cSKonstantin Ananyev 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); 6456bb97df5SIntel if (mbuf_pool == NULL) 6466bb97df5SIntel rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); 6476bb97df5SIntel 6486bb97df5SIntel /* initialize all ports */ 6498728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(portid) { 6506bb97df5SIntel /* skip ports that are not enabled */ 6516bb97df5SIntel if ((enabled_port_mask & (1 << portid)) == 0) { 6526bb97df5SIntel printf("\nSkipping disabled port %d\n", portid); 6536bb97df5SIntel continue; 6546bb97df5SIntel } 6556bb97df5SIntel if (port_init(portid, mbuf_pool) != 0) 6566bb97df5SIntel rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); 6576bb97df5SIntel } 6586bb97df5SIntel 6596bb97df5SIntel /* call lcore_main() on every lcore */ 660cb056611SStephen Hemminger rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN); 661cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore_id) { 6626bb97df5SIntel if (rte_eal_wait_lcore(lcore_id) < 0) 6636bb97df5SIntel return -1; 6646bb97df5SIntel } 6656bb97df5SIntel 66610aa3757SChengchang Tang /* clean up the EAL */ 66710aa3757SChengchang Tang rte_eal_cleanup(); 66810aa3757SChengchang Tang 6696bb97df5SIntel return 0; 6706bb97df5SIntel } 671