13998e2a0SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 23998e2a0SBruce Richardson * Copyright(c) 2010-2017 Intel Corporation 3d19533e8SHuawei Xie */ 4d19533e8SHuawei Xie 5d19533e8SHuawei Xie #include <arpa/inet.h> 6d19533e8SHuawei Xie #include <getopt.h> 7d19533e8SHuawei Xie #include <linux/if_ether.h> 8d19533e8SHuawei Xie #include <linux/if_vlan.h> 9d19533e8SHuawei Xie #include <linux/virtio_net.h> 10d19533e8SHuawei Xie #include <linux/virtio_ring.h> 11d19533e8SHuawei Xie #include <signal.h> 12d19533e8SHuawei Xie #include <stdint.h> 13d19533e8SHuawei Xie #include <sys/eventfd.h> 14d19533e8SHuawei Xie #include <sys/param.h> 15d19533e8SHuawei Xie #include <unistd.h> 16d19533e8SHuawei Xie 17d19533e8SHuawei Xie #include <rte_cycles.h> 18d19533e8SHuawei Xie #include <rte_ethdev.h> 19d19533e8SHuawei Xie #include <rte_log.h> 20d19533e8SHuawei Xie #include <rte_string_fns.h> 21d19533e8SHuawei Xie #include <rte_malloc.h> 22a798beb4SYuanhan Liu #include <rte_vhost.h> 23691693c6SJijiang Liu #include <rte_ip.h> 249fd72e3cSJijiang Liu #include <rte_tcp.h> 25577329e6SJerin Jacob #include <rte_pause.h> 26d19533e8SHuawei Xie 273a04ecb2SCheng Jiang #include "ioat.h" 28d19533e8SHuawei Xie #include "main.h" 29d19533e8SHuawei Xie 30f17eb179SBernard Iremonger #ifndef MAX_QUEUES 31f17eb179SBernard Iremonger #define MAX_QUEUES 128 32f17eb179SBernard Iremonger #endif 33d19533e8SHuawei Xie 34d19533e8SHuawei Xie /* the maximum number of external ports supported */ 35d19533e8SHuawei Xie #define MAX_SUP_PORTS 1 36d19533e8SHuawei Xie 37d19533e8SHuawei Xie #define MBUF_CACHE_SIZE 128 38824cb29cSKonstantin Ananyev #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE 39d19533e8SHuawei Xie 40d19533e8SHuawei Xie #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ 41d19533e8SHuawei Xie 42d19533e8SHuawei Xie #define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */ 43d19533e8SHuawei Xie #define BURST_RX_RETRIES 4 /* Number of retries on RX. */ 44d19533e8SHuawei Xie 45d19533e8SHuawei Xie #define JUMBO_FRAME_MAX_SIZE 0x2600 46d19533e8SHuawei Xie 47d19533e8SHuawei Xie /* State of virtio device. */ 48d19533e8SHuawei Xie #define DEVICE_MAC_LEARNING 0 49d19533e8SHuawei Xie #define DEVICE_RX 1 50d19533e8SHuawei Xie #define DEVICE_SAFE_REMOVE 2 51d19533e8SHuawei Xie 52d19533e8SHuawei Xie /* Configurable number of RX/TX ring descriptors */ 53d19533e8SHuawei Xie #define RTE_TEST_RX_DESC_DEFAULT 1024 54d19533e8SHuawei Xie #define RTE_TEST_TX_DESC_DEFAULT 512 55d19533e8SHuawei Xie 56d19533e8SHuawei Xie #define INVALID_PORT_ID 0xFF 57d19533e8SHuawei Xie 58d19533e8SHuawei Xie /* Maximum long option length for option parsing. */ 59d19533e8SHuawei Xie #define MAX_LONG_OPT_SZ 64 60d19533e8SHuawei Xie 61d19533e8SHuawei Xie /* mask of enabled ports */ 62d19533e8SHuawei Xie static uint32_t enabled_port_mask = 0; 63d19533e8SHuawei Xie 6490924cafSOuyang Changchun /* Promiscuous mode */ 6590924cafSOuyang Changchun static uint32_t promiscuous; 6690924cafSOuyang Changchun 67d19533e8SHuawei Xie /* number of devices/queues to support*/ 68d19533e8SHuawei Xie static uint32_t num_queues = 0; 69a981294bSHuawei Xie static uint32_t num_devices; 70d19533e8SHuawei Xie 7168363d85SYuanhan Liu static struct rte_mempool *mbuf_pool; 7228deb020SHuawei Xie static int mergeable; 73d19533e8SHuawei Xie 74d19533e8SHuawei Xie /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. 
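 * VM2VM_SOFTWARE forwards guest-to-guest packets inside this application
 * (see virtio_tx_local()), VM2VM_HARDWARE sends them to the NIC and relies
 * on VMDQ loopback to reach the destination guest, and VM2VM_DISABLED
 * always transmits on the physical port.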
 */
75d19533e8SHuawei Xie typedef enum {
76d19533e8SHuawei Xie VM2VM_DISABLED = 0,
77d19533e8SHuawei Xie VM2VM_SOFTWARE = 1,
78d19533e8SHuawei Xie VM2VM_HARDWARE = 2,
79d19533e8SHuawei Xie VM2VM_LAST
80d19533e8SHuawei Xie } vm2vm_type;
81d19533e8SHuawei Xie static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
82d19533e8SHuawei Xie
83d19533e8SHuawei Xie /* Enable stats. */
84d19533e8SHuawei Xie static uint32_t enable_stats = 0;
85d19533e8SHuawei Xie /* Enable retries on RX. */
86d19533e8SHuawei Xie static uint32_t enable_retry = 1;
879fd72e3cSJijiang Liu
889fd72e3cSJijiang Liu /* Disable TX checksum offload */
899fd72e3cSJijiang Liu static uint32_t enable_tx_csum;
909fd72e3cSJijiang Liu
919fd72e3cSJijiang Liu /* Disable TSO offload */
929fd72e3cSJijiang Liu static uint32_t enable_tso;
939fd72e3cSJijiang Liu
942345e3beSYuanhan Liu static int client_mode;
952345e3beSYuanhan Liu
96ca059fa5SYuanhan Liu static int builtin_net_driver;
97ca059fa5SYuanhan Liu
983a04ecb2SCheng Jiang static int async_vhost_driver;
993a04ecb2SCheng Jiang
1003a04ecb2SCheng Jiang static char dma_type[MAX_LONG_OPT_SZ];
1013a04ecb2SCheng Jiang
102d19533e8SHuawei Xie /* Specify timeout (in useconds) between retries on RX. */
103d19533e8SHuawei Xie static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
104d19533e8SHuawei Xie /* Specify the number of retries on RX. */
105d19533e8SHuawei Xie static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
106d19533e8SHuawei Xie
107ad0eef4dSJiayu Hu /* Socket file paths. Can be set by user */
108ad0eef4dSJiayu Hu static char *socket_files;
109ad0eef4dSJiayu Hu static int nb_sockets;
110d19533e8SHuawei Xie
111d19533e8SHuawei Xie /* Empty VMDQ configuration structure. Filled in programmatically. */
112d19533e8SHuawei Xie static struct rte_eth_conf vmdq_conf_default = {
113d19533e8SHuawei Xie .rxmode = {
114d19533e8SHuawei Xie .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
115d19533e8SHuawei Xie .split_hdr_size = 0,
116d19533e8SHuawei Xie /*
117cc22d8caSShahaf Shuler * VLAN strip is necessary for 1G NICs such as the I350;
118d19533e8SHuawei Xie * it fixes a bug where IPv4 forwarding in the guest cannot
119d19533e8SHuawei Xie * forward packets from one virtio dev to another virtio dev.
120d19533e8SHuawei Xie */ 121323e7b66SFerruh Yigit .offloads = DEV_RX_OFFLOAD_VLAN_STRIP, 122d19533e8SHuawei Xie }, 123d19533e8SHuawei Xie 124d19533e8SHuawei Xie .txmode = { 125d19533e8SHuawei Xie .mq_mode = ETH_MQ_TX_NONE, 126cc22d8caSShahaf Shuler .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | 127cc22d8caSShahaf Shuler DEV_TX_OFFLOAD_TCP_CKSUM | 128cc22d8caSShahaf Shuler DEV_TX_OFFLOAD_VLAN_INSERT | 129cc22d8caSShahaf Shuler DEV_TX_OFFLOAD_MULTI_SEGS | 130cc22d8caSShahaf Shuler DEV_TX_OFFLOAD_TCP_TSO), 131d19533e8SHuawei Xie }, 132d19533e8SHuawei Xie .rx_adv_conf = { 133d19533e8SHuawei Xie /* 134d19533e8SHuawei Xie * should be overridden separately in code with 135d19533e8SHuawei Xie * appropriate values 136d19533e8SHuawei Xie */ 137d19533e8SHuawei Xie .vmdq_rx_conf = { 138d19533e8SHuawei Xie .nb_queue_pools = ETH_8_POOLS, 139d19533e8SHuawei Xie .enable_default_pool = 0, 140d19533e8SHuawei Xie .default_pool = 0, 141d19533e8SHuawei Xie .nb_pool_maps = 0, 142d19533e8SHuawei Xie .pool_map = {{0, 0},}, 143d19533e8SHuawei Xie }, 144d19533e8SHuawei Xie }, 145d19533e8SHuawei Xie }; 146d19533e8SHuawei Xie 147cc22d8caSShahaf Shuler 148d19533e8SHuawei Xie static unsigned lcore_ids[RTE_MAX_LCORE]; 149f8244c63SZhiyong Yang static uint16_t ports[RTE_MAX_ETHPORTS]; 150d19533e8SHuawei Xie static unsigned num_ports = 0; /**< The number of ports specified in command line */ 15184b02d16SHuawei Xie static uint16_t num_pf_queues, num_vmdq_queues; 15284b02d16SHuawei Xie static uint16_t vmdq_pool_base, vmdq_queue_base; 15384b02d16SHuawei Xie static uint16_t queues_per_pool; 154d19533e8SHuawei Xie 155d19533e8SHuawei Xie const uint16_t vlan_tags[] = { 156d19533e8SHuawei Xie 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 157d19533e8SHuawei Xie 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 158d19533e8SHuawei Xie 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 159d19533e8SHuawei Xie 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 160d19533e8SHuawei Xie 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 161d19533e8SHuawei Xie 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 162d19533e8SHuawei Xie 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 163d19533e8SHuawei Xie 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 164d19533e8SHuawei Xie }; 165d19533e8SHuawei Xie 166d19533e8SHuawei Xie /* ethernet addresses of ports */ 1676d13ea8eSOlivier Matz static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS]; 168d19533e8SHuawei Xie 16945657a5cSYuanhan Liu static struct vhost_dev_tailq_list vhost_dev_list = 17045657a5cSYuanhan Liu TAILQ_HEAD_INITIALIZER(vhost_dev_list); 171d19533e8SHuawei Xie 172d19533e8SHuawei Xie static struct lcore_info lcore_info[RTE_MAX_LCORE]; 173d19533e8SHuawei Xie 174d19533e8SHuawei Xie /* Used for queueing bursts of TX packets. */ 175d19533e8SHuawei Xie struct mbuf_table { 176d19533e8SHuawei Xie unsigned len; 177d19533e8SHuawei Xie unsigned txq_id; 178d19533e8SHuawei Xie struct rte_mbuf *m_table[MAX_PKT_BURST]; 179d19533e8SHuawei Xie }; 180d19533e8SHuawei Xie 181a68ba8e0SCheng Jiang struct vhost_bufftable { 182a68ba8e0SCheng Jiang uint32_t len; 183a68ba8e0SCheng Jiang uint64_t pre_tsc; 184a68ba8e0SCheng Jiang struct rte_mbuf *m_table[MAX_PKT_BURST]; 185a68ba8e0SCheng Jiang }; 186a68ba8e0SCheng Jiang 187d19533e8SHuawei Xie /* TX queue for each data core. */ 188d19533e8SHuawei Xie struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE]; 189d19533e8SHuawei Xie 190a68ba8e0SCheng Jiang /* 191a68ba8e0SCheng Jiang * Vhost TX buffer for each data core. 
192a68ba8e0SCheng Jiang * Every data core maintains a TX buffer for every vhost device, 193a68ba8e0SCheng Jiang * which is used for batch pkts enqueue for higher performance. 194a68ba8e0SCheng Jiang */ 195a68ba8e0SCheng Jiang struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE]; 196a68ba8e0SCheng Jiang 197273ecdbcSYuanhan Liu #define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \ 198273ecdbcSYuanhan Liu / US_PER_S * BURST_TX_DRAIN_US) 199d19533e8SHuawei Xie #define VLAN_HLEN 4 200d19533e8SHuawei Xie 2013a04ecb2SCheng Jiang static inline int 2023a04ecb2SCheng Jiang open_dma(const char *value) 2033a04ecb2SCheng Jiang { 2043a04ecb2SCheng Jiang if (strncmp(dma_type, "ioat", 4) == 0) 2053a04ecb2SCheng Jiang return open_ioat(value); 2063a04ecb2SCheng Jiang 2073a04ecb2SCheng Jiang return -1; 2083a04ecb2SCheng Jiang } 2093a04ecb2SCheng Jiang 210d19533e8SHuawei Xie /* 211d19533e8SHuawei Xie * Builds up the correct configuration for VMDQ VLAN pool map 212d19533e8SHuawei Xie * according to the pool & queue limits. 213d19533e8SHuawei Xie */ 214d19533e8SHuawei Xie static inline int 215d19533e8SHuawei Xie get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices) 216d19533e8SHuawei Xie { 217d19533e8SHuawei Xie struct rte_eth_vmdq_rx_conf conf; 21890924cafSOuyang Changchun struct rte_eth_vmdq_rx_conf *def_conf = 21990924cafSOuyang Changchun &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf; 220d19533e8SHuawei Xie unsigned i; 221d19533e8SHuawei Xie 222d19533e8SHuawei Xie memset(&conf, 0, sizeof(conf)); 223d19533e8SHuawei Xie conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices; 224d19533e8SHuawei Xie conf.nb_pool_maps = num_devices; 22590924cafSOuyang Changchun conf.enable_loop_back = def_conf->enable_loop_back; 22690924cafSOuyang Changchun conf.rx_mode = def_conf->rx_mode; 227d19533e8SHuawei Xie 228d19533e8SHuawei Xie for (i = 0; i < conf.nb_pool_maps; i++) { 229d19533e8SHuawei Xie conf.pool_map[i].vlan_id = vlan_tags[ i ]; 230d19533e8SHuawei Xie conf.pool_map[i].pools = (1UL << i); 231d19533e8SHuawei Xie } 232d19533e8SHuawei Xie 233d19533e8SHuawei Xie (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf))); 234d19533e8SHuawei Xie (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf, 235d19533e8SHuawei Xie sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf))); 236d19533e8SHuawei Xie return 0; 237d19533e8SHuawei Xie } 238d19533e8SHuawei Xie 239d19533e8SHuawei Xie /* 240d19533e8SHuawei Xie * Initialises a given port using global settings and with the rx buffers 241d19533e8SHuawei Xie * coming from the mbuf_pool passed as parameter 242d19533e8SHuawei Xie */ 243d19533e8SHuawei Xie static inline int 244f8244c63SZhiyong Yang port_init(uint16_t port) 245d19533e8SHuawei Xie { 246d19533e8SHuawei Xie struct rte_eth_dev_info dev_info; 247d19533e8SHuawei Xie struct rte_eth_conf port_conf; 248db4014f2SHuawei Xie struct rte_eth_rxconf *rxconf; 249db4014f2SHuawei Xie struct rte_eth_txconf *txconf; 250db4014f2SHuawei Xie int16_t rx_rings, tx_rings; 251d19533e8SHuawei Xie uint16_t rx_ring_size, tx_ring_size; 252d19533e8SHuawei Xie int retval; 253d19533e8SHuawei Xie uint16_t q; 254d19533e8SHuawei Xie 255d19533e8SHuawei Xie /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */ 25637fb306cSIvan Ilchenko retval = rte_eth_dev_info_get(port, &dev_info); 25737fb306cSIvan Ilchenko if (retval != 0) { 25837fb306cSIvan Ilchenko RTE_LOG(ERR, VHOST_PORT, 25937fb306cSIvan Ilchenko "Error during getting device (port %u) info: %s\n", 
26037fb306cSIvan Ilchenko port, strerror(-retval)); 26137fb306cSIvan Ilchenko 26237fb306cSIvan Ilchenko return retval; 26337fb306cSIvan Ilchenko } 264d19533e8SHuawei Xie 265db4014f2SHuawei Xie rxconf = &dev_info.default_rxconf; 266db4014f2SHuawei Xie txconf = &dev_info.default_txconf; 267db4014f2SHuawei Xie rxconf->rx_drop_en = 1; 268f0adccd4SOuyang Changchun 269d19533e8SHuawei Xie /*configure the number of supported virtio devices based on VMDQ limits */ 270d19533e8SHuawei Xie num_devices = dev_info.max_vmdq_pools; 271d19533e8SHuawei Xie 272d19533e8SHuawei Xie rx_ring_size = RTE_TEST_RX_DESC_DEFAULT; 273d19533e8SHuawei Xie tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; 27400b8b706SYuanhan Liu 275d19533e8SHuawei Xie tx_rings = (uint16_t)rte_lcore_count(); 276d19533e8SHuawei Xie 277d19533e8SHuawei Xie /* Get port configuration. */ 278d19533e8SHuawei Xie retval = get_eth_conf(&port_conf, num_devices); 279d19533e8SHuawei Xie if (retval < 0) 280d19533e8SHuawei Xie return retval; 28184b02d16SHuawei Xie /* NIC queues are divided into pf queues and vmdq queues. */ 28284b02d16SHuawei Xie num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num; 28384b02d16SHuawei Xie queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools; 28484b02d16SHuawei Xie num_vmdq_queues = num_devices * queues_per_pool; 28584b02d16SHuawei Xie num_queues = num_pf_queues + num_vmdq_queues; 28684b02d16SHuawei Xie vmdq_queue_base = dev_info.vmdq_queue_base; 28784b02d16SHuawei Xie vmdq_pool_base = dev_info.vmdq_pool_base; 28884b02d16SHuawei Xie printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n", 28984b02d16SHuawei Xie num_pf_queues, num_devices, queues_per_pool); 290d19533e8SHuawei Xie 291a9dbe180SThomas Monjalon if (!rte_eth_dev_is_valid_port(port)) 292a9dbe180SThomas Monjalon return -1; 293d19533e8SHuawei Xie 29484b02d16SHuawei Xie rx_rings = (uint16_t)dev_info.max_rx_queues; 295cc22d8caSShahaf Shuler if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) 296cc22d8caSShahaf Shuler port_conf.txmode.offloads |= 297cc22d8caSShahaf Shuler DEV_TX_OFFLOAD_MBUF_FAST_FREE; 298d19533e8SHuawei Xie /* Configure ethernet device. */ 299d19533e8SHuawei Xie retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); 300bb7085b4SJianfeng Tan if (retval != 0) { 301bb7085b4SJianfeng Tan RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n", 302bb7085b4SJianfeng Tan port, strerror(-retval)); 303d19533e8SHuawei Xie return retval; 304bb7085b4SJianfeng Tan } 305d19533e8SHuawei Xie 30660efb44fSRoman Zhukov retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size, 30760efb44fSRoman Zhukov &tx_ring_size); 30860efb44fSRoman Zhukov if (retval != 0) { 30960efb44fSRoman Zhukov RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors " 31060efb44fSRoman Zhukov "for port %u: %s.\n", port, strerror(-retval)); 31160efb44fSRoman Zhukov return retval; 31260efb44fSRoman Zhukov } 31360efb44fSRoman Zhukov if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) { 31460efb44fSRoman Zhukov RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size " 31560efb44fSRoman Zhukov "for Rx queues on port %u.\n", port); 31660efb44fSRoman Zhukov return -1; 31760efb44fSRoman Zhukov } 31860efb44fSRoman Zhukov 319d19533e8SHuawei Xie /* Setup the queues. 
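 * One RX queue is set up for every NIC Rx queue reported by the device
 * (PF queues plus the per-pool VMDQ queues), all sharing the single
 * mbuf_pool, and one TX queue is set up per lcore.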
*/ 320cc22d8caSShahaf Shuler rxconf->offloads = port_conf.rxmode.offloads; 321d19533e8SHuawei Xie for (q = 0; q < rx_rings; q ++) { 322d19533e8SHuawei Xie retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, 323db4014f2SHuawei Xie rte_eth_dev_socket_id(port), 324db4014f2SHuawei Xie rxconf, 32568363d85SYuanhan Liu mbuf_pool); 326bb7085b4SJianfeng Tan if (retval < 0) { 327bb7085b4SJianfeng Tan RTE_LOG(ERR, VHOST_PORT, 328bb7085b4SJianfeng Tan "Failed to setup rx queue %u of port %u: %s.\n", 329bb7085b4SJianfeng Tan q, port, strerror(-retval)); 330d19533e8SHuawei Xie return retval; 331d19533e8SHuawei Xie } 332bb7085b4SJianfeng Tan } 333cc22d8caSShahaf Shuler txconf->offloads = port_conf.txmode.offloads; 334d19533e8SHuawei Xie for (q = 0; q < tx_rings; q ++) { 335d19533e8SHuawei Xie retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, 336db4014f2SHuawei Xie rte_eth_dev_socket_id(port), 337db4014f2SHuawei Xie txconf); 338bb7085b4SJianfeng Tan if (retval < 0) { 339bb7085b4SJianfeng Tan RTE_LOG(ERR, VHOST_PORT, 340bb7085b4SJianfeng Tan "Failed to setup tx queue %u of port %u: %s.\n", 341bb7085b4SJianfeng Tan q, port, strerror(-retval)); 342d19533e8SHuawei Xie return retval; 343d19533e8SHuawei Xie } 344bb7085b4SJianfeng Tan } 345d19533e8SHuawei Xie 346d19533e8SHuawei Xie /* Start the device. */ 347d19533e8SHuawei Xie retval = rte_eth_dev_start(port); 348d19533e8SHuawei Xie if (retval < 0) { 349bb7085b4SJianfeng Tan RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n", 350bb7085b4SJianfeng Tan port, strerror(-retval)); 351d19533e8SHuawei Xie return retval; 352d19533e8SHuawei Xie } 353d19533e8SHuawei Xie 354f430bbceSIvan Ilchenko if (promiscuous) { 355f430bbceSIvan Ilchenko retval = rte_eth_promiscuous_enable(port); 356f430bbceSIvan Ilchenko if (retval != 0) { 357f430bbceSIvan Ilchenko RTE_LOG(ERR, VHOST_PORT, 358f430bbceSIvan Ilchenko "Failed to enable promiscuous mode on port %u: %s\n", 359f430bbceSIvan Ilchenko port, rte_strerror(-retval)); 360f430bbceSIvan Ilchenko return retval; 361f430bbceSIvan Ilchenko } 362f430bbceSIvan Ilchenko } 36390924cafSOuyang Changchun 36470febdcfSIgor Romanov retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]); 36570febdcfSIgor Romanov if (retval < 0) { 36670febdcfSIgor Romanov RTE_LOG(ERR, VHOST_PORT, 36770febdcfSIgor Romanov "Failed to get MAC address on port %u: %s\n", 36870febdcfSIgor Romanov port, rte_strerror(-retval)); 36970febdcfSIgor Romanov return retval; 37070febdcfSIgor Romanov } 37170febdcfSIgor Romanov 372d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices); 373d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 374d19533e8SHuawei Xie " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", 375f8244c63SZhiyong Yang port, 376d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[0], 377d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[1], 378d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[2], 379d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[3], 380d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[4], 381d19533e8SHuawei Xie vmdq_ports_eth_addr[port].addr_bytes[5]); 382d19533e8SHuawei Xie 383d19533e8SHuawei Xie return 0; 384d19533e8SHuawei Xie } 385d19533e8SHuawei Xie 386d19533e8SHuawei Xie /* 387bde19a4dSJiayu Hu * Set socket file path. 
388d19533e8SHuawei Xie */
389d19533e8SHuawei Xie static int
390bde19a4dSJiayu Hu us_vhost_parse_socket_path(const char *q_arg)
391d19533e8SHuawei Xie {
392d79035b7STiwei Bie char *old;
393d79035b7STiwei Bie
394d19533e8SHuawei Xie /* check that the socket path length fits within PATH_MAX */
395fa81d3b9SGang Jiang if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
396d19533e8SHuawei Xie return -1;
397ad0eef4dSJiayu Hu
398d79035b7STiwei Bie old = socket_files;
399ad0eef4dSJiayu Hu socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
400d79035b7STiwei Bie if (socket_files == NULL) {
401d79035b7STiwei Bie free(old);
402d79035b7STiwei Bie return -1;
403d79035b7STiwei Bie }
404d79035b7STiwei Bie
405f9acaf84SBruce Richardson strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
406ad0eef4dSJiayu Hu nb_sockets++;
407d19533e8SHuawei Xie
408d19533e8SHuawei Xie return 0;
409d19533e8SHuawei Xie }
410d19533e8SHuawei Xie
411d19533e8SHuawei Xie /*
412d19533e8SHuawei Xie * Parse the portmask provided at run time.
413d19533e8SHuawei Xie */
414d19533e8SHuawei Xie static int
415d19533e8SHuawei Xie parse_portmask(const char *portmask)
416d19533e8SHuawei Xie {
417d19533e8SHuawei Xie char *end = NULL;
418d19533e8SHuawei Xie unsigned long pm;
419d19533e8SHuawei Xie
420d19533e8SHuawei Xie errno = 0;
421d19533e8SHuawei Xie
422d19533e8SHuawei Xie /* parse hexadecimal string */
423d19533e8SHuawei Xie pm = strtoul(portmask, &end, 16);
424d19533e8SHuawei Xie if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
425ce6b8c31SSarosh Arif return 0;
426d19533e8SHuawei Xie
427d19533e8SHuawei Xie return pm;
428d19533e8SHuawei Xie
429d19533e8SHuawei Xie }
430d19533e8SHuawei Xie
431d19533e8SHuawei Xie /*
432d19533e8SHuawei Xie * Parse num options at run time.
433d19533e8SHuawei Xie */
434d19533e8SHuawei Xie static int
435d19533e8SHuawei Xie parse_num_opt(const char *q_arg, uint32_t max_valid_value)
436d19533e8SHuawei Xie {
437d19533e8SHuawei Xie char *end = NULL;
438d19533e8SHuawei Xie unsigned long num;
439d19533e8SHuawei Xie
440d19533e8SHuawei Xie errno = 0;
441d19533e8SHuawei Xie
442d19533e8SHuawei Xie /* parse unsigned int string */
443d19533e8SHuawei Xie num = strtoul(q_arg, &end, 10);
444d19533e8SHuawei Xie if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
445d19533e8SHuawei Xie return -1;
446d19533e8SHuawei Xie
447d19533e8SHuawei Xie if (num > max_valid_value)
448d19533e8SHuawei Xie return -1;
449d19533e8SHuawei Xie
450d19533e8SHuawei Xie return num;
451d19533e8SHuawei Xie
452d19533e8SHuawei Xie }
453d19533e8SHuawei Xie
454d19533e8SHuawei Xie /*
455d19533e8SHuawei Xie * Display usage
456d19533e8SHuawei Xie */
457d19533e8SHuawei Xie static void
458d19533e8SHuawei Xie us_vhost_usage(const char *prgname)
459d19533e8SHuawei Xie {
460d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
461d19533e8SHuawei Xie " --vm2vm [0|1|2]\n"
462d19533e8SHuawei Xie " --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
463bde19a4dSJiayu Hu " --socket-file <path>\n"
464d19533e8SHuawei Xie " --nb-devices ND\n"
465d19533e8SHuawei Xie " -p PORTMASK: Set mask for ports to be used by application\n"
466d19533e8SHuawei Xie " --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
467d19533e8SHuawei Xie " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
468d19533e8SHuawei Xie " --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX.
This makes effect only if retries on rx enabled\n" 469d19533e8SHuawei Xie " --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n" 470d19533e8SHuawei Xie " --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n" 471d19533e8SHuawei Xie " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n" 472bde19a4dSJiayu Hu " --socket-file: The path of the socket file.\n" 4739fd72e3cSJijiang Liu " --tx-csum [0|1] disable/enable TX checksum offload.\n" 4742345e3beSYuanhan Liu " --tso [0|1] disable/enable TCP segment offload.\n" 4753a04ecb2SCheng Jiang " --client register a vhost-user socket as client mode.\n" 4763a04ecb2SCheng Jiang " --dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n" 4773a04ecb2SCheng Jiang " --dmas register dma channel for specific vhost device.\n", 478d19533e8SHuawei Xie prgname); 479d19533e8SHuawei Xie } 480d19533e8SHuawei Xie 481d19533e8SHuawei Xie /* 482d19533e8SHuawei Xie * Parse the arguments given in the command line of the application. 483d19533e8SHuawei Xie */ 484d19533e8SHuawei Xie static int 485d19533e8SHuawei Xie us_vhost_parse_args(int argc, char **argv) 486d19533e8SHuawei Xie { 487d19533e8SHuawei Xie int opt, ret; 488d19533e8SHuawei Xie int option_index; 489d19533e8SHuawei Xie unsigned i; 490d19533e8SHuawei Xie const char *prgname = argv[0]; 491d19533e8SHuawei Xie static struct option long_option[] = { 492d19533e8SHuawei Xie {"vm2vm", required_argument, NULL, 0}, 493d19533e8SHuawei Xie {"rx-retry", required_argument, NULL, 0}, 494d19533e8SHuawei Xie {"rx-retry-delay", required_argument, NULL, 0}, 495d19533e8SHuawei Xie {"rx-retry-num", required_argument, NULL, 0}, 496d19533e8SHuawei Xie {"mergeable", required_argument, NULL, 0}, 497d19533e8SHuawei Xie {"stats", required_argument, NULL, 0}, 498bde19a4dSJiayu Hu {"socket-file", required_argument, NULL, 0}, 4999fd72e3cSJijiang Liu {"tx-csum", required_argument, NULL, 0}, 5009fd72e3cSJijiang Liu {"tso", required_argument, NULL, 0}, 5012345e3beSYuanhan Liu {"client", no_argument, &client_mode, 1}, 502ca059fa5SYuanhan Liu {"builtin-net-driver", no_argument, &builtin_net_driver, 1}, 5033a04ecb2SCheng Jiang {"dma-type", required_argument, NULL, 0}, 5043a04ecb2SCheng Jiang {"dmas", required_argument, NULL, 0}, 505d19533e8SHuawei Xie {NULL, 0, 0, 0}, 506d19533e8SHuawei Xie }; 507d19533e8SHuawei Xie 508d19533e8SHuawei Xie /* Parse command line */ 50990924cafSOuyang Changchun while ((opt = getopt_long(argc, argv, "p:P", 51090924cafSOuyang Changchun long_option, &option_index)) != EOF) { 511d19533e8SHuawei Xie switch (opt) { 512d19533e8SHuawei Xie /* Portmask */ 513d19533e8SHuawei Xie case 'p': 514d19533e8SHuawei Xie enabled_port_mask = parse_portmask(optarg); 515d19533e8SHuawei Xie if (enabled_port_mask == 0) { 516d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n"); 517d19533e8SHuawei Xie us_vhost_usage(prgname); 518d19533e8SHuawei Xie return -1; 519d19533e8SHuawei Xie } 520d19533e8SHuawei Xie break; 521d19533e8SHuawei Xie 52290924cafSOuyang Changchun case 'P': 52390924cafSOuyang Changchun promiscuous = 1; 52490924cafSOuyang Changchun vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode = 52590924cafSOuyang Changchun ETH_VMDQ_ACCEPT_BROADCAST | 52690924cafSOuyang Changchun ETH_VMDQ_ACCEPT_MULTICAST; 52790924cafSOuyang Changchun 52890924cafSOuyang Changchun break; 52990924cafSOuyang Changchun 530d19533e8SHuawei Xie case 0: 531d19533e8SHuawei Xie /* Enable/disable vm2vm comms. 
*/ 532d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "vm2vm", 533d19533e8SHuawei Xie MAX_LONG_OPT_SZ)) { 534d19533e8SHuawei Xie ret = parse_num_opt(optarg, (VM2VM_LAST - 1)); 535d19533e8SHuawei Xie if (ret == -1) { 536d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, 537d19533e8SHuawei Xie "Invalid argument for " 538d19533e8SHuawei Xie "vm2vm [0|1|2]\n"); 539d19533e8SHuawei Xie us_vhost_usage(prgname); 540d19533e8SHuawei Xie return -1; 541d19533e8SHuawei Xie } else { 542d19533e8SHuawei Xie vm2vm_mode = (vm2vm_type)ret; 543d19533e8SHuawei Xie } 544d19533e8SHuawei Xie } 545d19533e8SHuawei Xie 546d19533e8SHuawei Xie /* Enable/disable retries on RX. */ 547d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) { 548d19533e8SHuawei Xie ret = parse_num_opt(optarg, 1); 549d19533e8SHuawei Xie if (ret == -1) { 550d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n"); 551d19533e8SHuawei Xie us_vhost_usage(prgname); 552d19533e8SHuawei Xie return -1; 553d19533e8SHuawei Xie } else { 554d19533e8SHuawei Xie enable_retry = ret; 555d19533e8SHuawei Xie } 556d19533e8SHuawei Xie } 557d19533e8SHuawei Xie 5589fd72e3cSJijiang Liu /* Enable/disable TX checksum offload. */ 5599fd72e3cSJijiang Liu if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) { 5609fd72e3cSJijiang Liu ret = parse_num_opt(optarg, 1); 5619fd72e3cSJijiang Liu if (ret == -1) { 5629fd72e3cSJijiang Liu RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n"); 5639fd72e3cSJijiang Liu us_vhost_usage(prgname); 5649fd72e3cSJijiang Liu return -1; 5659fd72e3cSJijiang Liu } else 5669fd72e3cSJijiang Liu enable_tx_csum = ret; 5679fd72e3cSJijiang Liu } 5689fd72e3cSJijiang Liu 5699fd72e3cSJijiang Liu /* Enable/disable TSO offload. */ 5709fd72e3cSJijiang Liu if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) { 5719fd72e3cSJijiang Liu ret = parse_num_opt(optarg, 1); 5729fd72e3cSJijiang Liu if (ret == -1) { 5739fd72e3cSJijiang Liu RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n"); 5749fd72e3cSJijiang Liu us_vhost_usage(prgname); 5759fd72e3cSJijiang Liu return -1; 5769fd72e3cSJijiang Liu } else 5779fd72e3cSJijiang Liu enable_tso = ret; 5789fd72e3cSJijiang Liu } 5799fd72e3cSJijiang Liu 580d19533e8SHuawei Xie /* Specify the retries delay time (in useconds) on RX. */ 581d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) { 582d19533e8SHuawei Xie ret = parse_num_opt(optarg, INT32_MAX); 583d19533e8SHuawei Xie if (ret == -1) { 584d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n"); 585d19533e8SHuawei Xie us_vhost_usage(prgname); 586d19533e8SHuawei Xie return -1; 587d19533e8SHuawei Xie } else { 588d19533e8SHuawei Xie burst_rx_delay_time = ret; 589d19533e8SHuawei Xie } 590d19533e8SHuawei Xie } 591d19533e8SHuawei Xie 592d19533e8SHuawei Xie /* Specify the retries number on RX. 
*/ 593d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) { 594d19533e8SHuawei Xie ret = parse_num_opt(optarg, INT32_MAX); 595d19533e8SHuawei Xie if (ret == -1) { 596d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n"); 597d19533e8SHuawei Xie us_vhost_usage(prgname); 598d19533e8SHuawei Xie return -1; 599d19533e8SHuawei Xie } else { 600d19533e8SHuawei Xie burst_rx_retry_num = ret; 601d19533e8SHuawei Xie } 602d19533e8SHuawei Xie } 603d19533e8SHuawei Xie 604d19533e8SHuawei Xie /* Enable/disable RX mergeable buffers. */ 605d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) { 606d19533e8SHuawei Xie ret = parse_num_opt(optarg, 1); 607d19533e8SHuawei Xie if (ret == -1) { 608d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n"); 609d19533e8SHuawei Xie us_vhost_usage(prgname); 610d19533e8SHuawei Xie return -1; 611d19533e8SHuawei Xie } else { 61228deb020SHuawei Xie mergeable = !!ret; 613d19533e8SHuawei Xie if (ret) { 614cc22d8caSShahaf Shuler vmdq_conf_default.rxmode.offloads |= 615cc22d8caSShahaf Shuler DEV_RX_OFFLOAD_JUMBO_FRAME; 616d19533e8SHuawei Xie vmdq_conf_default.rxmode.max_rx_pkt_len 617d19533e8SHuawei Xie = JUMBO_FRAME_MAX_SIZE; 618d19533e8SHuawei Xie } 619d19533e8SHuawei Xie } 620d19533e8SHuawei Xie } 621d19533e8SHuawei Xie 622d19533e8SHuawei Xie /* Enable/disable stats. */ 623d19533e8SHuawei Xie if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) { 624d19533e8SHuawei Xie ret = parse_num_opt(optarg, INT32_MAX); 625d19533e8SHuawei Xie if (ret == -1) { 626bde19a4dSJiayu Hu RTE_LOG(INFO, VHOST_CONFIG, 627bde19a4dSJiayu Hu "Invalid argument for stats [0..N]\n"); 628d19533e8SHuawei Xie us_vhost_usage(prgname); 629d19533e8SHuawei Xie return -1; 630d19533e8SHuawei Xie } else { 631d19533e8SHuawei Xie enable_stats = ret; 632d19533e8SHuawei Xie } 633d19533e8SHuawei Xie } 634d19533e8SHuawei Xie 635bde19a4dSJiayu Hu /* Set socket file path. 
*/ 636bde19a4dSJiayu Hu if (!strncmp(long_option[option_index].name, 637bde19a4dSJiayu Hu "socket-file", MAX_LONG_OPT_SZ)) { 638bde19a4dSJiayu Hu if (us_vhost_parse_socket_path(optarg) == -1) { 639bde19a4dSJiayu Hu RTE_LOG(INFO, VHOST_CONFIG, 640bde19a4dSJiayu Hu "Invalid argument for socket name (Max %d characters)\n", 641bde19a4dSJiayu Hu PATH_MAX); 642d19533e8SHuawei Xie us_vhost_usage(prgname); 643d19533e8SHuawei Xie return -1; 644d19533e8SHuawei Xie } 645d19533e8SHuawei Xie } 646d19533e8SHuawei Xie 6473a04ecb2SCheng Jiang if (!strncmp(long_option[option_index].name, 6483a04ecb2SCheng Jiang "dma-type", MAX_LONG_OPT_SZ)) { 6492b7126f5SCheng Jiang if (strlen(optarg) >= MAX_LONG_OPT_SZ) { 6502b7126f5SCheng Jiang RTE_LOG(INFO, VHOST_CONFIG, 6512b7126f5SCheng Jiang "Wrong DMA type\n"); 6522b7126f5SCheng Jiang us_vhost_usage(prgname); 6532b7126f5SCheng Jiang return -1; 6542b7126f5SCheng Jiang } 6553a04ecb2SCheng Jiang strcpy(dma_type, optarg); 6563a04ecb2SCheng Jiang } 6573a04ecb2SCheng Jiang 6583a04ecb2SCheng Jiang if (!strncmp(long_option[option_index].name, 6593a04ecb2SCheng Jiang "dmas", MAX_LONG_OPT_SZ)) { 6603a04ecb2SCheng Jiang if (open_dma(optarg) == -1) { 6613a04ecb2SCheng Jiang RTE_LOG(INFO, VHOST_CONFIG, 6623a04ecb2SCheng Jiang "Wrong DMA args\n"); 6633a04ecb2SCheng Jiang us_vhost_usage(prgname); 6643a04ecb2SCheng Jiang return -1; 6653a04ecb2SCheng Jiang } 6663a04ecb2SCheng Jiang async_vhost_driver = 1; 6673a04ecb2SCheng Jiang } 6683a04ecb2SCheng Jiang 669d19533e8SHuawei Xie break; 670d19533e8SHuawei Xie 671d19533e8SHuawei Xie /* Invalid option - print options. */ 672d19533e8SHuawei Xie default: 673d19533e8SHuawei Xie us_vhost_usage(prgname); 674d19533e8SHuawei Xie return -1; 675d19533e8SHuawei Xie } 676d19533e8SHuawei Xie } 677d19533e8SHuawei Xie 678d19533e8SHuawei Xie for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 679d19533e8SHuawei Xie if (enabled_port_mask & (1 << i)) 680f8244c63SZhiyong Yang ports[num_ports++] = i; 681d19533e8SHuawei Xie } 682d19533e8SHuawei Xie 683d19533e8SHuawei Xie if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) { 684d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u," 685d19533e8SHuawei Xie "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS); 686d19533e8SHuawei Xie return -1; 687d19533e8SHuawei Xie } 688d19533e8SHuawei Xie 689d19533e8SHuawei Xie return 0; 690d19533e8SHuawei Xie } 691d19533e8SHuawei Xie 692d19533e8SHuawei Xie /* 693d19533e8SHuawei Xie * Update the global var NUM_PORTS and array PORTS according to system ports number 694d19533e8SHuawei Xie * and return valid ports number 695d19533e8SHuawei Xie */ 696d19533e8SHuawei Xie static unsigned check_ports_num(unsigned nb_ports) 697d19533e8SHuawei Xie { 698d19533e8SHuawei Xie unsigned valid_num_ports = num_ports; 699d19533e8SHuawei Xie unsigned portid; 700d19533e8SHuawei Xie 701d19533e8SHuawei Xie if (num_ports > nb_ports) { 702d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n", 703d19533e8SHuawei Xie num_ports, nb_ports); 704d19533e8SHuawei Xie num_ports = nb_ports; 705d19533e8SHuawei Xie } 706d19533e8SHuawei Xie 707d19533e8SHuawei Xie for (portid = 0; portid < num_ports; portid ++) { 708a9dbe180SThomas Monjalon if (!rte_eth_dev_is_valid_port(ports[portid])) { 709a9dbe180SThomas Monjalon RTE_LOG(INFO, VHOST_PORT, 710a9dbe180SThomas Monjalon "\nSpecified port ID(%u) is not valid\n", 711a9dbe180SThomas Monjalon ports[portid]); 712d19533e8SHuawei Xie ports[portid] = INVALID_PORT_ID; 713d19533e8SHuawei 
Xie valid_num_ports--; 714d19533e8SHuawei Xie } 715d19533e8SHuawei Xie } 716d19533e8SHuawei Xie return valid_num_ports; 717d19533e8SHuawei Xie } 718d19533e8SHuawei Xie 719c0583d98SJerin Jacob static __rte_always_inline struct vhost_dev * 7206d13ea8eSOlivier Matz find_vhost_dev(struct rte_ether_addr *mac) 72145657a5cSYuanhan Liu { 72245657a5cSYuanhan Liu struct vhost_dev *vdev; 72345657a5cSYuanhan Liu 72497daf19eSYuanhan Liu TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { 72545657a5cSYuanhan Liu if (vdev->ready == DEVICE_RX && 726538da7a1SOlivier Matz rte_is_same_ether_addr(mac, &vdev->mac_address)) 72745657a5cSYuanhan Liu return vdev; 72845657a5cSYuanhan Liu } 72945657a5cSYuanhan Liu 73045657a5cSYuanhan Liu return NULL; 73145657a5cSYuanhan Liu } 73245657a5cSYuanhan Liu 733d19533e8SHuawei Xie /* 734d19533e8SHuawei Xie * This function learns the MAC address of the device and registers this along with a 735d19533e8SHuawei Xie * vlan tag to a VMDQ. 736d19533e8SHuawei Xie */ 737d19533e8SHuawei Xie static int 738e571e6b4SHuawei Xie link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m) 739d19533e8SHuawei Xie { 7406d13ea8eSOlivier Matz struct rte_ether_hdr *pkt_hdr; 741d19533e8SHuawei Xie int i, ret; 742d19533e8SHuawei Xie 743d19533e8SHuawei Xie /* Learn MAC address of guest device from packet */ 7446d13ea8eSOlivier Matz pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 745d19533e8SHuawei Xie 74645657a5cSYuanhan Liu if (find_vhost_dev(&pkt_hdr->s_addr)) { 74745657a5cSYuanhan Liu RTE_LOG(ERR, VHOST_DATA, 748c08a3490SYuanhan Liu "(%d) device is using a registered MAC!\n", 749e2a1dd12SYuanhan Liu vdev->vid); 750d19533e8SHuawei Xie return -1; 751d19533e8SHuawei Xie } 752d19533e8SHuawei Xie 75335b2d13fSOlivier Matz for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 754e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i]; 755d19533e8SHuawei Xie 756d19533e8SHuawei Xie /* vlan_tag currently uses the device_id. */ 757e2a1dd12SYuanhan Liu vdev->vlan_tag = vlan_tags[vdev->vid]; 758d19533e8SHuawei Xie 759d19533e8SHuawei Xie /* Print out VMDQ registration info. */ 760c08a3490SYuanhan Liu RTE_LOG(INFO, VHOST_DATA, 761c08a3490SYuanhan Liu "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n", 762e2a1dd12SYuanhan Liu vdev->vid, 763e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1], 764e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3], 765e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5], 766e571e6b4SHuawei Xie vdev->vlan_tag); 767d19533e8SHuawei Xie 768d19533e8SHuawei Xie /* Register the MAC address. */ 76984b02d16SHuawei Xie ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, 770e2a1dd12SYuanhan Liu (uint32_t)vdev->vid + vmdq_pool_base); 771d19533e8SHuawei Xie if (ret) 772c08a3490SYuanhan Liu RTE_LOG(ERR, VHOST_DATA, 773c08a3490SYuanhan Liu "(%d) failed to add device MAC address to VMDQ\n", 774e2a1dd12SYuanhan Liu vdev->vid); 775d19533e8SHuawei Xie 77665453928SJianfeng Tan rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1); 777d19533e8SHuawei Xie 778d19533e8SHuawei Xie /* Set device as ready for RX. */ 779e571e6b4SHuawei Xie vdev->ready = DEVICE_RX; 780d19533e8SHuawei Xie 781d19533e8SHuawei Xie return 0; 782d19533e8SHuawei Xie } 783d19533e8SHuawei Xie 784d19533e8SHuawei Xie /* 785d19533e8SHuawei Xie * Removes MAC address and vlan tag from VMDQ. 
Ensures that nothing is adding buffers to the RX 786d19533e8SHuawei Xie * queue before disabling RX on the device. 787d19533e8SHuawei Xie */ 788d19533e8SHuawei Xie static inline void 789e571e6b4SHuawei Xie unlink_vmdq(struct vhost_dev *vdev) 790d19533e8SHuawei Xie { 791d19533e8SHuawei Xie unsigned i = 0; 792d19533e8SHuawei Xie unsigned rx_count; 793d19533e8SHuawei Xie struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; 794d19533e8SHuawei Xie 795e571e6b4SHuawei Xie if (vdev->ready == DEVICE_RX) { 796d19533e8SHuawei Xie /*clear MAC and VLAN settings*/ 797e571e6b4SHuawei Xie rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address); 798d19533e8SHuawei Xie for (i = 0; i < 6; i++) 799e571e6b4SHuawei Xie vdev->mac_address.addr_bytes[i] = 0; 800d19533e8SHuawei Xie 801e571e6b4SHuawei Xie vdev->vlan_tag = 0; 802d19533e8SHuawei Xie 803d19533e8SHuawei Xie /*Clear out the receive buffers*/ 804d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 805e571e6b4SHuawei Xie (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); 806d19533e8SHuawei Xie 807d19533e8SHuawei Xie while (rx_count) { 808d19533e8SHuawei Xie for (i = 0; i < rx_count; i++) 809d19533e8SHuawei Xie rte_pktmbuf_free(pkts_burst[i]); 810d19533e8SHuawei Xie 811d19533e8SHuawei Xie rx_count = rte_eth_rx_burst(ports[0], 812e571e6b4SHuawei Xie (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST); 813d19533e8SHuawei Xie } 814d19533e8SHuawei Xie 815e571e6b4SHuawei Xie vdev->ready = DEVICE_MAC_LEARNING; 816d19533e8SHuawei Xie } 817d19533e8SHuawei Xie } 818d19533e8SHuawei Xie 819a68ba8e0SCheng Jiang static inline void 820a68ba8e0SCheng Jiang free_pkts(struct rte_mbuf **pkts, uint16_t n) 821a68ba8e0SCheng Jiang { 822a68ba8e0SCheng Jiang while (n--) 823a68ba8e0SCheng Jiang rte_pktmbuf_free(pkts[n]); 824a68ba8e0SCheng Jiang } 825a68ba8e0SCheng Jiang 826c0583d98SJerin Jacob static __rte_always_inline void 827a68ba8e0SCheng Jiang complete_async_pkts(struct vhost_dev *vdev) 828a68ba8e0SCheng Jiang { 829a68ba8e0SCheng Jiang struct rte_mbuf *p_cpl[MAX_PKT_BURST]; 830a68ba8e0SCheng Jiang uint16_t complete_count; 831a68ba8e0SCheng Jiang 832a68ba8e0SCheng Jiang complete_count = rte_vhost_poll_enqueue_completed(vdev->vid, 833a68ba8e0SCheng Jiang VIRTIO_RXQ, p_cpl, MAX_PKT_BURST); 834d4d4c6feSCheng Jiang if (complete_count) 835a68ba8e0SCheng Jiang free_pkts(p_cpl, complete_count); 836a68ba8e0SCheng Jiang } 837a68ba8e0SCheng Jiang 838a68ba8e0SCheng Jiang static __rte_always_inline void 839a68ba8e0SCheng Jiang sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev, 8409c5ef512SYuanhan Liu struct rte_mbuf *m) 8419c5ef512SYuanhan Liu { 8429c5ef512SYuanhan Liu uint16_t ret; 8439c5ef512SYuanhan Liu 844ca059fa5SYuanhan Liu if (builtin_net_driver) { 845ca059fa5SYuanhan Liu ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1); 846ca059fa5SYuanhan Liu } else { 8474ecf22e3SYuanhan Liu ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1); 848ca059fa5SYuanhan Liu } 849ca059fa5SYuanhan Liu 8509c5ef512SYuanhan Liu if (enable_stats) { 851a68ba8e0SCheng Jiang __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1, 852a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 853a68ba8e0SCheng Jiang __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret, 854a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 85556fe86f8SYuanhan Liu src_vdev->stats.tx_total++; 85656fe86f8SYuanhan Liu src_vdev->stats.tx += ret; 8579c5ef512SYuanhan Liu } 8589c5ef512SYuanhan Liu } 8599c5ef512SYuanhan Liu 860a68ba8e0SCheng Jiang static __rte_always_inline void 861a68ba8e0SCheng Jiang drain_vhost(struct vhost_dev 
*vdev) 862a68ba8e0SCheng Jiang { 863a68ba8e0SCheng Jiang uint16_t ret; 864*ee6e451fSCheng Jiang uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid; 865a68ba8e0SCheng Jiang uint16_t nr_xmit = vhost_txbuff[buff_idx]->len; 866a68ba8e0SCheng Jiang struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table; 867a68ba8e0SCheng Jiang 868a68ba8e0SCheng Jiang if (builtin_net_driver) { 869a68ba8e0SCheng Jiang ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit); 870a68ba8e0SCheng Jiang } else if (async_vhost_driver) { 871a68ba8e0SCheng Jiang uint32_t cpu_cpl_nr = 0; 872a68ba8e0SCheng Jiang uint16_t enqueue_fail = 0; 873a68ba8e0SCheng Jiang struct rte_mbuf *m_cpu_cpl[nr_xmit]; 874a68ba8e0SCheng Jiang 875a68ba8e0SCheng Jiang complete_async_pkts(vdev); 876a68ba8e0SCheng Jiang ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, 877a68ba8e0SCheng Jiang m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr); 878a68ba8e0SCheng Jiang 879a68ba8e0SCheng Jiang if (cpu_cpl_nr) 880a68ba8e0SCheng Jiang free_pkts(m_cpu_cpl, cpu_cpl_nr); 881a68ba8e0SCheng Jiang 882a68ba8e0SCheng Jiang enqueue_fail = nr_xmit - ret; 883a68ba8e0SCheng Jiang if (enqueue_fail) 884a68ba8e0SCheng Jiang free_pkts(&m[ret], nr_xmit - ret); 885a68ba8e0SCheng Jiang } else { 886a68ba8e0SCheng Jiang ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, 887a68ba8e0SCheng Jiang m, nr_xmit); 888a68ba8e0SCheng Jiang } 889a68ba8e0SCheng Jiang 890a68ba8e0SCheng Jiang if (enable_stats) { 891a68ba8e0SCheng Jiang __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit, 892a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 893a68ba8e0SCheng Jiang __atomic_add_fetch(&vdev->stats.rx_atomic, ret, 894a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 895a68ba8e0SCheng Jiang } 896a68ba8e0SCheng Jiang 897a68ba8e0SCheng Jiang if (!async_vhost_driver) 898a68ba8e0SCheng Jiang free_pkts(m, nr_xmit); 899a68ba8e0SCheng Jiang } 900a68ba8e0SCheng Jiang 901a68ba8e0SCheng Jiang static __rte_always_inline void 902a68ba8e0SCheng Jiang drain_vhost_table(void) 903a68ba8e0SCheng Jiang { 904a68ba8e0SCheng Jiang uint16_t lcore_id = rte_lcore_id(); 905a68ba8e0SCheng Jiang struct vhost_bufftable *vhost_txq; 906a68ba8e0SCheng Jiang struct vhost_dev *vdev; 907a68ba8e0SCheng Jiang uint64_t cur_tsc; 908a68ba8e0SCheng Jiang 909a68ba8e0SCheng Jiang TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { 910a68ba8e0SCheng Jiang vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE 911a68ba8e0SCheng Jiang + vdev->vid]; 912a68ba8e0SCheng Jiang 913a68ba8e0SCheng Jiang cur_tsc = rte_rdtsc(); 914a68ba8e0SCheng Jiang if (unlikely(cur_tsc - vhost_txq->pre_tsc 915a68ba8e0SCheng Jiang > MBUF_TABLE_DRAIN_TSC)) { 916a68ba8e0SCheng Jiang RTE_LOG_DP(DEBUG, VHOST_DATA, 917a68ba8e0SCheng Jiang "Vhost TX queue drained after timeout with burst size %u\n", 918a68ba8e0SCheng Jiang vhost_txq->len); 919a68ba8e0SCheng Jiang drain_vhost(vdev); 920a68ba8e0SCheng Jiang vhost_txq->len = 0; 921a68ba8e0SCheng Jiang vhost_txq->pre_tsc = cur_tsc; 922a68ba8e0SCheng Jiang } 923a68ba8e0SCheng Jiang } 924a68ba8e0SCheng Jiang } 925a68ba8e0SCheng Jiang 926d19533e8SHuawei Xie /* 927d19533e8SHuawei Xie * Check if the packet destination MAC address is for a local device. If so then put 928d19533e8SHuawei Xie * the packet on that devices RX queue. If not then return. 
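 * Packets accepted here are not enqueued immediately: they are appended to
 * the per-lcore vhost TX buffer of the destination device and flushed in a
 * burst once MAX_PKT_BURST packets accumulate, or by the periodic drain in
 * drain_vhost_table().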
929d19533e8SHuawei Xie */ 930c0583d98SJerin Jacob static __rte_always_inline int 931e571e6b4SHuawei Xie virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m) 932d19533e8SHuawei Xie { 9336d13ea8eSOlivier Matz struct rte_ether_hdr *pkt_hdr; 93445657a5cSYuanhan Liu struct vhost_dev *dst_vdev; 935a68ba8e0SCheng Jiang struct vhost_bufftable *vhost_txq; 936a68ba8e0SCheng Jiang uint16_t lcore_id = rte_lcore_id(); 9376d13ea8eSOlivier Matz pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 938d19533e8SHuawei Xie 93945657a5cSYuanhan Liu dst_vdev = find_vhost_dev(&pkt_hdr->d_addr); 94045657a5cSYuanhan Liu if (!dst_vdev) 941d19533e8SHuawei Xie return -1; 94245657a5cSYuanhan Liu 943e2a1dd12SYuanhan Liu if (vdev->vid == dst_vdev->vid) { 9445d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 945c08a3490SYuanhan Liu "(%d) TX: src and dst MAC is same. Dropping packet.\n", 946e2a1dd12SYuanhan Liu vdev->vid); 94745657a5cSYuanhan Liu return 0; 94845657a5cSYuanhan Liu } 94945657a5cSYuanhan Liu 9505d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 951e2a1dd12SYuanhan Liu "(%d) TX: MAC address is local\n", dst_vdev->vid); 95245657a5cSYuanhan Liu 95345657a5cSYuanhan Liu if (unlikely(dst_vdev->remove)) { 9545d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 955e2a1dd12SYuanhan Liu "(%d) device is marked for removal\n", dst_vdev->vid); 95645657a5cSYuanhan Liu return 0; 95745657a5cSYuanhan Liu } 95845657a5cSYuanhan Liu 959a68ba8e0SCheng Jiang vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid]; 960a68ba8e0SCheng Jiang vhost_txq->m_table[vhost_txq->len++] = m; 961a68ba8e0SCheng Jiang 962a68ba8e0SCheng Jiang if (enable_stats) { 963a68ba8e0SCheng Jiang vdev->stats.tx_total++; 964a68ba8e0SCheng Jiang vdev->stats.tx++; 965a68ba8e0SCheng Jiang } 966a68ba8e0SCheng Jiang 967a68ba8e0SCheng Jiang if (unlikely(vhost_txq->len == MAX_PKT_BURST)) { 968a68ba8e0SCheng Jiang drain_vhost(dst_vdev); 969a68ba8e0SCheng Jiang vhost_txq->len = 0; 970a68ba8e0SCheng Jiang vhost_txq->pre_tsc = rte_rdtsc(); 971a68ba8e0SCheng Jiang } 97245657a5cSYuanhan Liu return 0; 973d19533e8SHuawei Xie } 974d19533e8SHuawei Xie 975d19533e8SHuawei Xie /* 97672ec8d77SOuyang Changchun * Check if the destination MAC of a packet is one local VM, 97772ec8d77SOuyang Changchun * and get its vlan tag, and offset if it is. 978d19533e8SHuawei Xie */ 979c0583d98SJerin Jacob static __rte_always_inline int 9807f262239SYuanhan Liu find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m, 98172ec8d77SOuyang Changchun uint32_t *offset, uint16_t *vlan_tag) 982d19533e8SHuawei Xie { 98345657a5cSYuanhan Liu struct vhost_dev *dst_vdev; 9846d13ea8eSOlivier Matz struct rte_ether_hdr *pkt_hdr = 9856d13ea8eSOlivier Matz rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 986d19533e8SHuawei Xie 98745657a5cSYuanhan Liu dst_vdev = find_vhost_dev(&pkt_hdr->d_addr); 98845657a5cSYuanhan Liu if (!dst_vdev) 98945657a5cSYuanhan Liu return 0; 99045657a5cSYuanhan Liu 991e2a1dd12SYuanhan Liu if (vdev->vid == dst_vdev->vid) { 9925d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 993c08a3490SYuanhan Liu "(%d) TX: src and dst MAC is same. Dropping packet.\n", 994e2a1dd12SYuanhan Liu vdev->vid); 99572ec8d77SOuyang Changchun return -1; 996d19533e8SHuawei Xie } 997e44fb8a4SOuyang Changchun 998e44fb8a4SOuyang Changchun /* 999e44fb8a4SOuyang Changchun * HW vlan strip will reduce the packet length 1000e44fb8a4SOuyang Changchun * by minus length of vlan tag, so need restore 1001e44fb8a4SOuyang Changchun * the packet length by plus it. 
1002e44fb8a4SOuyang Changchun */ 100372ec8d77SOuyang Changchun *offset = VLAN_HLEN; 1004e2a1dd12SYuanhan Liu *vlan_tag = vlan_tags[vdev->vid]; 1005d19533e8SHuawei Xie 10065d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 10077f262239SYuanhan Liu "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n", 1008e2a1dd12SYuanhan Liu vdev->vid, dst_vdev->vid, *vlan_tag); 1009d19533e8SHuawei Xie 101072ec8d77SOuyang Changchun return 0; 101172ec8d77SOuyang Changchun } 101272ec8d77SOuyang Changchun 10139fd72e3cSJijiang Liu static uint16_t 10149fd72e3cSJijiang Liu get_psd_sum(void *l3_hdr, uint64_t ol_flags) 10159fd72e3cSJijiang Liu { 10169fd72e3cSJijiang Liu if (ol_flags & PKT_TX_IPV4) 10179fd72e3cSJijiang Liu return rte_ipv4_phdr_cksum(l3_hdr, ol_flags); 10180c9da755SDavid Marchand else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 10199fd72e3cSJijiang Liu return rte_ipv6_phdr_cksum(l3_hdr, ol_flags); 10209fd72e3cSJijiang Liu } 10219fd72e3cSJijiang Liu 10229fd72e3cSJijiang Liu static void virtio_tx_offload(struct rte_mbuf *m) 10239fd72e3cSJijiang Liu { 10249fd72e3cSJijiang Liu void *l3_hdr; 1025a7c528e5SOlivier Matz struct rte_ipv4_hdr *ipv4_hdr = NULL; 1026f41b5156SOlivier Matz struct rte_tcp_hdr *tcp_hdr = NULL; 10276d13ea8eSOlivier Matz struct rte_ether_hdr *eth_hdr = 10286d13ea8eSOlivier Matz rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 10299fd72e3cSJijiang Liu 10309fd72e3cSJijiang Liu l3_hdr = (char *)eth_hdr + m->l2_len; 10319fd72e3cSJijiang Liu 1032df40169aSYuanhan Liu if (m->ol_flags & PKT_TX_IPV4) { 1033df40169aSYuanhan Liu ipv4_hdr = l3_hdr; 10349fd72e3cSJijiang Liu ipv4_hdr->hdr_checksum = 0; 1035df40169aSYuanhan Liu m->ol_flags |= PKT_TX_IP_CKSUM; 1036df40169aSYuanhan Liu } 1037df40169aSYuanhan Liu 1038f41b5156SOlivier Matz tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len); 10399fd72e3cSJijiang Liu tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags); 10409fd72e3cSJijiang Liu } 10419fd72e3cSJijiang Liu 1042c0583d98SJerin Jacob static __rte_always_inline void 1043273ecdbcSYuanhan Liu do_drain_mbuf_table(struct mbuf_table *tx_q) 1044273ecdbcSYuanhan Liu { 1045273ecdbcSYuanhan Liu uint16_t count; 1046273ecdbcSYuanhan Liu 1047273ecdbcSYuanhan Liu count = rte_eth_tx_burst(ports[0], tx_q->txq_id, 1048273ecdbcSYuanhan Liu tx_q->m_table, tx_q->len); 1049273ecdbcSYuanhan Liu if (unlikely(count < tx_q->len)) 1050273ecdbcSYuanhan Liu free_pkts(&tx_q->m_table[count], tx_q->len - count); 1051273ecdbcSYuanhan Liu 1052273ecdbcSYuanhan Liu tx_q->len = 0; 1053273ecdbcSYuanhan Liu } 1054273ecdbcSYuanhan Liu 105572ec8d77SOuyang Changchun /* 1056273ecdbcSYuanhan Liu * This function routes the TX packet to the correct interface. This 1057273ecdbcSYuanhan Liu * may be a local device or the physical port. 
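 * Broadcast frames are first replicated to every other vhost device.
 * Unicast frames matching a local MAC are handled by virtio_tx_local()
 * when VM2VM software mode is enabled; in VM2VM hardware mode only the
 * destination VLAN tag is looked up and the frame is sent to the NIC,
 * which loops it back. Anything else goes out on the per-lcore TX queue
 * of the physical port.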
105872ec8d77SOuyang Changchun */ 1059c0583d98SJerin Jacob static __rte_always_inline void 106072ec8d77SOuyang Changchun virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag) 106172ec8d77SOuyang Changchun { 106272ec8d77SOuyang Changchun struct mbuf_table *tx_q; 1063273ecdbcSYuanhan Liu unsigned offset = 0; 106472ec8d77SOuyang Changchun const uint16_t lcore_id = rte_lcore_id(); 10656d13ea8eSOlivier Matz struct rte_ether_hdr *nh; 106672ec8d77SOuyang Changchun 10679c5ef512SYuanhan Liu 10686d13ea8eSOlivier Matz nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 1069538da7a1SOlivier Matz if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) { 10709c5ef512SYuanhan Liu struct vhost_dev *vdev2; 10719c5ef512SYuanhan Liu 107297daf19eSYuanhan Liu TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) { 1073a3fdb532SJunjie Chen if (vdev2 != vdev) 1074a68ba8e0SCheng Jiang sync_virtio_xmit(vdev2, vdev, m); 10759c5ef512SYuanhan Liu } 10769c5ef512SYuanhan Liu goto queue2nic; 10779c5ef512SYuanhan Liu } 10789c5ef512SYuanhan Liu 107972ec8d77SOuyang Changchun /*check if destination is local VM*/ 1080a68ba8e0SCheng Jiang if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) 108172ec8d77SOuyang Changchun return; 108272ec8d77SOuyang Changchun 1083c2ab5162SOuyang Changchun if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { 10847f262239SYuanhan Liu if (unlikely(find_local_dest(vdev, m, &offset, 10857f262239SYuanhan Liu &vlan_tag) != 0)) { 108672ec8d77SOuyang Changchun rte_pktmbuf_free(m); 108772ec8d77SOuyang Changchun return; 108872ec8d77SOuyang Changchun } 1089d19533e8SHuawei Xie } 1090d19533e8SHuawei Xie 10915d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 1092e2a1dd12SYuanhan Liu "(%d) TX: MAC address is external\n", vdev->vid); 1093d19533e8SHuawei Xie 10949c5ef512SYuanhan Liu queue2nic: 10959c5ef512SYuanhan Liu 1096d19533e8SHuawei Xie /*Add packet to the port tx queue*/ 1097d19533e8SHuawei Xie tx_q = &lcore_tx_queue[lcore_id]; 1098d19533e8SHuawei Xie 10996d13ea8eSOlivier Matz nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 110035b2d13fSOlivier Matz if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) { 11018b9bb988SOuyang Changchun /* Guest has inserted the vlan tag. */ 11026d13ea8eSOlivier Matz struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1); 11038b9bb988SOuyang Changchun uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag); 11048b9bb988SOuyang Changchun if ((vm2vm_mode == VM2VM_HARDWARE) && 11058b9bb988SOuyang Changchun (vh->vlan_tci != vlan_tag_be)) 11068b9bb988SOuyang Changchun vh->vlan_tci = vlan_tag_be; 11078b9bb988SOuyang Changchun } else { 11089fd72e3cSJijiang Liu m->ol_flags |= PKT_TX_VLAN_PKT; 1109e44fb8a4SOuyang Changchun 1110c2ab5162SOuyang Changchun /* 1111c2ab5162SOuyang Changchun * Find the right seg to adjust the data len when offset is 1112c2ab5162SOuyang Changchun * bigger than tail room size. 
1113c2ab5162SOuyang Changchun */ 1114c2ab5162SOuyang Changchun if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) { 1115c2ab5162SOuyang Changchun if (likely(offset <= rte_pktmbuf_tailroom(m))) 11164d50b6acSHuawei Xie m->data_len += offset; 1117c2ab5162SOuyang Changchun else { 1118c2ab5162SOuyang Changchun struct rte_mbuf *seg = m; 1119c2ab5162SOuyang Changchun 1120c2ab5162SOuyang Changchun while ((seg->next != NULL) && 1121c2ab5162SOuyang Changchun (offset > rte_pktmbuf_tailroom(seg))) 1122c2ab5162SOuyang Changchun seg = seg->next; 1123c2ab5162SOuyang Changchun 1124c2ab5162SOuyang Changchun seg->data_len += offset; 1125c2ab5162SOuyang Changchun } 1126e44fb8a4SOuyang Changchun m->pkt_len += offset; 1127c2ab5162SOuyang Changchun } 1128e44fb8a4SOuyang Changchun 11294d50b6acSHuawei Xie m->vlan_tci = vlan_tag; 11308b9bb988SOuyang Changchun } 1131d19533e8SHuawei Xie 11325674dad2SYuanhan Liu if (m->ol_flags & PKT_TX_TCP_SEG) 11339fd72e3cSJijiang Liu virtio_tx_offload(m); 11349fd72e3cSJijiang Liu 1135273ecdbcSYuanhan Liu tx_q->m_table[tx_q->len++] = m; 1136d19533e8SHuawei Xie if (enable_stats) { 113756fe86f8SYuanhan Liu vdev->stats.tx_total++; 113856fe86f8SYuanhan Liu vdev->stats.tx++; 1139d19533e8SHuawei Xie } 1140d19533e8SHuawei Xie 1141273ecdbcSYuanhan Liu if (unlikely(tx_q->len == MAX_PKT_BURST)) 1142273ecdbcSYuanhan Liu do_drain_mbuf_table(tx_q); 1143d19533e8SHuawei Xie } 1144d19533e8SHuawei Xie 1145d19533e8SHuawei Xie 1146c0583d98SJerin Jacob static __rte_always_inline void 1147273ecdbcSYuanhan Liu drain_mbuf_table(struct mbuf_table *tx_q) 1148273ecdbcSYuanhan Liu { 1149273ecdbcSYuanhan Liu static uint64_t prev_tsc; 1150273ecdbcSYuanhan Liu uint64_t cur_tsc; 1151273ecdbcSYuanhan Liu 1152273ecdbcSYuanhan Liu if (tx_q->len == 0) 1153d19533e8SHuawei Xie return; 1154273ecdbcSYuanhan Liu 1155273ecdbcSYuanhan Liu cur_tsc = rte_rdtsc(); 1156273ecdbcSYuanhan Liu if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) { 1157273ecdbcSYuanhan Liu prev_tsc = cur_tsc; 1158273ecdbcSYuanhan Liu 11595d8f0bafSOlivier Matz RTE_LOG_DP(DEBUG, VHOST_DATA, 1160273ecdbcSYuanhan Liu "TX queue drained after timeout with burst size %u\n", 1161273ecdbcSYuanhan Liu tx_q->len); 1162273ecdbcSYuanhan Liu do_drain_mbuf_table(tx_q); 1163d19533e8SHuawei Xie } 1164273ecdbcSYuanhan Liu } 1165273ecdbcSYuanhan Liu 1166c0583d98SJerin Jacob static __rte_always_inline void 1167273ecdbcSYuanhan Liu drain_eth_rx(struct vhost_dev *vdev) 1168273ecdbcSYuanhan Liu { 1169273ecdbcSYuanhan Liu uint16_t rx_count, enqueue_count; 1170a68ba8e0SCheng Jiang struct rte_mbuf *pkts[MAX_PKT_BURST]; 1171273ecdbcSYuanhan Liu 1172273ecdbcSYuanhan Liu rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q, 1173273ecdbcSYuanhan Liu pkts, MAX_PKT_BURST); 1174abec60e7SCheng Jiang 1175273ecdbcSYuanhan Liu if (!rx_count) 1176273ecdbcSYuanhan Liu return; 1177273ecdbcSYuanhan Liu 1178d19533e8SHuawei Xie /* 1179273ecdbcSYuanhan Liu * When "enable_retry" is set, here we wait and retry when there 1180273ecdbcSYuanhan Liu * is no enough free slots in the queue to hold @rx_count packets, 1181273ecdbcSYuanhan Liu * to diminish packet loss. 
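 * Each retry waits burst_rx_delay_time microseconds and gives up after
 * burst_rx_retry_num attempts (tunable via --rx-retry-delay and
 * --rx-retry-num).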
1182273ecdbcSYuanhan Liu */ 1183273ecdbcSYuanhan Liu if (enable_retry && 11844ecf22e3SYuanhan Liu unlikely(rx_count > rte_vhost_avail_entries(vdev->vid, 1185273ecdbcSYuanhan Liu VIRTIO_RXQ))) { 1186273ecdbcSYuanhan Liu uint32_t retry; 1187273ecdbcSYuanhan Liu 1188273ecdbcSYuanhan Liu for (retry = 0; retry < burst_rx_retry_num; retry++) { 1189273ecdbcSYuanhan Liu rte_delay_us(burst_rx_delay_time); 11904ecf22e3SYuanhan Liu if (rx_count <= rte_vhost_avail_entries(vdev->vid, 1191273ecdbcSYuanhan Liu VIRTIO_RXQ)) 1192273ecdbcSYuanhan Liu break; 1193273ecdbcSYuanhan Liu } 1194273ecdbcSYuanhan Liu } 1195273ecdbcSYuanhan Liu 1196ca059fa5SYuanhan Liu if (builtin_net_driver) { 1197ca059fa5SYuanhan Liu enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, 1198ca059fa5SYuanhan Liu pkts, rx_count); 1199abec60e7SCheng Jiang } else if (async_vhost_driver) { 1200a68ba8e0SCheng Jiang uint32_t cpu_cpl_nr = 0; 1201a68ba8e0SCheng Jiang uint16_t enqueue_fail = 0; 1202a68ba8e0SCheng Jiang struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST]; 1203a68ba8e0SCheng Jiang 1204a68ba8e0SCheng Jiang complete_async_pkts(vdev); 1205abec60e7SCheng Jiang enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid, 1206a68ba8e0SCheng Jiang VIRTIO_RXQ, pkts, rx_count, 1207a68ba8e0SCheng Jiang m_cpu_cpl, &cpu_cpl_nr); 1208a68ba8e0SCheng Jiang if (cpu_cpl_nr) 1209a68ba8e0SCheng Jiang free_pkts(m_cpu_cpl, cpu_cpl_nr); 1210a68ba8e0SCheng Jiang 1211a68ba8e0SCheng Jiang enqueue_fail = rx_count - enqueue_count; 1212a68ba8e0SCheng Jiang if (enqueue_fail) 1213a68ba8e0SCheng Jiang free_pkts(&pkts[enqueue_count], enqueue_fail); 1214a68ba8e0SCheng Jiang 1215ca059fa5SYuanhan Liu } else { 12164ecf22e3SYuanhan Liu enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, 1217273ecdbcSYuanhan Liu pkts, rx_count); 1218ca059fa5SYuanhan Liu } 1219abec60e7SCheng Jiang 1220273ecdbcSYuanhan Liu if (enable_stats) { 1221a68ba8e0SCheng Jiang __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count, 1222a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 1223a68ba8e0SCheng Jiang __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count, 1224a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 1225273ecdbcSYuanhan Liu } 1226273ecdbcSYuanhan Liu 1227abec60e7SCheng Jiang if (!async_vhost_driver) 1228273ecdbcSYuanhan Liu free_pkts(pkts, rx_count); 1229273ecdbcSYuanhan Liu } 1230273ecdbcSYuanhan Liu 1231c0583d98SJerin Jacob static __rte_always_inline void 1232273ecdbcSYuanhan Liu drain_virtio_tx(struct vhost_dev *vdev) 1233273ecdbcSYuanhan Liu { 1234273ecdbcSYuanhan Liu struct rte_mbuf *pkts[MAX_PKT_BURST]; 1235273ecdbcSYuanhan Liu uint16_t count; 1236273ecdbcSYuanhan Liu uint16_t i; 1237273ecdbcSYuanhan Liu 1238ca059fa5SYuanhan Liu if (builtin_net_driver) { 1239ca059fa5SYuanhan Liu count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, 1240273ecdbcSYuanhan Liu pkts, MAX_PKT_BURST); 1241ca059fa5SYuanhan Liu } else { 1242ca059fa5SYuanhan Liu count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, 1243ca059fa5SYuanhan Liu mbuf_pool, pkts, MAX_PKT_BURST); 1244ca059fa5SYuanhan Liu } 1245273ecdbcSYuanhan Liu 1246273ecdbcSYuanhan Liu /* setup VMDq for the first packet */ 1247273ecdbcSYuanhan Liu if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) { 1248273ecdbcSYuanhan Liu if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) 1249273ecdbcSYuanhan Liu free_pkts(pkts, count); 1250273ecdbcSYuanhan Liu } 1251273ecdbcSYuanhan Liu 12527f262239SYuanhan Liu for (i = 0; i < count; ++i) 1253e2a1dd12SYuanhan Liu virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]); 1254273ecdbcSYuanhan Liu } 
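/*
 * Editor's illustrative sketch -- not part of the original sources.
 * The retry logic at the top of drain_eth_rx() above can be read as the
 * small helper below: poll rte_vhost_avail_entries() up to
 * burst_rx_retry_num times, waiting burst_rx_delay_time microseconds
 * between polls, and report whether the guest Rx ring ever had room for
 * the burst. The helper name is hypothetical and only meant to make the
 * control flow explicit.
 */
static __rte_always_inline int
vring_has_room_after_retries(int vid, uint16_t queue_id, uint16_t needed)
{
	uint32_t retry;

	if (needed <= rte_vhost_avail_entries(vid, queue_id))
		return 1;

	for (retry = 0; retry < burst_rx_retry_num; retry++) {
		rte_delay_us(burst_rx_delay_time);
		if (needed <= rte_vhost_avail_entries(vid, queue_id))
			return 1;
	}

	return 0;
}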
1255273ecdbcSYuanhan Liu 
1256273ecdbcSYuanhan Liu /*
1257273ecdbcSYuanhan Liu * Main function of vhost-switch. It basically does:
1258273ecdbcSYuanhan Liu *
1259273ecdbcSYuanhan Liu * for each vhost device {
1260273ecdbcSYuanhan Liu * - drain_eth_rx()
1261273ecdbcSYuanhan Liu *
1262273ecdbcSYuanhan Liu * Which drains the host eth Rx queue linked to the vhost device,
1263273ecdbcSYuanhan Liu * and delivers all of them to the guest virtio Rx ring associated with
1264273ecdbcSYuanhan Liu * this vhost device.
1265273ecdbcSYuanhan Liu *
1266273ecdbcSYuanhan Liu * - drain_virtio_tx()
1267273ecdbcSYuanhan Liu *
1268273ecdbcSYuanhan Liu * Which drains the guest virtio Tx queue and delivers all of them
1269273ecdbcSYuanhan Liu * to the target, which could be another vhost device, or the
1270273ecdbcSYuanhan Liu * physical eth dev. The route is done in function "virtio_tx_route".
1271273ecdbcSYuanhan Liu * }
1272d19533e8SHuawei Xie */
1273d19533e8SHuawei Xie static int
1274273ecdbcSYuanhan Liu switch_worker(void *arg __rte_unused)
1275d19533e8SHuawei Xie {
1276273ecdbcSYuanhan Liu unsigned i;
1277273ecdbcSYuanhan Liu unsigned lcore_id = rte_lcore_id();
1278273ecdbcSYuanhan Liu struct vhost_dev *vdev;
1279d19533e8SHuawei Xie struct mbuf_table *tx_q;
1280d19533e8SHuawei Xie 
1281d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1282d19533e8SHuawei Xie 
1283d19533e8SHuawei Xie tx_q = &lcore_tx_queue[lcore_id];
1284273ecdbcSYuanhan Liu for (i = 0; i < rte_lcore_count(); i++) {
1285d19533e8SHuawei Xie if (lcore_ids[i] == lcore_id) {
1286d19533e8SHuawei Xie tx_q->txq_id = i;
1287d19533e8SHuawei Xie break;
1288d19533e8SHuawei Xie }
1289d19533e8SHuawei Xie }
1290d19533e8SHuawei Xie 
1291d19533e8SHuawei Xie while(1) {
1292273ecdbcSYuanhan Liu drain_mbuf_table(tx_q);
1293a68ba8e0SCheng Jiang drain_vhost_table();
1294d19533e8SHuawei Xie /*
129545657a5cSYuanhan Liu * Inform the configuration core that we have exited the
129645657a5cSYuanhan Liu * linked list and that no devices are in use if requested.
1297d19533e8SHuawei Xie */
129845657a5cSYuanhan Liu if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
129945657a5cSYuanhan Liu lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1300d19533e8SHuawei Xie 
1301d19533e8SHuawei Xie /*
1302273ecdbcSYuanhan Liu * Process vhost devices
1303d19533e8SHuawei Xie */
130497daf19eSYuanhan Liu TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
130597daf19eSYuanhan Liu lcore_vdev_entry) {
1306364dddcdSHuawei Xie if (unlikely(vdev->remove)) {
1307e571e6b4SHuawei Xie unlink_vmdq(vdev);
1308e571e6b4SHuawei Xie vdev->ready = DEVICE_SAFE_REMOVE;
1309d19533e8SHuawei Xie continue;
1310d19533e8SHuawei Xie }
131145657a5cSYuanhan Liu 
1312273ecdbcSYuanhan Liu if (likely(vdev->ready == DEVICE_RX))
1313273ecdbcSYuanhan Liu drain_eth_rx(vdev);
1314d19533e8SHuawei Xie 
1315273ecdbcSYuanhan Liu if (likely(!vdev->remove))
1316273ecdbcSYuanhan Liu drain_virtio_tx(vdev);
1317d19533e8SHuawei Xie }
1318d19533e8SHuawei Xie }
1319d19533e8SHuawei Xie 
1320d19533e8SHuawei Xie return 0;
1321d19533e8SHuawei Xie }
1322d19533e8SHuawei Xie 
1323d19533e8SHuawei Xie /*
132445657a5cSYuanhan Liu * Remove a device from the specific data core linked list and from the
132545657a5cSYuanhan Liu * main linked list. Synchronization occurs through the use of the
132645657a5cSYuanhan Liu * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
1327d19533e8SHuawei Xie * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
1328d19533e8SHuawei Xie */ 1329d19533e8SHuawei Xie static void 13304ecf22e3SYuanhan Liu destroy_device(int vid) 1331d19533e8SHuawei Xie { 133216ae8abeSYuanhan Liu struct vhost_dev *vdev = NULL; 1333d19533e8SHuawei Xie int lcore; 1334a68ba8e0SCheng Jiang uint16_t i; 1335d19533e8SHuawei Xie 133616ae8abeSYuanhan Liu TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { 13374ecf22e3SYuanhan Liu if (vdev->vid == vid) 133816ae8abeSYuanhan Liu break; 133916ae8abeSYuanhan Liu } 134016ae8abeSYuanhan Liu if (!vdev) 134116ae8abeSYuanhan Liu return; 1342d19533e8SHuawei Xie /*set the remove flag. */ 1343e571e6b4SHuawei Xie vdev->remove = 1; 1344e571e6b4SHuawei Xie while(vdev->ready != DEVICE_SAFE_REMOVE) { 1345d19533e8SHuawei Xie rte_pause(); 1346d19533e8SHuawei Xie } 1347d19533e8SHuawei Xie 1348a68ba8e0SCheng Jiang for (i = 0; i < RTE_MAX_LCORE; i++) 1349a68ba8e0SCheng Jiang rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]); 1350a68ba8e0SCheng Jiang 1351ca059fa5SYuanhan Liu if (builtin_net_driver) 1352ca059fa5SYuanhan Liu vs_vhost_net_remove(vdev); 1353ca059fa5SYuanhan Liu 135497daf19eSYuanhan Liu TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev, 135597daf19eSYuanhan Liu lcore_vdev_entry); 135697daf19eSYuanhan Liu TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry); 135797daf19eSYuanhan Liu 1358d19533e8SHuawei Xie 1359d19533e8SHuawei Xie /* Set the dev_removal_flag on each lcore. */ 1360cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore) 136145657a5cSYuanhan Liu lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL; 1362d19533e8SHuawei Xie 1363d19533e8SHuawei Xie /* 136445657a5cSYuanhan Liu * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL 136545657a5cSYuanhan Liu * we can be sure that they can no longer access the device removed 136645657a5cSYuanhan Liu * from the linked lists and that the devices are no longer in use. 1367d19533e8SHuawei Xie */ 1368cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore) { 136945657a5cSYuanhan Liu while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL) 1370d19533e8SHuawei Xie rte_pause(); 1371d19533e8SHuawei Xie } 1372d19533e8SHuawei Xie 137345657a5cSYuanhan Liu lcore_info[vdev->coreid].device_num--; 1374d19533e8SHuawei Xie 137545657a5cSYuanhan Liu RTE_LOG(INFO, VHOST_DATA, 1376c08a3490SYuanhan Liu "(%d) device has been removed from data core\n", 1377e2a1dd12SYuanhan Liu vdev->vid); 1378d19533e8SHuawei Xie 1379abec60e7SCheng Jiang if (async_vhost_driver) 1380abec60e7SCheng Jiang rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ); 1381abec60e7SCheng Jiang 1382e571e6b4SHuawei Xie rte_free(vdev); 1383d19533e8SHuawei Xie } 1384d19533e8SHuawei Xie 1385d19533e8SHuawei Xie /* 1386d19533e8SHuawei Xie * A new device is added to a data core. First the device is added to the main linked list 138710b4270fSRami Rosen * and then allocated to a specific data core. 
1388d19533e8SHuawei Xie */ 1389d19533e8SHuawei Xie static int 13904ecf22e3SYuanhan Liu new_device(int vid) 1391d19533e8SHuawei Xie { 1392d19533e8SHuawei Xie int lcore, core_add = 0; 1393a68ba8e0SCheng Jiang uint16_t i; 1394d19533e8SHuawei Xie uint32_t device_num_min = num_devices; 1395e571e6b4SHuawei Xie struct vhost_dev *vdev; 1396fdf20fa7SSergio Gonzalez Monroy vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE); 1397e571e6b4SHuawei Xie if (vdev == NULL) { 1398c08a3490SYuanhan Liu RTE_LOG(INFO, VHOST_DATA, 13997f262239SYuanhan Liu "(%d) couldn't allocate memory for vhost dev\n", 1400e2a1dd12SYuanhan Liu vid); 1401e571e6b4SHuawei Xie return -1; 1402e571e6b4SHuawei Xie } 1403e2a1dd12SYuanhan Liu vdev->vid = vid; 1404d19533e8SHuawei Xie 1405a68ba8e0SCheng Jiang for (i = 0; i < RTE_MAX_LCORE; i++) { 1406a68ba8e0SCheng Jiang vhost_txbuff[i * MAX_VHOST_DEVICE + vid] 1407a68ba8e0SCheng Jiang = rte_zmalloc("vhost bufftable", 1408a68ba8e0SCheng Jiang sizeof(struct vhost_bufftable), 1409a68ba8e0SCheng Jiang RTE_CACHE_LINE_SIZE); 1410a68ba8e0SCheng Jiang 1411a68ba8e0SCheng Jiang if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) { 1412a68ba8e0SCheng Jiang RTE_LOG(INFO, VHOST_DATA, 1413a68ba8e0SCheng Jiang "(%d) couldn't allocate memory for vhost TX\n", vid); 1414a68ba8e0SCheng Jiang return -1; 1415a68ba8e0SCheng Jiang } 1416a68ba8e0SCheng Jiang } 1417a68ba8e0SCheng Jiang 1418ca059fa5SYuanhan Liu if (builtin_net_driver) 1419ca059fa5SYuanhan Liu vs_vhost_net_setup(vdev); 1420ca059fa5SYuanhan Liu 142197daf19eSYuanhan Liu TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry); 1422e2a1dd12SYuanhan Liu vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base; 1423d19533e8SHuawei Xie 1424d19533e8SHuawei Xie /*reset ready flag*/ 1425e571e6b4SHuawei Xie vdev->ready = DEVICE_MAC_LEARNING; 1426e571e6b4SHuawei Xie vdev->remove = 0; 1427d19533e8SHuawei Xie 1428d19533e8SHuawei Xie /* Find a suitable lcore to add the device. */ 1429cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore) { 143045657a5cSYuanhan Liu if (lcore_info[lcore].device_num < device_num_min) { 143145657a5cSYuanhan Liu device_num_min = lcore_info[lcore].device_num; 1432d19533e8SHuawei Xie core_add = lcore; 1433d19533e8SHuawei Xie } 1434d19533e8SHuawei Xie } 1435e571e6b4SHuawei Xie vdev->coreid = core_add; 1436e571e6b4SHuawei Xie 143797daf19eSYuanhan Liu TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev, 143897daf19eSYuanhan Liu lcore_vdev_entry); 143945657a5cSYuanhan Liu lcore_info[vdev->coreid].device_num++; 1440d19533e8SHuawei Xie 1441d19533e8SHuawei Xie /* Disable notifications. 
*/
14424ecf22e3SYuanhan Liu rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
14434ecf22e3SYuanhan Liu rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1444d19533e8SHuawei Xie 
1445c08a3490SYuanhan Liu RTE_LOG(INFO, VHOST_DATA,
1446c08a3490SYuanhan Liu "(%d) device has been added to data core %d\n",
1447e2a1dd12SYuanhan Liu vid, vdev->coreid);
1448d19533e8SHuawei Xie 
1449abec60e7SCheng Jiang if (async_vhost_driver) {
14506e9a9d2aSCheng Jiang struct rte_vhost_async_features f;
14516e9a9d2aSCheng Jiang struct rte_vhost_async_channel_ops channel_ops;
1452a68ba8e0SCheng Jiang 
14536e9a9d2aSCheng Jiang if (strncmp(dma_type, "ioat", 4) == 0) {
14546e9a9d2aSCheng Jiang channel_ops.transfer_data = ioat_transfer_data_cb;
14556e9a9d2aSCheng Jiang channel_ops.check_completed_copies =
14566e9a9d2aSCheng Jiang ioat_check_completed_copies_cb;
1457a68ba8e0SCheng Jiang 
1458abec60e7SCheng Jiang f.async_inorder = 1;
1459abec60e7SCheng Jiang f.async_threshold = 256;
1460a68ba8e0SCheng Jiang 
1461abec60e7SCheng Jiang return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
1462abec60e7SCheng Jiang f.intval, &channel_ops);
1463abec60e7SCheng Jiang }
14646e9a9d2aSCheng Jiang }
1465abec60e7SCheng Jiang 
1466d19533e8SHuawei Xie return 0;
1467d19533e8SHuawei Xie }
1468d19533e8SHuawei Xie 
1469d19533e8SHuawei Xie /*
1470d19533e8SHuawei Xie * These callbacks allow devices to be added to the data core when configuration
1471d19533e8SHuawei Xie * has been fully completed.
1472d19533e8SHuawei Xie */
14737c129037SYuanhan Liu static const struct vhost_device_ops virtio_net_device_ops =
1474d19533e8SHuawei Xie {
1475d19533e8SHuawei Xie .new_device = new_device,
1476d19533e8SHuawei Xie .destroy_device = destroy_device,
1477d19533e8SHuawei Xie };
1478d19533e8SHuawei Xie 
1479d19533e8SHuawei Xie /*
1480d19533e8SHuawei Xie * This is a thread that will wake up after a period to print stats if the user has
1481d19533e8SHuawei Xie * enabled them.
1482d19533e8SHuawei Xie */ 1483fa204854SOlivier Matz static void * 1484fa204854SOlivier Matz print_stats(__rte_unused void *arg) 1485d19533e8SHuawei Xie { 148645657a5cSYuanhan Liu struct vhost_dev *vdev; 1487d19533e8SHuawei Xie uint64_t tx_dropped, rx_dropped; 1488d19533e8SHuawei Xie uint64_t tx, tx_total, rx, rx_total; 1489d19533e8SHuawei Xie const char clr[] = { 27, '[', '2', 'J', '\0' }; 1490d19533e8SHuawei Xie const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' }; 1491d19533e8SHuawei Xie 1492d19533e8SHuawei Xie while(1) { 1493d19533e8SHuawei Xie sleep(enable_stats); 1494d19533e8SHuawei Xie 1495d19533e8SHuawei Xie /* Clear screen and move to top left */ 149656fe86f8SYuanhan Liu printf("%s%s\n", clr, top_left); 149756fe86f8SYuanhan Liu printf("Device statistics =================================\n"); 1498d19533e8SHuawei Xie 149997daf19eSYuanhan Liu TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) { 150056fe86f8SYuanhan Liu tx_total = vdev->stats.tx_total; 150156fe86f8SYuanhan Liu tx = vdev->stats.tx; 1502d19533e8SHuawei Xie tx_dropped = tx_total - tx; 150356fe86f8SYuanhan Liu 1504a68ba8e0SCheng Jiang rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic, 1505a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 1506a68ba8e0SCheng Jiang rx = __atomic_load_n(&vdev->stats.rx_atomic, 1507a68ba8e0SCheng Jiang __ATOMIC_SEQ_CST); 1508d19533e8SHuawei Xie rx_dropped = rx_total - rx; 1509d19533e8SHuawei Xie 1510c08a3490SYuanhan Liu printf("Statistics for device %d\n" 151156fe86f8SYuanhan Liu "-----------------------\n" 151256fe86f8SYuanhan Liu "TX total: %" PRIu64 "\n" 151356fe86f8SYuanhan Liu "TX dropped: %" PRIu64 "\n" 151456fe86f8SYuanhan Liu "TX successful: %" PRIu64 "\n" 151556fe86f8SYuanhan Liu "RX total: %" PRIu64 "\n" 151656fe86f8SYuanhan Liu "RX dropped: %" PRIu64 "\n" 151756fe86f8SYuanhan Liu "RX successful: %" PRIu64 "\n", 15184ecf22e3SYuanhan Liu vdev->vid, 151956fe86f8SYuanhan Liu tx_total, tx_dropped, tx, 152056fe86f8SYuanhan Liu rx_total, rx_dropped, rx); 1521d19533e8SHuawei Xie } 152256fe86f8SYuanhan Liu 152356fe86f8SYuanhan Liu printf("===================================================\n"); 15243ee6f706SGeorgiy Levashov 15253ee6f706SGeorgiy Levashov fflush(stdout); 1526d19533e8SHuawei Xie } 1527fa204854SOlivier Matz 1528fa204854SOlivier Matz return NULL; 1529d19533e8SHuawei Xie } 1530d19533e8SHuawei Xie 1531ad0eef4dSJiayu Hu static void 1532ad0eef4dSJiayu Hu unregister_drivers(int socket_num) 1533ad0eef4dSJiayu Hu { 1534ad0eef4dSJiayu Hu int i, ret; 1535ad0eef4dSJiayu Hu 1536ad0eef4dSJiayu Hu for (i = 0; i < socket_num; i++) { 1537ad0eef4dSJiayu Hu ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX); 1538ad0eef4dSJiayu Hu if (ret != 0) 1539ad0eef4dSJiayu Hu RTE_LOG(ERR, VHOST_CONFIG, 1540ad0eef4dSJiayu Hu "Fail to unregister vhost driver for %s.\n", 1541ad0eef4dSJiayu Hu socket_files + i * PATH_MAX); 1542ad0eef4dSJiayu Hu } 1543ad0eef4dSJiayu Hu } 1544ad0eef4dSJiayu Hu 1545c83d2d00SOuyang Changchun /* When we receive a INT signal, unregister vhost driver */ 1546c83d2d00SOuyang Changchun static void 1547c83d2d00SOuyang Changchun sigint_handler(__rte_unused int signum) 1548c83d2d00SOuyang Changchun { 1549c83d2d00SOuyang Changchun /* Unregister vhost driver. 
*/
1550ad0eef4dSJiayu Hu unregister_drivers(nb_sockets);
1551ad0eef4dSJiayu Hu 
1552c83d2d00SOuyang Changchun exit(0);
1553c83d2d00SOuyang Changchun }
1554d19533e8SHuawei Xie 
1555d19533e8SHuawei Xie /*
1556bdb19b77SYuanhan Liu * While creating an mbuf pool, one key thing is to figure out how
1557bdb19b77SYuanhan Liu * many mbuf entries are enough for our use. FYI, here are some
1558bdb19b77SYuanhan Liu * guidelines:
1559bdb19b77SYuanhan Liu *
1560bdb19b77SYuanhan Liu * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1561bdb19b77SYuanhan Liu *
1562bdb19b77SYuanhan Liu * - For each switch core (a CPU core that does the packet switch), we
1563bdb19b77SYuanhan Liu * also need to make some reservation for receiving the packets from the
1564bdb19b77SYuanhan Liu * virtio Tx queue. How many are enough depends on the usage. It's normally
1565bdb19b77SYuanhan Liu * a simple calculation like the following:
1566bdb19b77SYuanhan Liu *
1567bdb19b77SYuanhan Liu * MAX_PKT_BURST * max packet size / mbuf size
1568bdb19b77SYuanhan Liu *
1569bdb19b77SYuanhan Liu * So, we definitely need to allocate more mbufs when TSO is enabled.
1570bdb19b77SYuanhan Liu *
1571bdb19b77SYuanhan Liu * - Similarly, for each switching core, we should reserve @nr_rx_desc
1572bdb19b77SYuanhan Liu * mbufs for receiving the packets from the physical NIC device.
1573bdb19b77SYuanhan Liu *
1574bdb19b77SYuanhan Liu * - We also need to make sure, for each switch core, we have allocated
1575bdb19b77SYuanhan Liu * enough mbufs to fill up the mbuf cache. (A rough worked example is sketched at the end of this file.)
1576bdb19b77SYuanhan Liu */
1577bdb19b77SYuanhan Liu static void
1578bdb19b77SYuanhan Liu create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1579bdb19b77SYuanhan Liu uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1580bdb19b77SYuanhan Liu {
1581bdb19b77SYuanhan Liu uint32_t nr_mbufs;
1582bdb19b77SYuanhan Liu uint32_t nr_mbufs_per_core;
1583bdb19b77SYuanhan Liu uint32_t mtu = 1500;
1584bdb19b77SYuanhan Liu 
1585bdb19b77SYuanhan Liu if (mergeable)
1586bdb19b77SYuanhan Liu mtu = 9000;
1587bdb19b77SYuanhan Liu if (enable_tso)
1588bdb19b77SYuanhan Liu mtu = 64 * 1024;
1589bdb19b77SYuanhan Liu 
1590bdb19b77SYuanhan Liu nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
159112ee45a3SYong Wang (mbuf_size - RTE_PKTMBUF_HEADROOM);
1592bdb19b77SYuanhan Liu nr_mbufs_per_core += nr_rx_desc;
1593bdb19b77SYuanhan Liu nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1594bdb19b77SYuanhan Liu 
1595bdb19b77SYuanhan Liu nr_mbufs = nr_queues * nr_rx_desc;
1596bdb19b77SYuanhan Liu nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1597bdb19b77SYuanhan Liu nr_mbufs *= nr_port;
1598bdb19b77SYuanhan Liu 
1599bdb19b77SYuanhan Liu mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1600bdb19b77SYuanhan Liu nr_mbuf_cache, 0, mbuf_size,
1601bdb19b77SYuanhan Liu rte_socket_id());
1602bdb19b77SYuanhan Liu if (mbuf_pool == NULL)
1603bdb19b77SYuanhan Liu rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1604bdb19b77SYuanhan Liu }
1605bdb19b77SYuanhan Liu 
1606bdb19b77SYuanhan Liu /*
1607164a601bSYuanhan Liu * Main function, does initialisation and calls the per-lcore functions.
1608d19533e8SHuawei Xie */ 1609d19533e8SHuawei Xie int 161098a16481SDavid Marchand main(int argc, char *argv[]) 1611d19533e8SHuawei Xie { 1612d19533e8SHuawei Xie unsigned lcore_id, core_id = 0; 1613d19533e8SHuawei Xie unsigned nb_ports, valid_num_ports; 1614ad0eef4dSJiayu Hu int ret, i; 1615f8244c63SZhiyong Yang uint16_t portid; 1616d19533e8SHuawei Xie static pthread_t tid; 16172345e3beSYuanhan Liu uint64_t flags = 0; 1618d19533e8SHuawei Xie 1619c83d2d00SOuyang Changchun signal(SIGINT, sigint_handler); 1620c83d2d00SOuyang Changchun 1621d19533e8SHuawei Xie /* init EAL */ 1622d19533e8SHuawei Xie ret = rte_eal_init(argc, argv); 1623d19533e8SHuawei Xie if (ret < 0) 1624d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); 1625d19533e8SHuawei Xie argc -= ret; 1626d19533e8SHuawei Xie argv += ret; 1627d19533e8SHuawei Xie 1628d19533e8SHuawei Xie /* parse app arguments */ 1629d19533e8SHuawei Xie ret = us_vhost_parse_args(argc, argv); 1630d19533e8SHuawei Xie if (ret < 0) 1631d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, "Invalid argument\n"); 1632d19533e8SHuawei Xie 1633b3bee7d8SYong Wang for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 163445657a5cSYuanhan Liu TAILQ_INIT(&lcore_info[lcore_id].vdev_list); 163545657a5cSYuanhan Liu 1636d19533e8SHuawei Xie if (rte_lcore_is_enabled(lcore_id)) 1637d19533e8SHuawei Xie lcore_ids[core_id++] = lcore_id; 1638b3bee7d8SYong Wang } 1639d19533e8SHuawei Xie 1640d19533e8SHuawei Xie if (rte_lcore_count() > RTE_MAX_LCORE) 1641d19533e8SHuawei Xie rte_exit(EXIT_FAILURE,"Not enough cores\n"); 1642d19533e8SHuawei Xie 1643d19533e8SHuawei Xie /* Get the number of physical ports. */ 1644d9a42a69SThomas Monjalon nb_ports = rte_eth_dev_count_avail(); 1645d19533e8SHuawei Xie 1646d19533e8SHuawei Xie /* 1647d19533e8SHuawei Xie * Update the global var NUM_PORTS and global array PORTS 1648d19533e8SHuawei Xie * and get value of var VALID_NUM_PORTS according to system ports number 1649d19533e8SHuawei Xie */ 1650d19533e8SHuawei Xie valid_num_ports = check_ports_num(nb_ports); 1651d19533e8SHuawei Xie 1652d19533e8SHuawei Xie if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) { 1653d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u," 1654d19533e8SHuawei Xie "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS); 1655d19533e8SHuawei Xie return -1; 1656d19533e8SHuawei Xie } 1657d19533e8SHuawei Xie 1658bdb19b77SYuanhan Liu /* 1659bdb19b77SYuanhan Liu * FIXME: here we are trying to allocate mbufs big enough for 1660bdb19b77SYuanhan Liu * @MAX_QUEUES, but the truth is we're never going to use that 1661bdb19b77SYuanhan Liu * many queues here. We probably should only do allocation for 1662bdb19b77SYuanhan Liu * those queues we are going to use. 1663bdb19b77SYuanhan Liu */ 1664bdb19b77SYuanhan Liu create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE, 1665bdb19b77SYuanhan Liu MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE); 1666d19533e8SHuawei Xie 1667d19533e8SHuawei Xie if (vm2vm_mode == VM2VM_HARDWARE) { 1668d19533e8SHuawei Xie /* Enable VT loop back to let L2 switch to do it. 
*/ 1669d19533e8SHuawei Xie vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1; 16701f49ec15SThomas Monjalon RTE_LOG(DEBUG, VHOST_CONFIG, 1671d19533e8SHuawei Xie "Enable loop back for L2 switch in vmdq.\n"); 1672d19533e8SHuawei Xie } 1673d19533e8SHuawei Xie 1674d19533e8SHuawei Xie /* initialize all ports */ 16758728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(portid) { 1676d19533e8SHuawei Xie /* skip ports that are not enabled */ 1677d19533e8SHuawei Xie if ((enabled_port_mask & (1 << portid)) == 0) { 1678d19533e8SHuawei Xie RTE_LOG(INFO, VHOST_PORT, 1679d19533e8SHuawei Xie "Skipping disabled port %d\n", portid); 1680d19533e8SHuawei Xie continue; 1681d19533e8SHuawei Xie } 1682d19533e8SHuawei Xie if (port_init(portid) != 0) 1683d19533e8SHuawei Xie rte_exit(EXIT_FAILURE, 1684d19533e8SHuawei Xie "Cannot initialize network ports\n"); 1685d19533e8SHuawei Xie } 1686d19533e8SHuawei Xie 1687d19533e8SHuawei Xie /* Enable stats if the user option is set. */ 168867b6d303SRavi Kerur if (enable_stats) { 1689fa204854SOlivier Matz ret = rte_ctrl_thread_create(&tid, "print-stats", NULL, 1690fa204854SOlivier Matz print_stats, NULL); 1691fa204854SOlivier Matz if (ret < 0) 169267b6d303SRavi Kerur rte_exit(EXIT_FAILURE, 169367b6d303SRavi Kerur "Cannot create print-stats thread\n"); 169467b6d303SRavi Kerur } 1695d19533e8SHuawei Xie 1696d19533e8SHuawei Xie /* Launch all data cores. */ 1697cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore_id) 169868363d85SYuanhan Liu rte_eal_remote_launch(switch_worker, NULL, lcore_id); 1699d19533e8SHuawei Xie 17002345e3beSYuanhan Liu if (client_mode) 17012345e3beSYuanhan Liu flags |= RTE_VHOST_USER_CLIENT; 17022345e3beSYuanhan Liu 1703bde19a4dSJiayu Hu /* Register vhost user driver to handle vhost messages. */ 1704ad0eef4dSJiayu Hu for (i = 0; i < nb_sockets; i++) { 17050917f9d1SYuanhan Liu char *file = socket_files + i * PATH_MAX; 1706a68ba8e0SCheng Jiang 1707abec60e7SCheng Jiang if (async_vhost_driver) 1708abec60e7SCheng Jiang flags = flags | RTE_VHOST_USER_ASYNC_COPY; 1709abec60e7SCheng Jiang 17100917f9d1SYuanhan Liu ret = rte_vhost_driver_register(file, flags); 1711ad0eef4dSJiayu Hu if (ret != 0) { 1712ad0eef4dSJiayu Hu unregister_drivers(i); 1713ad0eef4dSJiayu Hu rte_exit(EXIT_FAILURE, 1714ad0eef4dSJiayu Hu "vhost driver register failure.\n"); 1715ad0eef4dSJiayu Hu } 1716ca059fa5SYuanhan Liu 1717ca059fa5SYuanhan Liu if (builtin_net_driver) 1718ca059fa5SYuanhan Liu rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES); 1719ca059fa5SYuanhan Liu 17200917f9d1SYuanhan Liu if (mergeable == 0) { 17210917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17220917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_MRG_RXBUF); 17230917f9d1SYuanhan Liu } 17240917f9d1SYuanhan Liu 17250917f9d1SYuanhan Liu if (enable_tx_csum == 0) { 17260917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17270917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_CSUM); 17280917f9d1SYuanhan Liu } 17290917f9d1SYuanhan Liu 17300917f9d1SYuanhan Liu if (enable_tso == 0) { 17310917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17320917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_HOST_TSO4); 17330917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17340917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_HOST_TSO6); 17350917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17360917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_GUEST_TSO4); 17370917f9d1SYuanhan Liu rte_vhost_driver_disable_features(file, 17380917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_GUEST_TSO6); 17390917f9d1SYuanhan Liu } 
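/*
 * VIRTIO_NET_F_CTRL_RX advertises the control-queue RX-mode commands;
 * a guest can only request promiscuous/all-multicast reception when this
 * feature is offered, so it is enabled only when promiscuous mode was
 * requested on the command line.
 */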
17400917f9d1SYuanhan Liu 17410917f9d1SYuanhan Liu if (promiscuous) { 17420917f9d1SYuanhan Liu rte_vhost_driver_enable_features(file, 17430917f9d1SYuanhan Liu 1ULL << VIRTIO_NET_F_CTRL_RX); 17440917f9d1SYuanhan Liu } 1745d19533e8SHuawei Xie 174693433b63SYuanhan Liu ret = rte_vhost_driver_callback_register(file, 174793433b63SYuanhan Liu &virtio_net_device_ops); 174893433b63SYuanhan Liu if (ret != 0) { 174993433b63SYuanhan Liu rte_exit(EXIT_FAILURE, 175093433b63SYuanhan Liu "failed to register vhost driver callbacks.\n"); 175193433b63SYuanhan Liu } 1752af147591SYuanhan Liu 1753af147591SYuanhan Liu if (rte_vhost_driver_start(file) < 0) { 1754af147591SYuanhan Liu rte_exit(EXIT_FAILURE, 1755af147591SYuanhan Liu "failed to start vhost driver.\n"); 1756af147591SYuanhan Liu } 175793433b63SYuanhan Liu } 1758d19533e8SHuawei Xie 1759cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore_id) 1760af147591SYuanhan Liu rte_eal_wait_lcore(lcore_id); 1761af147591SYuanhan Liu 1762d19533e8SHuawei Xie return 0; 1763d19533e8SHuawei Xie 1764d19533e8SHuawei Xie } 1765
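/*
 * Editor's note -- illustrative numbers, not part of the original file.
 * A rough worked example of the accounting done by create_mbuf_pool(),
 * assuming common defaults: a data room of RTE_MBUF_DEFAULT_BUF_SIZE
 * (2176 bytes, of which RTE_PKTMBUF_HEADROOM = 128 bytes is headroom),
 * MAX_PKT_BURST = 32 and 1024 Rx descriptors per queue; substitute the
 * values actually configured.
 *
 *   per-core mbufs = (mtu + mbuf_size) * MAX_PKT_BURST
 *                    / (mbuf_size - RTE_PKTMBUF_HEADROOM) + nr_rx_desc
 *                  = (9000 + 2176) * 32 / 2048 + 1024 ~= 1198  (mergeable, 9K MTU)
 *
 *   pool size      = nr_port * (nr_queues * nr_rx_desc
 *                    + nr_switch_core * per-core mbufs)
 *
 * With TSO enabled the 64 KB MTU makes the per-burst term several times
 * larger, which is why the comment above create_mbuf_pool() warns that
 * more mbufs are needed in that case.
 */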
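/*
 * Editor's illustrative sketch -- not part of the original file. The
 * per-socket loop in main() boils down to the following minimal bring-up
 * sequence for one vhost-user socket path (feature tweaks and async
 * channel setup omitted). The helper name is hypothetical.
 */
static int
register_one_socket(const char *path, uint64_t flags)
{
	int ret;

	/* Create the vhost-user socket (server or client mode per flags). */
	ret = rte_vhost_driver_register(path, flags);
	if (ret != 0)
		return ret;

	/* Hook up the new_device()/destroy_device() callbacks defined above. */
	ret = rte_vhost_driver_callback_register(path, &virtio_net_device_ops);
	if (ret != 0) {
		rte_vhost_driver_unregister(path);
		return ret;
	}

	/* Start handling vhost-user messages for this socket. */
	return rte_vhost_driver_start(path);
}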