/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

static void
virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Waiting for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint64_t protocol_features;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	if (dev->device_features &
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
		if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
			return -1;

		dev->protocol_features &= protocol_features;

		if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
			return -1;

		if (!(dev->protocol_features &
				(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
			dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
	}

	dev->device_features |= dev->frontend_features;

	/* Mask out features unsupported by the vhost-user backend */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection. */
	if (vtpci_packed_queue(hw) &&
	    (vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when packed ring reconnecting.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}

static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, callback cannot be
				 * unregistered here, set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
	virtio_user_send_status_update(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function is to get the queue size, i.e. number of descs, of a specified
 * queue. Different from VHOST_USER_GET_QUEUE_NUM, which is used to get the
 * max number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the ioeventfds and reset the device status.
	 * For modern devices, setting the queue desc, avail and used addresses
	 * in the PCI bar to 0 triggers no further behavior in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel. So we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
"speed" 479b0db4beaSIvan Dyukov VIRTIO_USER_ARG_SPEED, 4806b7eefbcSMarvin Liu #define VIRTIO_USER_ARG_VECTORIZED "vectorized" 4816b7eefbcSMarvin Liu VIRTIO_USER_ARG_VECTORIZED, 482ce2eabddSJianfeng Tan NULL 483ce2eabddSJianfeng Tan }; 484ce2eabddSJianfeng Tan 485ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_CQ_EN 0 486ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_Q_NUM 1 487ce2eabddSJianfeng Tan #define VIRTIO_USER_DEF_Q_SZ 256 488bd8f50a4SZhiyong Yang #define VIRTIO_USER_DEF_SERVER_MODE 0 489ce2eabddSJianfeng Tan 490ce2eabddSJianfeng Tan static int 491ce2eabddSJianfeng Tan get_string_arg(const char *key __rte_unused, 492ce2eabddSJianfeng Tan const char *value, void *extra_args) 493ce2eabddSJianfeng Tan { 494ce2eabddSJianfeng Tan if (!value || !extra_args) 495ce2eabddSJianfeng Tan return -EINVAL; 496ce2eabddSJianfeng Tan 497ce2eabddSJianfeng Tan *(char **)extra_args = strdup(value); 498ce2eabddSJianfeng Tan 4994214a1b4SWenfeng Liu if (!*(char **)extra_args) 5004214a1b4SWenfeng Liu return -ENOMEM; 5014214a1b4SWenfeng Liu 502ce2eabddSJianfeng Tan return 0; 503ce2eabddSJianfeng Tan } 504ce2eabddSJianfeng Tan 505ce2eabddSJianfeng Tan static int 506ce2eabddSJianfeng Tan get_integer_arg(const char *key __rte_unused, 507ce2eabddSJianfeng Tan const char *value, void *extra_args) 508ce2eabddSJianfeng Tan { 509bc5b6c11SIvan Dyukov uint64_t integer = 0; 510ce2eabddSJianfeng Tan if (!value || !extra_args) 511ce2eabddSJianfeng Tan return -EINVAL; 512bc5b6c11SIvan Dyukov errno = 0; 513bc5b6c11SIvan Dyukov integer = strtoull(value, NULL, 0); 514bc5b6c11SIvan Dyukov /* extra_args keeps default value, it should be replaced 515bc5b6c11SIvan Dyukov * only in case of successful parsing of the 'value' arg 516bc5b6c11SIvan Dyukov */ 517bc5b6c11SIvan Dyukov if (errno == 0) 518bc5b6c11SIvan Dyukov *(uint64_t *)extra_args = integer; 519bc5b6c11SIvan Dyukov return -errno; 520ce2eabddSJianfeng Tan } 521ce2eabddSJianfeng Tan 522*f908b22eSAdrian Moreno static enum virtio_user_backend_type 523*f908b22eSAdrian Moreno virtio_user_backend_type(const char *path) 524*f908b22eSAdrian Moreno { 525*f908b22eSAdrian Moreno struct stat sb; 526*f908b22eSAdrian Moreno 527*f908b22eSAdrian Moreno if (stat(path, &sb) == -1) 528*f908b22eSAdrian Moreno return VIRTIO_USER_BACKEND_UNKNOWN; 529*f908b22eSAdrian Moreno 530*f908b22eSAdrian Moreno return S_ISSOCK(sb.st_mode) ? 
		VIRTIO_USER_BACKEND_VHOST_USER :
		VIRTIO_USER_BACKEND_VHOST_KERNEL;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSI-X is required to enable LSC (see virtio_init_device).
	 * Here we just pretend that we support MSI-X.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(dev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	hw = eth_dev->data->dev_private;
	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"building environment do not support packed ring vectorized");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}

static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");
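
/*
 * Illustrative usage sketch (not part of the driver): the arguments
 * registered above are supplied through an EAL --vdev option. For example,
 * with testpmd and an assumed vhost-user socket path:
 *
 *   testpmd -l 0-1 --vdev=net_virtio_user0,path=/tmp/vhost.sock,queues=2
 *
 * Only "path" is mandatory; the socket path and queue count shown here are
 * example values, and the remaining arguments fall back to the defaults
 * defined in this file.
 */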