/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/major.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

static void
virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Waiting for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint64_t protocol_features;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	if (dev->device_features &
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
		if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
			return -1;

		dev->protocol_features &= protocol_features;

		if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
			return -1;

		if (!(dev->protocol_features &
				(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
			dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
	}

	dev->device_features |= dev->frontend_features;

	/* unmask vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required during reconnection. */
	if (vtpci_packed_queue(hw) &&
	    (vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				     " during packed ring reconnection.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}

static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}
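/*
 * Emulated device config space accessors follow. Reading the status field
 * doubles as link-state detection: a MSG_PEEK on the vhost-user socket tells
 * whether the backend is still connected, and in server mode a disconnected
 * backend triggers a reconnection attempt via virtio_user_server_reconnect().
 */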
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, so the callback cannot
				 * be unregistered here; set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->net_status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->net_status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
	virtio_user_send_status_update(dev, status);
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_update_status(dev);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}
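/*
 * Note on the negotiation flow implemented above: get/set_features() always
 * mask the driver's request against what the backend advertised
 * (device_features), and set_status() starts or resets the backend when
 * DRIVER_OK or RESET is written, relaying the new status to the backend
 * through virtio_user_send_status_update().
 */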
static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function is to get the queue size, i.e. the number of descriptors, of a
 * specified queue. Different from VHOST_USER_GET_QUEUE_NUM, which is used to
 * get the max number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}
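/*
 * Split virtqueue memory layout passed to the backend by
 * virtio_user_setup_queue_split() below (illustrative numbers, assuming
 * 256 descriptors and the 4 KB VIRTIO_PCI_VRING_ALIGN used here):
 *   desc ring : offset 0, 256 * 16 B = 4096 B
 *   avail ring: follows desc, 4 B header + 256 * 2 B entries
 *   used ring : rounded up to the next VIRTIO_PCI_VRING_ALIGN boundary
 * All addresses come from the virtqueue memory already allocated by the
 * common virtio code (vq->vq_ring_virt_mem); nothing is allocated here.
 */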
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, QEMU stops the ioeventfds and resets the device
	 * status when 0 is written to the VIRTIO_PCI_QUEUE_PFN port.
	 * For modern devices, setting the queue desc, avail and used addresses
	 * in the PCI bar to 0 triggers no further behavior in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel. So we just close ioeventfd for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED          "speed"
	VIRTIO_USER_ARG_SPEED,
#define VIRTIO_USER_ARG_VECTORIZED     "vectorized"
	VIRTIO_USER_ARG_VECTORIZED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;
	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps its default value; it is replaced only if the
	 * 'value' arg is parsed successfully.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

static uint32_t
vdpa_dynamic_major_num(void)
{
	FILE *fp;
	char *line = NULL;
	size_t size;
	char name[11];
	bool found = false;
	uint32_t num;

	fp = fopen("/proc/devices", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "Cannot open /proc/devices: %s",
			     strerror(errno));
		return UNNAMED_MAJOR;
	}

	while (getline(&line, &size, fp) > 0) {
		char *stripped = line + strspn(line, " ");
		if ((sscanf(stripped, "%u %10s", &num, name) == 2) &&
		    (strncmp(name, "vhost-vdpa", 10) == 0)) {
			found = true;
			break;
		}
	}
	fclose(fp);
	return found ? num : UNNAMED_MAJOR;
}
static enum virtio_user_backend_type
virtio_user_backend_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1) {
		PMD_INIT_LOG(ERR, "stat() failed: %s (%s)", path,
			     strerror(errno));
		return VIRTIO_USER_BACKEND_UNKNOWN;
	}

	if (S_ISSOCK(sb.st_mode)) {
		return VIRTIO_USER_BACKEND_VHOST_USER;
	} else if (S_ISCHR(sb.st_mode)) {
		if (major(sb.st_rdev) == MISC_MAJOR)
			return VIRTIO_USER_BACKEND_VHOST_KERNEL;
		if (major(sb.st_rdev) == vdpa_dynamic_major_num())
			return VIRTIO_USER_BACKEND_VHOST_VDPA;
	}
	return VIRTIO_USER_BACKEND_UNKNOWN;
}
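/*
 * Backend auto-detection implemented above: the "path" argument alone
 * selects the backend. A UNIX socket means vhost-user, a character device
 * with the misc major (e.g. /dev/vhost-net) means vhost-kernel, and a
 * character device whose major matches the dynamically allocated
 * "vhost-vdpa" entry of /proc/devices (a line such as "511 vhost-vdpa";
 * the number varies between systems) means vhost-vdpa.
 */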
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support msix.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_vec_rx = 0;
	hw->use_vec_tx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	enum virtio_user_backend_type backend_type = VIRTIO_USER_BACKEND_UNKNOWN;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	uint64_t vectorized = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	backend_type = virtio_user_backend_type(path);
	if (backend_type == VIRTIO_USER_BACKEND_UNKNOWN) {
		PMD_INIT_LOG(ERR,
			     "unable to determine backend type for path %s",
			     path);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (backend_type != VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			PMD_INIT_LOG(ERR,
				     "arg %s applies only to vhost-kernel backend",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_VECTORIZED) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_VECTORIZED,
				       &get_integer_arg, &vectorized) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_VECTORIZED);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			     VIRTIO_USER_ARG_QUEUES_NUM, queues,
			     VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(dev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	hw = eth_dev->data->dev_private;
	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq, backend_type) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by pci probing for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	if (vectorized) {
		if (packed_vq) {
#if defined(CC_AVX512_SUPPORT)
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_INIT_LOG(INFO,
				"build environment does not support vectorized packed ring");
#endif
		} else {
			hw->use_vec_rx = 1;
		}
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}

static int virtio_user_pmd_dma_map(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_map)
		return dev->ops->dma_map(dev, addr, iova, len);

	return 0;
}

static int virtio_user_pmd_dma_unmap(struct rte_vdev_device *vdev, void *addr,
		uint64_t iova, size_t len)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_user_dev *dev;
	struct virtio_hw *hw;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close. */
	if (!eth_dev)
		return 0;

	hw = (struct virtio_hw *)eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;

	if (dev->ops->dma_unmap)
		return dev->ops->dma_unmap(dev, addr, iova, len);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
	.dma_map = virtio_user_pmd_dma_map,
	.dma_unmap = virtio_user_pmd_dma_unmap,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int> "
	"vectorized=<0|1>");
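/*
 * Illustrative usage of the devargs registered above (paths and option
 * values are examples only):
 *
 *   vhost-user backend, client mode:
 *     testpmd ... --vdev=net_virtio_user0,path=/tmp/vhost-user0.sock,queues=2
 *
 *   vhost-user backend, server mode (reconnection handled by
 *   virtio_user_server_reconnect() above):
 *     testpmd ... --vdev=net_virtio_user0,path=/tmp/sock0,server=1,queue_size=1024
 *
 *   vhost-kernel backend (path points at the vhost-net character device):
 *     testpmd ... --vdev=net_virtio_user0,path=/dev/vhost-net,iface=tap0
 */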