/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
#include "virtio_user/vhost.h"

#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

static void
virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Wait for the datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(rxvq->vq);
		virtio_dev_rx_queue_setup_finish(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(txvq->vq);
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}


static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
	struct virtio_hw *hw = eth_dev->data->dev_private;

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	dev->device_features |= dev->frontend_features;

	/* Mask out features the vhost-user backend does not support. */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	/* For packed ring, resetting queues is required in reconnection.
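	 * In-flight descriptors cannot be recovered once the backend has
	 * reconnected, so the rings are reset to their initial state and
	 * the packets still on the fly are dropped (see the NOTICE below).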
	 */
	if (vtpci_packed_queue(hw)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
			     " when packed ring reconnecting.");
		virtio_user_reset_queues_packed(eth_dev);
	}

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}

static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
			    void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == RTE_ETHER_ADDR_LEN) {
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function may be called in the process of
				 * interrupt handling, so the callback cannot be
				 * unregistered here; set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
			     const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == RTE_ETHER_ADDR_LEN))
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
			   uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec)
{
	/* pretend we have done that */
	return vec;
}

/* This function returns the queue size, i.e. the number of descriptors, of a
 * specified queue. It differs from VHOST_USER_GET_QUEUE_NUM, which returns the
 * maximum number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
	 * QEMU stop the corresponding ioeventfds and reset the device status.
	 * For modern devices, setting queue desc, avail and used addresses in
	 * the PCI bar to 0 triggers no further behavior in QEMU.
	 *
	 * Here we just care about what information to deliver to vhost-user
	 * or vhost-kernel. So we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
#define VIRTIO_USER_ARG_SPEED "speed"
	VIRTIO_USER_ARG_SPEED,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	uint64_t integer = 0;

	if (!value || !extra_args)
		return -EINVAL;
	errno = 0;
	integer = strtoull(value, NULL, 0);
	/* extra_args keeps the default value; it should be replaced
	 * only in case of successful parsing of the 'value' arg.
	 */
	if (errno == 0)
		*(uint64_t *)extra_args = integer;
	return -errno;
}

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support msix.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_simple_rx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_INIT_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (is_vhost_user_by_type(path)) {
			PMD_INIT_LOG(ERR,
				     "arg %s applies only to vhost-kernel backend",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(dev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	hw = eth_dev->data->dev_private;
	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
				 queue_size, mac_addr, &ifname, server_mode,
				 mrg_rxbuf, in_order, packed_vq) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by rte_pci_probe() for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	/* Port has already been released by close.
	 */
	if (!eth_dev)
		return 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1> "
	"speed=<int>");
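
/*
 * Example usage (illustrative only, not part of the driver): the devargs
 * registered above can be passed on the EAL command line to instantiate
 * this PMD on top of an existing vhost-user socket. The socket path,
 * core list and queue values below are placeholders to be adapted to the
 * target environment:
 *
 *   testpmd -l 0-1 --no-pci \
 *       --vdev 'net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=1024' \
 *       -- -i
 */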