1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <fcntl.h> 9 #include <string.h> 10 #include <errno.h> 11 #include <sys/mman.h> 12 #include <unistd.h> 13 #include <sys/eventfd.h> 14 #include <sys/types.h> 15 #include <sys/stat.h> 16 17 #include <rte_alarm.h> 18 #include <rte_string_fns.h> 19 #include <rte_eal_memconfig.h> 20 #include <rte_malloc.h> 21 #include <rte_io.h> 22 23 #include "vhost.h" 24 #include "virtio_user_dev.h" 25 #include "../virtio_ethdev.h" 26 27 #define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb" 28 29 const char * const virtio_user_backend_strings[] = { 30 [VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN", 31 [VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER", 32 [VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET", 33 [VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA", 34 }; 35 36 static int 37 virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel) 38 { 39 /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come 40 * firstly because vhost depends on this msg to allocate virtqueue 41 * pair. 42 */ 43 struct vhost_vring_file file; 44 int ret; 45 46 file.index = queue_sel; 47 file.fd = dev->callfds[queue_sel]; 48 ret = dev->ops->set_vring_call(dev, &file); 49 if (ret < 0) { 50 PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u", dev->path, queue_sel); 51 return -1; 52 } 53 54 return 0; 55 } 56 57 static int 58 virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) 59 { 60 int ret; 61 struct vhost_vring_file file; 62 struct vhost_vring_state state; 63 struct vring *vring = &dev->vrings.split[queue_sel]; 64 struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel]; 65 struct vhost_vring_addr addr = { 66 .index = queue_sel, 67 .log_guest_addr = 0, 68 .flags = 0, /* disable log */ 69 }; 70 71 if (queue_sel == dev->max_queue_pairs * 2) { 72 if (!dev->scvq) { 73 PMD_INIT_LOG(ERR, "(%s) Shadow control queue expected but missing", 74 dev->path); 75 goto err; 76 } 77 78 /* Use shadow control queue information */ 79 vring = &dev->scvq->vq_split.ring; 80 pq_vring = &dev->scvq->vq_packed.ring; 81 } 82 83 if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) { 84 addr.desc_user_addr = 85 (uint64_t)(uintptr_t)pq_vring->desc; 86 addr.avail_user_addr = 87 (uint64_t)(uintptr_t)pq_vring->driver; 88 addr.used_user_addr = 89 (uint64_t)(uintptr_t)pq_vring->device; 90 } else { 91 addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc; 92 addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail; 93 addr.used_user_addr = (uint64_t)(uintptr_t)vring->used; 94 } 95 96 state.index = queue_sel; 97 state.num = vring->num; 98 ret = dev->ops->set_vring_num(dev, &state); 99 if (ret < 0) 100 goto err; 101 102 state.index = queue_sel; 103 state.num = 0; /* no reservation */ 104 if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) 105 state.num |= (1 << 15); 106 ret = dev->ops->set_vring_base(dev, &state); 107 if (ret < 0) 108 goto err; 109 110 ret = dev->ops->set_vring_addr(dev, &addr); 111 if (ret < 0) 112 goto err; 113 114 /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes 115 * lastly because vhost depends on this msg to judge if 116 * virtio is ready. 117 */ 118 file.index = queue_sel; 119 file.fd = dev->kickfds[queue_sel]; 120 ret = dev->ops->set_vring_kick(dev, &file); 121 if (ret < 0) 122 goto err; 123 124 return 0; 125 err: 126 PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u", dev->path, queue_sel); 127 128 return -1; 129 } 130 131 static int 132 virtio_user_queue_setup(struct virtio_user_dev *dev, 133 int (*fn)(struct virtio_user_dev *, uint32_t)) 134 { 135 uint32_t i, nr_vq; 136 137 nr_vq = dev->max_queue_pairs * 2; 138 if (dev->hw_cvq) 139 nr_vq++; 140 141 for (i = 0; i < nr_vq; i++) { 142 if (fn(dev, i) < 0) { 143 PMD_DRV_LOG(ERR, "(%s) setup VQ %u failed", dev->path, i); 144 return -1; 145 } 146 } 147 148 return 0; 149 } 150 151 int 152 virtio_user_dev_set_features(struct virtio_user_dev *dev) 153 { 154 uint64_t features; 155 int ret = -1; 156 157 pthread_mutex_lock(&dev->mutex); 158 159 /* Step 0: tell vhost to create queues */ 160 if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0) 161 goto error; 162 163 features = dev->features; 164 165 /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */ 166 features &= ~(1ull << VIRTIO_NET_F_MAC); 167 /* Strip VIRTIO_NET_F_CTRL_VQ if the devices does not really support control VQ */ 168 if (!dev->hw_cvq) 169 features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ); 170 features &= ~(1ull << VIRTIO_NET_F_STATUS); 171 ret = dev->ops->set_features(dev, features); 172 if (ret < 0) 173 goto error; 174 PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features); 175 error: 176 pthread_mutex_unlock(&dev->mutex); 177 178 return ret; 179 } 180 181 int 182 virtio_user_start_device(struct virtio_user_dev *dev) 183 { 184 int ret; 185 186 /* 187 * XXX workaround! 188 * 189 * We need to make sure that the locks will be 190 * taken in the correct order to avoid deadlocks. 191 * 192 * Before releasing this lock, this thread should 193 * not trigger any memory hotplug events. 194 * 195 * This is a temporary workaround, and should be 196 * replaced when we get proper supports from the 197 * memory subsystem in the future. 198 */ 199 rte_mcfg_mem_read_lock(); 200 pthread_mutex_lock(&dev->mutex); 201 202 /* Step 2: share memory regions */ 203 ret = dev->ops->set_memory_table(dev); 204 if (ret < 0) 205 goto error; 206 207 /* Step 3: kick queues */ 208 ret = virtio_user_queue_setup(dev, virtio_user_kick_queue); 209 if (ret < 0) 210 goto error; 211 212 /* Step 4: enable queues 213 * we enable the 1st queue pair by default. 214 */ 215 ret = dev->ops->enable_qp(dev, 0, 1); 216 if (ret < 0) 217 goto error; 218 219 dev->started = true; 220 221 pthread_mutex_unlock(&dev->mutex); 222 rte_mcfg_mem_read_unlock(); 223 224 return 0; 225 error: 226 pthread_mutex_unlock(&dev->mutex); 227 rte_mcfg_mem_read_unlock(); 228 229 PMD_INIT_LOG(ERR, "(%s) Failed to start device", dev->path); 230 231 /* TODO: free resource here or caller to check */ 232 return -1; 233 } 234 235 int virtio_user_stop_device(struct virtio_user_dev *dev) 236 { 237 struct vhost_vring_state state; 238 uint32_t i; 239 int ret; 240 241 pthread_mutex_lock(&dev->mutex); 242 if (!dev->started) 243 goto out; 244 245 for (i = 0; i < dev->max_queue_pairs; ++i) { 246 ret = dev->ops->enable_qp(dev, i, 0); 247 if (ret < 0) 248 goto err; 249 } 250 251 /* Stop the backend. */ 252 for (i = 0; i < dev->max_queue_pairs * 2; ++i) { 253 state.index = i; 254 ret = dev->ops->get_vring_base(dev, &state); 255 if (ret < 0) { 256 PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i); 257 goto err; 258 } 259 } 260 261 dev->started = false; 262 263 out: 264 pthread_mutex_unlock(&dev->mutex); 265 266 return 0; 267 err: 268 pthread_mutex_unlock(&dev->mutex); 269 270 PMD_INIT_LOG(ERR, "(%s) Failed to stop device", dev->path); 271 272 return -1; 273 } 274 275 static int 276 virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_max_qp) 277 { 278 int ret; 279 280 if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MQ))) { 281 dev->max_queue_pairs = 1; 282 return 0; 283 } 284 285 if (!dev->ops->get_config) { 286 dev->max_queue_pairs = user_max_qp; 287 return 0; 288 } 289 290 ret = dev->ops->get_config(dev, (uint8_t *)&dev->max_queue_pairs, 291 offsetof(struct virtio_net_config, max_virtqueue_pairs), 292 sizeof(uint16_t)); 293 if (ret) { 294 /* 295 * We need to know the max queue pair from the device so that 296 * the control queue gets the right index. 297 */ 298 dev->max_queue_pairs = 1; 299 PMD_DRV_LOG(ERR, "(%s) Failed to get max queue pairs from device", dev->path); 300 301 return ret; 302 } 303 304 return 0; 305 } 306 307 int 308 virtio_user_dev_get_rss_config(struct virtio_user_dev *dev, void *dst, size_t offset, int length) 309 { 310 int ret = 0; 311 312 if (!(dev->device_features & (1ULL << VIRTIO_NET_F_RSS))) 313 return -ENOTSUP; 314 315 if (!dev->ops->get_config) 316 return -ENOTSUP; 317 318 ret = dev->ops->get_config(dev, dst, offset, length); 319 if (ret) 320 PMD_DRV_LOG(ERR, "(%s) Failed to get rss config in device", dev->path); 321 322 return ret; 323 } 324 325 int 326 virtio_user_dev_set_mac(struct virtio_user_dev *dev) 327 { 328 int ret = 0; 329 330 if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC))) 331 return -ENOTSUP; 332 333 if (!dev->ops->set_config) 334 return -ENOTSUP; 335 336 ret = dev->ops->set_config(dev, dev->mac_addr, 337 offsetof(struct virtio_net_config, mac), 338 RTE_ETHER_ADDR_LEN); 339 if (ret) 340 PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path); 341 342 return ret; 343 } 344 345 int 346 virtio_user_dev_get_mac(struct virtio_user_dev *dev) 347 { 348 int ret = 0; 349 350 if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC))) 351 return -ENOTSUP; 352 353 if (!dev->ops->get_config) 354 return -ENOTSUP; 355 356 ret = dev->ops->get_config(dev, dev->mac_addr, 357 offsetof(struct virtio_net_config, mac), 358 RTE_ETHER_ADDR_LEN); 359 if (ret) 360 PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path); 361 362 return ret; 363 } 364 365 static void 366 virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac) 367 { 368 struct rte_ether_addr cmdline_mac; 369 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 370 int ret; 371 372 if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) { 373 /* 374 * MAC address was passed from command-line, try to store 375 * it in the device if it supports it. Otherwise try to use 376 * the device one. 377 */ 378 memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN); 379 dev->mac_specified = 1; 380 381 /* Setting MAC may fail, continue to get the device one in this case */ 382 virtio_user_dev_set_mac(dev); 383 ret = virtio_user_dev_get_mac(dev); 384 if (ret == -ENOTSUP) 385 goto out; 386 387 if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN)) 388 PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path); 389 } else { 390 ret = virtio_user_dev_get_mac(dev); 391 if (ret) { 392 PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, use random", 393 dev->path); 394 return; 395 } 396 397 dev->mac_specified = 1; 398 } 399 out: 400 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, 401 (struct rte_ether_addr *)dev->mac_addr); 402 PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf); 403 } 404 405 static int 406 virtio_user_dev_init_notify(struct virtio_user_dev *dev) 407 { 408 uint32_t i, j, nr_vq; 409 int callfd; 410 int kickfd; 411 412 nr_vq = dev->max_queue_pairs * 2; 413 if (dev->hw_cvq) 414 nr_vq++; 415 416 for (i = 0; i < nr_vq; i++) { 417 /* May use invalid flag, but some backend uses kickfd and 418 * callfd as criteria to judge if dev is alive. so finally we 419 * use real event_fd. 420 */ 421 callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); 422 if (callfd < 0) { 423 PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno)); 424 goto err; 425 } 426 kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); 427 if (kickfd < 0) { 428 close(callfd); 429 PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno)); 430 goto err; 431 } 432 dev->callfds[i] = callfd; 433 dev->kickfds[i] = kickfd; 434 } 435 436 if (dev->ops->map_notification_area) 437 if (dev->ops->map_notification_area(dev)) 438 goto err; 439 440 return 0; 441 err: 442 for (j = 0; j < i; j++) { 443 if (dev->kickfds[j] >= 0) { 444 close(dev->kickfds[j]); 445 dev->kickfds[j] = -1; 446 } 447 if (dev->callfds[j] >= 0) { 448 close(dev->callfds[j]); 449 dev->callfds[j] = -1; 450 } 451 } 452 453 return -1; 454 } 455 456 static void 457 virtio_user_dev_uninit_notify(struct virtio_user_dev *dev) 458 { 459 uint32_t i; 460 461 for (i = 0; i < dev->max_queue_pairs * 2; ++i) { 462 if (dev->kickfds[i] >= 0) { 463 close(dev->kickfds[i]); 464 dev->kickfds[i] = -1; 465 } 466 if (dev->callfds[i] >= 0) { 467 close(dev->callfds[i]); 468 dev->callfds[i] = -1; 469 } 470 } 471 if (dev->ops->unmap_notification_area && dev->notify_area) 472 dev->ops->unmap_notification_area(dev); 473 } 474 475 static int 476 virtio_user_fill_intr_handle(struct virtio_user_dev *dev) 477 { 478 uint32_t i; 479 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id]; 480 481 if (eth_dev->intr_handle == NULL) { 482 eth_dev->intr_handle = 483 rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); 484 if (eth_dev->intr_handle == NULL) { 485 PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path); 486 return -1; 487 } 488 } 489 490 for (i = 0; i < dev->max_queue_pairs; ++i) { 491 if (rte_intr_efds_index_set(eth_dev->intr_handle, i, 492 dev->callfds[2 * i + VTNET_SQ_RQ_QUEUE_IDX])) 493 return -rte_errno; 494 } 495 496 if (rte_intr_nb_efd_set(eth_dev->intr_handle, dev->max_queue_pairs)) 497 return -rte_errno; 498 499 if (rte_intr_max_intr_set(eth_dev->intr_handle, 500 dev->max_queue_pairs + 1)) 501 return -rte_errno; 502 503 if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV)) 504 return -rte_errno; 505 506 /* For virtio vdev, no need to read counter for clean */ 507 if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0)) 508 return -rte_errno; 509 510 if (rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev))) 511 return -rte_errno; 512 513 return 0; 514 } 515 516 static void 517 virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused, 518 const void *addr, 519 size_t len __rte_unused, 520 void *arg) 521 { 522 struct virtio_user_dev *dev = arg; 523 struct rte_memseg_list *msl; 524 uint16_t i; 525 int ret = 0; 526 527 /* ignore externally allocated memory */ 528 msl = rte_mem_virt2memseg_list(addr); 529 if (msl->external) 530 return; 531 532 pthread_mutex_lock(&dev->mutex); 533 534 if (dev->started == false) 535 goto exit; 536 537 /* Step 1: pause the active queues */ 538 for (i = 0; i < dev->queue_pairs; i++) { 539 ret = dev->ops->enable_qp(dev, i, 0); 540 if (ret < 0) 541 goto exit; 542 } 543 544 /* Step 2: update memory regions */ 545 ret = dev->ops->set_memory_table(dev); 546 if (ret < 0) 547 goto exit; 548 549 /* Step 3: resume the active queues */ 550 for (i = 0; i < dev->queue_pairs; i++) { 551 ret = dev->ops->enable_qp(dev, i, 1); 552 if (ret < 0) 553 goto exit; 554 } 555 556 exit: 557 pthread_mutex_unlock(&dev->mutex); 558 559 if (ret < 0) 560 PMD_DRV_LOG(ERR, "(%s) Failed to update memory table", dev->path); 561 } 562 563 static int 564 virtio_user_dev_setup(struct virtio_user_dev *dev) 565 { 566 if (dev->is_server) { 567 if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) { 568 PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!"); 569 return -1; 570 } 571 } 572 573 switch (dev->backend_type) { 574 case VIRTIO_USER_BACKEND_VHOST_USER: 575 dev->ops = &virtio_ops_user; 576 break; 577 case VIRTIO_USER_BACKEND_VHOST_KERNEL: 578 dev->ops = &virtio_ops_kernel; 579 break; 580 case VIRTIO_USER_BACKEND_VHOST_VDPA: 581 dev->ops = &virtio_ops_vdpa; 582 break; 583 default: 584 PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path); 585 return -1; 586 } 587 588 if (dev->ops->setup(dev) < 0) { 589 PMD_INIT_LOG(ERR, "(%s) Failed to setup backend", dev->path); 590 return -1; 591 } 592 593 return 0; 594 } 595 596 static int 597 virtio_user_alloc_vrings(struct virtio_user_dev *dev) 598 { 599 int i, size, nr_vrings; 600 bool packed_ring = !!(dev->device_features & (1ull << VIRTIO_F_RING_PACKED)); 601 602 nr_vrings = dev->max_queue_pairs * 2; 603 if (dev->device_features & (1ull << VIRTIO_NET_F_MQ)) 604 nr_vrings++; 605 606 dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0); 607 if (!dev->callfds) { 608 PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path); 609 return -1; 610 } 611 612 dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0); 613 if (!dev->kickfds) { 614 PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path); 615 goto free_callfds; 616 } 617 618 for (i = 0; i < nr_vrings; i++) { 619 dev->callfds[i] = -1; 620 dev->kickfds[i] = -1; 621 } 622 623 if (packed_ring) 624 size = sizeof(*dev->vrings.packed); 625 else 626 size = sizeof(*dev->vrings.split); 627 dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0); 628 if (!dev->vrings.ptr) { 629 PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path); 630 goto free_kickfds; 631 } 632 633 if (packed_ring) { 634 dev->packed_queues = rte_zmalloc("virtio_user_dev", 635 nr_vrings * sizeof(*dev->packed_queues), 0); 636 if (!dev->packed_queues) { 637 PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata", 638 dev->path); 639 goto free_vrings; 640 } 641 } 642 643 dev->qp_enabled = rte_zmalloc("virtio_user_dev", 644 dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0); 645 if (!dev->qp_enabled) { 646 PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path); 647 goto free_packed_queues; 648 } 649 650 return 0; 651 652 free_packed_queues: 653 rte_free(dev->packed_queues); 654 dev->packed_queues = NULL; 655 free_vrings: 656 rte_free(dev->vrings.ptr); 657 dev->vrings.ptr = NULL; 658 free_kickfds: 659 rte_free(dev->kickfds); 660 dev->kickfds = NULL; 661 free_callfds: 662 rte_free(dev->callfds); 663 dev->callfds = NULL; 664 665 return -1; 666 } 667 668 static void 669 virtio_user_free_vrings(struct virtio_user_dev *dev) 670 { 671 rte_free(dev->qp_enabled); 672 dev->qp_enabled = NULL; 673 rte_free(dev->packed_queues); 674 dev->packed_queues = NULL; 675 rte_free(dev->vrings.ptr); 676 dev->vrings.ptr = NULL; 677 rte_free(dev->kickfds); 678 dev->kickfds = NULL; 679 rte_free(dev->callfds); 680 dev->callfds = NULL; 681 } 682 683 /* Use below macro to filter features from vhost backend */ 684 #define VIRTIO_USER_SUPPORTED_FEATURES \ 685 (1ULL << VIRTIO_NET_F_MAC | \ 686 1ULL << VIRTIO_NET_F_STATUS | \ 687 1ULL << VIRTIO_NET_F_MQ | \ 688 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \ 689 1ULL << VIRTIO_NET_F_CTRL_VQ | \ 690 1ULL << VIRTIO_NET_F_CTRL_RX | \ 691 1ULL << VIRTIO_NET_F_CTRL_VLAN | \ 692 1ULL << VIRTIO_NET_F_CSUM | \ 693 1ULL << VIRTIO_NET_F_HOST_TSO4 | \ 694 1ULL << VIRTIO_NET_F_HOST_TSO6 | \ 695 1ULL << VIRTIO_NET_F_MRG_RXBUF | \ 696 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \ 697 1ULL << VIRTIO_NET_F_GUEST_CSUM | \ 698 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \ 699 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \ 700 1ULL << VIRTIO_F_IN_ORDER | \ 701 1ULL << VIRTIO_F_VERSION_1 | \ 702 1ULL << VIRTIO_F_RING_PACKED | \ 703 1ULL << VIRTIO_F_NOTIFICATION_DATA | \ 704 1ULL << VIRTIO_NET_F_RSS) 705 706 int 707 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues, 708 int cq, int queue_size, const char *mac, char **ifname, 709 int server, int mrg_rxbuf, int in_order, int packed_vq, 710 enum virtio_user_backend_type backend_type) 711 { 712 uint64_t backend_features; 713 714 pthread_mutex_init(&dev->mutex, NULL); 715 strlcpy(dev->path, path, PATH_MAX); 716 717 dev->started = 0; 718 dev->queue_pairs = 1; /* mq disabled by default */ 719 dev->max_queue_pairs = queues; /* initialize to user requested value for kernel backend */ 720 dev->queue_size = queue_size; 721 dev->is_server = server; 722 dev->mac_specified = 0; 723 dev->frontend_features = 0; 724 dev->unsupported_features = 0; 725 dev->backend_type = backend_type; 726 dev->ifname = *ifname; 727 728 if (virtio_user_dev_setup(dev) < 0) { 729 PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path); 730 return -1; 731 } 732 733 if (dev->ops->set_owner(dev) < 0) { 734 PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path); 735 goto destroy; 736 } 737 738 if (dev->ops->get_backend_features(&backend_features) < 0) { 739 PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path); 740 goto destroy; 741 } 742 743 dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features); 744 745 if (dev->ops->get_features(dev, &dev->device_features) < 0) { 746 PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path); 747 goto destroy; 748 } 749 750 virtio_user_dev_init_mac(dev, mac); 751 752 if (virtio_user_dev_init_max_queue_pairs(dev, queues)) 753 dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ); 754 755 if (dev->max_queue_pairs > 1) 756 cq = 1; 757 758 if (!mrg_rxbuf) 759 dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF); 760 761 if (!in_order) 762 dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER); 763 764 if (!packed_vq) 765 dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED); 766 767 if (dev->mac_specified) 768 dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC); 769 else 770 dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC); 771 772 if (cq) { 773 /* device does not really need to know anything about CQ, 774 * so if necessary, we just claim to support CQ 775 */ 776 dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ); 777 } else { 778 dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ); 779 /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */ 780 dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX); 781 dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN); 782 dev->unsupported_features |= 783 (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE); 784 dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ); 785 dev->unsupported_features |= 786 (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR); 787 } 788 789 /* The backend will not report this feature, we add it explicitly */ 790 if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) 791 dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS); 792 793 dev->frontend_features &= ~dev->unsupported_features; 794 dev->device_features &= ~dev->unsupported_features; 795 796 if (virtio_user_alloc_vrings(dev) < 0) { 797 PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path); 798 goto destroy; 799 } 800 801 if (virtio_user_dev_init_notify(dev) < 0) { 802 PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path); 803 goto free_vrings; 804 } 805 806 if (virtio_user_fill_intr_handle(dev) < 0) { 807 PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler", dev->path); 808 goto notify_uninit; 809 } 810 811 if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME, 812 virtio_user_mem_event_cb, dev)) { 813 if (rte_errno != ENOTSUP) { 814 PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback", 815 dev->path); 816 goto notify_uninit; 817 } 818 } 819 820 *ifname = NULL; 821 return 0; 822 823 notify_uninit: 824 virtio_user_dev_uninit_notify(dev); 825 free_vrings: 826 virtio_user_free_vrings(dev); 827 destroy: 828 dev->ops->destroy(dev); 829 830 return -1; 831 } 832 833 void 834 virtio_user_dev_uninit(struct virtio_user_dev *dev) 835 { 836 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id]; 837 838 rte_intr_instance_free(eth_dev->intr_handle); 839 eth_dev->intr_handle = NULL; 840 841 virtio_user_stop_device(dev); 842 843 rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev); 844 845 virtio_user_dev_uninit_notify(dev); 846 847 virtio_user_free_vrings(dev); 848 849 free(dev->ifname); 850 851 if (dev->is_server) 852 unlink(dev->path); 853 854 dev->ops->destroy(dev); 855 } 856 857 static uint8_t 858 virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs) 859 { 860 uint16_t i; 861 uint8_t ret = 0; 862 863 if (q_pairs > dev->max_queue_pairs) { 864 PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported", 865 dev->path, q_pairs, dev->max_queue_pairs); 866 return -1; 867 } 868 869 for (i = 0; i < q_pairs; ++i) 870 ret |= dev->ops->enable_qp(dev, i, 1); 871 for (i = q_pairs; i < dev->max_queue_pairs; ++i) 872 ret |= dev->ops->enable_qp(dev, i, 0); 873 874 if (dev->scvq) 875 ret |= dev->ops->cvq_enable(dev, 1); 876 877 dev->queue_pairs = q_pairs; 878 879 return ret; 880 } 881 882 #define CVQ_MAX_DATA_DESCS 32 883 884 static uint32_t 885 virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring, 886 uint16_t idx_hdr) 887 { 888 struct virtio_net_ctrl_hdr *hdr; 889 virtio_net_ctrl_ack status = ~0; 890 uint16_t i, idx_data, idx_status; 891 uint32_t n_descs = 0; 892 int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0; 893 894 /* locate desc for header, data, and status */ 895 idx_data = vring->desc[idx_hdr].next; 896 n_descs++; 897 898 i = idx_data; 899 while (vring->desc[i].flags == VRING_DESC_F_NEXT) { 900 dlen[nb_dlen++] = vring->desc[i].len; 901 i = vring->desc[i].next; 902 n_descs++; 903 } 904 905 /* locate desc for status */ 906 idx_status = i; 907 n_descs++; 908 909 hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr; 910 if (hdr->class == VIRTIO_NET_CTRL_MQ && 911 hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 912 uint16_t queues; 913 914 queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr; 915 status = virtio_user_handle_mq(dev, queues); 916 } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { 917 struct virtio_net_ctrl_rss *rss; 918 919 rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr; 920 status = virtio_user_handle_mq(dev, rss->max_tx_vq); 921 } else if (hdr->class == VIRTIO_NET_CTRL_RX || 922 hdr->class == VIRTIO_NET_CTRL_MAC || 923 hdr->class == VIRTIO_NET_CTRL_VLAN) { 924 status = 0; 925 } 926 927 if (!status && dev->scvq) 928 status = virtio_send_command(&dev->scvq->cq, 929 (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen); 930 931 /* Update status */ 932 *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status; 933 934 return n_descs; 935 } 936 937 static inline int 938 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter) 939 { 940 uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE); 941 942 return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) && 943 wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED); 944 } 945 946 static uint32_t 947 virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev, 948 struct vring_packed *vring, 949 uint16_t idx_hdr) 950 { 951 struct virtio_net_ctrl_hdr *hdr; 952 virtio_net_ctrl_ack status = ~0; 953 uint16_t idx_data, idx_status; 954 /* initialize to one, header is first */ 955 uint32_t n_descs = 1; 956 int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0; 957 958 /* locate desc for header, data, and status */ 959 idx_data = idx_hdr + 1; 960 if (idx_data >= dev->queue_size) 961 idx_data -= dev->queue_size; 962 963 n_descs++; 964 965 idx_status = idx_data; 966 while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) { 967 dlen[nb_dlen++] = vring->desc[idx_status].len; 968 idx_status++; 969 if (idx_status >= dev->queue_size) 970 idx_status -= dev->queue_size; 971 n_descs++; 972 } 973 974 hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr; 975 if (hdr->class == VIRTIO_NET_CTRL_MQ && 976 hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 977 uint16_t queues; 978 979 queues = *(uint16_t *)(uintptr_t) 980 vring->desc[idx_data].addr; 981 status = virtio_user_handle_mq(dev, queues); 982 } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { 983 struct virtio_net_ctrl_rss *rss; 984 985 rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr; 986 status = virtio_user_handle_mq(dev, rss->max_tx_vq); 987 } else if (hdr->class == VIRTIO_NET_CTRL_RX || 988 hdr->class == VIRTIO_NET_CTRL_MAC || 989 hdr->class == VIRTIO_NET_CTRL_VLAN) { 990 status = 0; 991 } 992 993 if (!status && dev->scvq) 994 status = virtio_send_command(&dev->scvq->cq, 995 (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen); 996 997 /* Update status */ 998 *(virtio_net_ctrl_ack *)(uintptr_t) 999 vring->desc[idx_status].addr = status; 1000 1001 /* Update used descriptor */ 1002 vring->desc[idx_hdr].id = vring->desc[idx_status].id; 1003 vring->desc[idx_hdr].len = sizeof(status); 1004 1005 return n_descs; 1006 } 1007 1008 static void 1009 virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx) 1010 { 1011 struct virtio_user_queue *vq = &dev->packed_queues[queue_idx]; 1012 struct vring_packed *vring = &dev->vrings.packed[queue_idx]; 1013 uint16_t n_descs, flags; 1014 1015 /* Perform a load-acquire barrier in desc_is_avail to 1016 * enforce the ordering between desc flags and desc 1017 * content. 1018 */ 1019 while (desc_is_avail(&vring->desc[vq->used_idx], 1020 vq->used_wrap_counter)) { 1021 1022 n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring, 1023 vq->used_idx); 1024 1025 flags = VRING_DESC_F_WRITE; 1026 if (vq->used_wrap_counter) 1027 flags |= VRING_PACKED_DESC_F_AVAIL_USED; 1028 1029 __atomic_store_n(&vring->desc[vq->used_idx].flags, flags, 1030 __ATOMIC_RELEASE); 1031 1032 vq->used_idx += n_descs; 1033 if (vq->used_idx >= dev->queue_size) { 1034 vq->used_idx -= dev->queue_size; 1035 vq->used_wrap_counter ^= 1; 1036 } 1037 } 1038 } 1039 1040 static void 1041 virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx) 1042 { 1043 uint16_t avail_idx, desc_idx; 1044 struct vring_used_elem *uep; 1045 uint32_t n_descs; 1046 struct vring *vring = &dev->vrings.split[queue_idx]; 1047 1048 /* Consume avail ring, using used ring idx as first one */ 1049 while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED) 1050 != vring->avail->idx) { 1051 avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED) 1052 & (vring->num - 1); 1053 desc_idx = vring->avail->ring[avail_idx]; 1054 1055 n_descs = virtio_user_handle_ctrl_msg_split(dev, vring, desc_idx); 1056 1057 /* Update used ring */ 1058 uep = &vring->used->ring[avail_idx]; 1059 uep->id = desc_idx; 1060 uep->len = n_descs; 1061 1062 __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED); 1063 } 1064 } 1065 1066 void 1067 virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx) 1068 { 1069 if (virtio_with_packed_queue(&dev->hw)) 1070 virtio_user_handle_cq_packed(dev, queue_idx); 1071 else 1072 virtio_user_handle_cq_split(dev, queue_idx); 1073 } 1074 1075 static void 1076 virtio_user_control_queue_notify(struct virtqueue *vq, void *cookie) 1077 { 1078 struct virtio_user_dev *dev = cookie; 1079 uint64_t notify_data = 1; 1080 1081 if (!dev->notify_area) { 1082 if (write(dev->kickfds[vq->vq_queue_index], ¬ify_data, sizeof(notify_data)) < 0) 1083 PMD_DRV_LOG(ERR, "failed to kick backend: %s", 1084 strerror(errno)); 1085 return; 1086 } else if (!virtio_with_feature(&dev->hw, VIRTIO_F_NOTIFICATION_DATA)) { 1087 rte_write16(vq->vq_queue_index, vq->notify_addr); 1088 return; 1089 } 1090 1091 if (virtio_with_packed_queue(&dev->hw)) { 1092 /* Bit[0:15]: vq queue index 1093 * Bit[16:30]: avail index 1094 * Bit[31]: avail wrap counter 1095 */ 1096 notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags & 1097 VRING_PACKED_DESC_F_AVAIL)) << 31) | 1098 ((uint32_t)vq->vq_avail_idx << 16) | 1099 vq->vq_queue_index; 1100 } else { 1101 /* Bit[0:15]: vq queue index 1102 * Bit[16:31]: avail index 1103 */ 1104 notify_data = ((uint32_t)vq->vq_avail_idx << 16) | 1105 vq->vq_queue_index; 1106 } 1107 rte_write32(notify_data, vq->notify_addr); 1108 } 1109 1110 int 1111 virtio_user_dev_create_shadow_cvq(struct virtio_user_dev *dev, struct virtqueue *vq) 1112 { 1113 char name[VIRTQUEUE_MAX_NAME_SZ]; 1114 struct virtqueue *scvq; 1115 1116 snprintf(name, sizeof(name), "port%d_shadow_cvq", vq->hw->port_id); 1117 scvq = virtqueue_alloc(&dev->hw, vq->vq_queue_index, vq->vq_nentries, 1118 VTNET_CQ, SOCKET_ID_ANY, name); 1119 if (!scvq) { 1120 PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq\n", dev->path); 1121 return -ENOMEM; 1122 } 1123 1124 scvq->cq.notify_queue = &virtio_user_control_queue_notify; 1125 scvq->cq.notify_cookie = dev; 1126 scvq->notify_addr = vq->notify_addr; 1127 dev->scvq = scvq; 1128 1129 return 0; 1130 } 1131 1132 void 1133 virtio_user_dev_destroy_shadow_cvq(struct virtio_user_dev *dev) 1134 { 1135 if (!dev->scvq) 1136 return; 1137 1138 virtqueue_free(dev->scvq); 1139 dev->scvq = NULL; 1140 } 1141 1142 int 1143 virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status) 1144 { 1145 int ret; 1146 1147 pthread_mutex_lock(&dev->mutex); 1148 dev->status = status; 1149 ret = dev->ops->set_status(dev, status); 1150 if (ret && ret != -ENOTSUP) 1151 PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path); 1152 1153 pthread_mutex_unlock(&dev->mutex); 1154 return ret; 1155 } 1156 1157 int 1158 virtio_user_dev_update_status(struct virtio_user_dev *dev) 1159 { 1160 int ret; 1161 uint8_t status; 1162 1163 pthread_mutex_lock(&dev->mutex); 1164 1165 ret = dev->ops->get_status(dev, &status); 1166 if (!ret) { 1167 dev->status = status; 1168 PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n" 1169 "\t-RESET: %u\n" 1170 "\t-ACKNOWLEDGE: %u\n" 1171 "\t-DRIVER: %u\n" 1172 "\t-DRIVER_OK: %u\n" 1173 "\t-FEATURES_OK: %u\n" 1174 "\t-DEVICE_NEED_RESET: %u\n" 1175 "\t-FAILED: %u", 1176 dev->status, 1177 (dev->status == VIRTIO_CONFIG_STATUS_RESET), 1178 !!(dev->status & VIRTIO_CONFIG_STATUS_ACK), 1179 !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER), 1180 !!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK), 1181 !!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK), 1182 !!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET), 1183 !!(dev->status & VIRTIO_CONFIG_STATUS_FAILED)); 1184 } else if (ret != -ENOTSUP) { 1185 PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path); 1186 } 1187 1188 pthread_mutex_unlock(&dev->mutex); 1189 return ret; 1190 } 1191 1192 int 1193 virtio_user_dev_update_link_state(struct virtio_user_dev *dev) 1194 { 1195 if (dev->ops->update_link_state) 1196 return dev->ops->update_link_state(dev); 1197 1198 return 0; 1199 } 1200 1201 static void 1202 virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev) 1203 { 1204 struct virtio_user_dev *dev = eth_dev->data->dev_private; 1205 struct virtio_hw *hw = &dev->hw; 1206 struct virtnet_rx *rxvq; 1207 struct virtnet_tx *txvq; 1208 uint16_t i; 1209 1210 /* Add lock to avoid queue contention. */ 1211 rte_spinlock_lock(&hw->state_lock); 1212 hw->started = 0; 1213 1214 /* 1215 * Waiting for datapath to complete before resetting queues. 1216 * 1 ms should be enough for the ongoing Tx/Rx function to finish. 1217 */ 1218 rte_delay_ms(1); 1219 1220 /* Vring reset for each Tx queue and Rx queue. */ 1221 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1222 rxvq = eth_dev->data->rx_queues[i]; 1223 virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq)); 1224 virtio_dev_rx_queue_setup_finish(eth_dev, i); 1225 } 1226 1227 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 1228 txvq = eth_dev->data->tx_queues[i]; 1229 virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq)); 1230 } 1231 1232 hw->started = 1; 1233 rte_spinlock_unlock(&hw->state_lock); 1234 } 1235 1236 void 1237 virtio_user_dev_delayed_disconnect_handler(void *param) 1238 { 1239 struct virtio_user_dev *dev = param; 1240 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id]; 1241 1242 if (rte_intr_disable(eth_dev->intr_handle) < 0) { 1243 PMD_DRV_LOG(ERR, "interrupt disable failed"); 1244 return; 1245 } 1246 PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d", 1247 rte_intr_fd_get(eth_dev->intr_handle)); 1248 if (rte_intr_callback_unregister(eth_dev->intr_handle, 1249 virtio_interrupt_handler, 1250 eth_dev) != 1) 1251 PMD_DRV_LOG(ERR, "interrupt unregister failed"); 1252 1253 if (dev->is_server) { 1254 if (dev->ops->server_disconnect) 1255 dev->ops->server_disconnect(dev); 1256 1257 rte_intr_fd_set(eth_dev->intr_handle, 1258 dev->ops->get_intr_fd(dev)); 1259 1260 PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", 1261 rte_intr_fd_get(eth_dev->intr_handle)); 1262 1263 if (rte_intr_callback_register(eth_dev->intr_handle, 1264 virtio_interrupt_handler, 1265 eth_dev)) 1266 PMD_DRV_LOG(ERR, "interrupt register failed"); 1267 1268 if (rte_intr_enable(eth_dev->intr_handle) < 0) { 1269 PMD_DRV_LOG(ERR, "interrupt enable failed"); 1270 return; 1271 } 1272 } 1273 } 1274 1275 static void 1276 virtio_user_dev_delayed_intr_reconfig_handler(void *param) 1277 { 1278 struct virtio_user_dev *dev = param; 1279 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id]; 1280 1281 PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d", 1282 rte_intr_fd_get(eth_dev->intr_handle)); 1283 1284 if (rte_intr_callback_unregister(eth_dev->intr_handle, 1285 virtio_interrupt_handler, 1286 eth_dev) != 1) 1287 PMD_DRV_LOG(ERR, "interrupt unregister failed"); 1288 1289 rte_intr_fd_set(eth_dev->intr_handle, dev->ops->get_intr_fd(dev)); 1290 1291 PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", 1292 rte_intr_fd_get(eth_dev->intr_handle)); 1293 1294 if (rte_intr_callback_register(eth_dev->intr_handle, 1295 virtio_interrupt_handler, eth_dev)) 1296 PMD_DRV_LOG(ERR, "interrupt register failed"); 1297 1298 if (rte_intr_enable(eth_dev->intr_handle) < 0) 1299 PMD_DRV_LOG(ERR, "interrupt enable failed"); 1300 } 1301 1302 int 1303 virtio_user_dev_server_reconnect(struct virtio_user_dev *dev) 1304 { 1305 int ret, old_status; 1306 struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id]; 1307 struct virtio_hw *hw = &dev->hw; 1308 1309 if (!dev->ops->server_reconnect) { 1310 PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path); 1311 return -1; 1312 } 1313 1314 if (dev->ops->server_reconnect(dev)) { 1315 PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path); 1316 return -1; 1317 } 1318 1319 old_status = dev->status; 1320 1321 virtio_reset(hw); 1322 1323 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK); 1324 1325 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); 1326 1327 if (dev->ops->get_features(dev, &dev->device_features) < 0) { 1328 PMD_INIT_LOG(ERR, "get_features failed: %s", 1329 strerror(errno)); 1330 return -1; 1331 } 1332 1333 /* unmask vhost-user unsupported features */ 1334 dev->device_features &= ~(dev->unsupported_features); 1335 1336 dev->features &= (dev->device_features | dev->frontend_features); 1337 1338 /* For packed ring, resetting queues is required in reconnection. */ 1339 if (virtio_with_packed_queue(hw) && 1340 (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) { 1341 PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped" 1342 " when packed ring reconnecting."); 1343 virtio_user_dev_reset_queues_packed(eth_dev); 1344 } 1345 1346 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK); 1347 1348 /* Start the device */ 1349 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK); 1350 if (!dev->started) 1351 return -1; 1352 1353 if (dev->queue_pairs > 1) { 1354 ret = virtio_user_handle_mq(dev, dev->queue_pairs); 1355 if (ret != 0) { 1356 PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!"); 1357 return -1; 1358 } 1359 } 1360 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { 1361 if (rte_intr_disable(eth_dev->intr_handle) < 0) { 1362 PMD_DRV_LOG(ERR, "interrupt disable failed"); 1363 return -1; 1364 } 1365 /* 1366 * This function can be called from the interrupt handler, so 1367 * we can't unregister interrupt handler here. Setting 1368 * alarm to do that later. 1369 */ 1370 rte_eal_alarm_set(1, 1371 virtio_user_dev_delayed_intr_reconfig_handler, 1372 (void *)dev); 1373 } 1374 PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!"); 1375 return 0; 1376 } 1377