/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all the per-virtqueue messages, make sure VHOST_SET_VRING_CALL
	 * comes first, because vhost relies on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;
	int ret;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	ret = dev->ops->set_vring_call(dev, &file);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to create queue %u\n", dev->path, queue_sel);
		return -1;
	}

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	int ret;
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	ret = dev->ops->set_vring_num(dev, &state);
	if (ret < 0)
		goto err;

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	ret = dev->ops->set_vring_base(dev, &state);
	if (ret < 0)
		goto err;

	ret = dev->ops->set_vring_addr(dev, &addr);
	if (ret < 0)
		goto err;

	/* Of all the per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost uses this message to judge whether
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	ret = dev->ops->set_vring_kick(dev, &file);
	if (ret < 0)
		goto err;

	return 0;
err:
	PMD_INIT_LOG(ERR, "(%s) Failed to kick queue %u\n", dev->path, queue_sel);

	return -1;
}

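/* Apply the given per-queue handler to every Rx virtqueue first and then to
 * every Tx virtqueue, stopping at the first failure.
 */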
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup rx vq %u failed", dev->path, i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(ERR, "(%s) setup tx vq %u failed", dev->path, i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	/* Step 2: share memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	ret = virtio_user_queue_setup(dev, virtio_user_kick_queue);
	if (ret < 0)
		goto error;

	/* Step 4: enable queues.
	 * We enable the 1st queue pair by default.
	 */
	ret = dev->ops->enable_qp(dev, 0, 1);
	if (ret < 0)
		goto error;

	dev->started = true;

	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	PMD_INIT_LOG(ERR, "(%s) Failed to start device\n", dev->path);

	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int ret;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto err;
	}

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		ret = dev->ops->get_vring_base(dev, &state);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "(%s) get_vring_base failed, index=%u", dev->path, i);
			goto err;
		}
	}

	dev->started = false;

out:
	pthread_mutex_unlock(&dev->mutex);

	return 0;
err:
	pthread_mutex_unlock(&dev->mutex);

	PMD_INIT_LOG(ERR, "(%s) Failed to stop device\n", dev->path);

	return -1;
}

int
virtio_user_dev_set_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->set_config)
		return -ENOTSUP;

	ret = dev->ops->set_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to set MAC address in device", dev->path);

	return ret;
}

int
virtio_user_dev_get_mac(struct virtio_user_dev *dev)
{
	int ret = 0;

	if (!(dev->device_features & (1ULL << VIRTIO_NET_F_MAC)))
		return -ENOTSUP;

	if (!dev->ops->get_config)
		return -ENOTSUP;

	ret = dev->ops->get_config(dev, dev->mac_addr,
			offsetof(struct virtio_net_config, mac),
			RTE_ETHER_ADDR_LEN);
	if (ret)
		PMD_DRV_LOG(ERR, "(%s) Failed to get MAC address from device", dev->path);

	return ret;
}

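/* Pick the MAC address to use: a valid MAC from devargs takes precedence and
 * is pushed to the device when the backend supports it; otherwise the address
 * is read back from the device. If neither is available, a random address
 * will be used instead.
 */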
static void
virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr cmdline_mac;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (mac && rte_ether_unformat_addr(mac, &cmdline_mac) == 0) {
		/*
		 * MAC address was passed from command-line, try to store
		 * it in the device if it supports it. Otherwise try to use
		 * the device one.
		 */
		memcpy(dev->mac_addr, &cmdline_mac, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;

		/* Setting MAC may fail, continue to get the device one in this case */
		virtio_user_dev_set_mac(dev);
		ret = virtio_user_dev_get_mac(dev);
		if (ret == -ENOTSUP)
			goto out;

		if (memcmp(&cmdline_mac, dev->mac_addr, RTE_ETHER_ADDR_LEN))
			PMD_DRV_LOG(INFO, "(%s) Device MAC update failed", dev->path);
	} else {
		ret = virtio_user_dev_get_mac(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "(%s) No valid MAC in devargs or device, use random",
					dev->path);
			return;
		}

		dev->mac_specified = 1;
	}
out:
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE,
			(struct rte_ether_addr *)dev->mac_addr);
	PMD_DRV_LOG(INFO, "(%s) MAC %s specified", dev->path, buf);
}

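/* Create one callfd/kickfd eventfd pair per virtqueue. On failure, close and
 * reset the eventfds allocated so far.
 */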
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < dev->max_queue_pairs * 2; i++) {
		/* We could use an invalid flag, but some backends use the
		 * kickfd and callfd as the criteria to judge whether the
		 * device is alive, so use real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
			goto err;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		if (dev->kickfds[j] >= 0) {
			close(dev->kickfds[j]);
			dev->kickfds[j] = -1;
		}
		if (dev->callfds[j] >= 0) {
			close(dev->callfds[j]);
			dev->callfds[j] = -1;
		}
	}

	return -1;
}

static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
	uint32_t i;

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		if (dev->kickfds[i] >= 0) {
			close(dev->kickfds[i]);
			dev->kickfds[i] = -1;
		}
		if (dev->callfds[i] >= 0) {
			close(dev->callfds[i]);
			dev->callfds[i] = -1;
		}
	}
}

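/* Populate the ethdev interrupt handle: one efd per queue pair taken from the
 * callfds, plus the backend's own interrupt fd, using the VDEV handle type.
 */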
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "(%s) failed to allocate intr_handle", dev->path);
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

	return 0;
}

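/* Memory hotplug callback: pause the active queue pairs, resend the memory
 * table to the backend, then resume. Externally allocated memory is ignored.
 */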
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;
	int ret = 0;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 0);
		if (ret < 0)
			goto exit;
	}

	/* Step 2: update memory regions */
	ret = dev->ops->set_memory_table(dev);
	if (ret < 0)
		goto exit;

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++) {
		ret = dev->ops->enable_qp(dev, i, 1);
		if (ret < 0)
			goto exit;
	}

exit:
	pthread_mutex_unlock(&dev->mutex);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table\n", dev->path);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
	}

	switch (dev->backend_type) {
	case VIRTIO_USER_BACKEND_VHOST_USER:
		dev->ops = &virtio_ops_user;
		break;
	case VIRTIO_USER_BACKEND_VHOST_KERNEL:
		dev->ops = &virtio_ops_kernel;
		break;
	case VIRTIO_USER_BACKEND_VHOST_VDPA:
		dev->ops = &virtio_ops_vdpa;
		break;
	default:
		PMD_DRV_LOG(ERR, "(%s) Unknown backend type", dev->path);
		return -1;
	}

	if (dev->ops->setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to setup backend\n", dev->path);
		return -1;
	}

	if (virtio_user_dev_init_notify(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers\n", dev->path);
		goto destroy;
	}

	if (virtio_user_fill_intr_handle(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to init interrupt handler\n", dev->path);
		goto uninit;
	}

	return 0;

uninit:
	virtio_user_dev_uninit_notify(dev);
destroy:
	dev->ops->destroy(dev);

	return -1;
}

/* Use below macro to filter features from vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED)

int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t backend_features;
	int i;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
		dev->kickfds[i] = -1;
		dev->callfds[i] = -1;
	}

	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = 0;
	dev->backend_type = backend_type;

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path);
		return -1;
	}

	if (dev->ops->set_owner(dev) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend owner", dev->path);
		return -1;
	}

	if (dev->ops->get_backend_features(&backend_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend features", dev->path);
		return -1;
	}

	dev->unsupported_features = ~(VIRTIO_USER_SUPPORTED_FEATURES | backend_features);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get device features", dev->path);
		return -1;
	}

	virtio_user_dev_init_mac(dev, mac);

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* device does not really need to know anything about CQ,
		 * so if necessary, we just claim to support CQ
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	dev->frontend_features &= ~dev->unsupported_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "(%s) Failed to register mem event callback\n",
					dev->path);
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	virtio_user_dev_uninit_notify(dev);

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);

	dev->ops->destroy(dev);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "(%s) multi-q config %u, but only %u supported",
				dev->path, q_pairs, dev->max_queue_pairs);
		return -1;
	}

	for (i = 0; i < q_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 1);
	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
		ret |= dev->ops->enable_qp(dev, i, 0);

	dev->queue_pairs = q_pairs;

	return ret;
}

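/* Emulate the control virtqueue in software for the split ring layout: walk
 * the header/data/status descriptor chain, handle VQ_PAIRS_SET by enabling or
 * disabling queue pairs, ack the other supported classes, and write the
 * status back. Returns the number of descriptors consumed.
 */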
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

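/* A packed-ring descriptor is available to the device when its AVAIL flag
 * matches the current wrap counter and its USED flag does not.
 */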
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

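/* Split-ring variant: drain new control messages from the avail ring and
 * complete them in the used ring, one descriptor chain per message.
 */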
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	ret = dev->ops->set_status(dev, status);
	if (ret && ret != -ENOTSUP)
		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status\n", dev->path);

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	int ret;
	uint8_t status;

	pthread_mutex_lock(&dev->mutex);

	ret = dev->ops->get_status(dev, &status);
	if (!ret) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status\n", dev->path);
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
	if (dev->ops->update_link_state)
		return dev->ops->update_link_state(dev);

	return 0;
}

static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
	struct virtio_user_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq;
	uint16_t i;

	/* Add lock to avoid queue contention. */
	rte_spinlock_lock(&hw->state_lock);
	hw->started = 0;

	/*
	 * Waiting for datapath to complete before resetting queues.
	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
	 */
	rte_delay_ms(1);

	/* Vring reset for each Tx queue and Rx queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxvq = eth_dev->data->rx_queues[i];
		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
		virtio_dev_rx_queue_setup_finish(eth_dev, i);
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txvq = eth_dev->data->tx_queues[i];
		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
	}

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

void
virtio_user_dev_delayed_disconnect_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    eth_dev->intr_handle->fd);
	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	if (dev->is_server) {
		if (dev->ops->server_disconnect)
			dev->ops->server_disconnect(dev);

		eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

		PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
			    eth_dev->intr_handle->fd);

		if (rte_intr_callback_register(eth_dev->intr_handle,
					       virtio_interrupt_handler,
					       eth_dev))
			PMD_DRV_LOG(ERR, "interrupt register failed");

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

static void
virtio_user_dev_delayed_intr_reconfig_handler(void *param)
{
	struct virtio_user_dev *dev = param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];

	PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
		    eth_dev->intr_handle->fd);

	if (rte_intr_callback_unregister(eth_dev->intr_handle,
					 virtio_interrupt_handler,
					 eth_dev) != 1)
		PMD_DRV_LOG(ERR, "interrupt unregister failed");

	eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);

	PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", eth_dev->intr_handle->fd);

	if (rte_intr_callback_register(eth_dev->intr_handle,
				       virtio_interrupt_handler, eth_dev))
		PMD_DRV_LOG(ERR, "interrupt register failed");

	if (rte_intr_enable(eth_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

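/* Server-mode reconnection: re-establish the backend connection, redo the
 * status and feature negotiation, reset packed queues if the device was
 * running, re-enable multi-queue and re-arm the LSC interrupt handler.
 */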
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
	int ret, old_status;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
	struct virtio_hw *hw = &dev->hw;

	if (!dev->ops->server_reconnect) {
		PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
		return -1;
	}

	if (dev->ops->server_reconnect(dev)) {
		PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
		return -1;
	}

	old_status = dev->status;

	virtio_reset(hw);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	/* unmask vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= (dev->device_features | dev->frontend_features);

	/* For packed ring, resetting queues is required in reconnection. */
	if (virtio_with_packed_queue(hw) &&
	    (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
				" when packed ring reconnecting.");
		virtio_user_dev_reset_queues_packed(eth_dev);
	}

	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

	/* Start the device */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	if (!dev->started)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		/*
		 * This function can be called from the interrupt handler, so
		 * we can't unregister the interrupt handler here. Set an
		 * alarm to do that later.
		 */
		rte_eal_alarm_set(1,
				  virtio_user_dev_delayed_intr_reconfig_handler,
				  (void *)dev);
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}