/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}
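
/* Apply @fn (create or kick) to every Rx virtqueue first, then to every
 * Tx virtqueue of the device.
 */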
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}
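
/* Disable all queue pairs and read back each vring's base index;
 * in vhost-user, GET_VRING_BASE also makes the backend stop
 * processing that ring.
 */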
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the malformed mac, use a random mac instead */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* May use invalid flag, but some backends use kickfd and
		 * callfd as criteria to judge if the device is alive, so in
		 * the end we use real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			close(callfd);
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}
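
/* Memory hotplug callback: when EAL adds or removes memseg regions, pause
 * the active queue pairs, re-send the memory table to the backend, then
 * resume the queues.
 */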
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
				VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features coming from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VHOST_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

#define VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
	 1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
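
/* Initialize the device: set up the backend, negotiate (protocol) features
 * with it, and mask out the features that the chosen devargs (mrg_rxbuf,
 * in_order, packed_vq, cq, mac) do not allow.
 */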
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->backend_type = backend_type;

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->protocol_features = VHOST_USER_SUPPORTED_PROTOCOL_FEATURES;
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		dev->protocol_features = VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if ((dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
				(dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend that vhost-user can support all these
		 * features. Note that this could be problematic if a feature
		 * is negotiated but not actually supported by the vhost-user
		 * backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;

		/* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
		 * until it's negotiated.
		 */
		dev->protocol_features &=
			~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about the
		 * CQ, so if necessary we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;
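
	/* Re-program the backend memory table whenever EAL hotplugs or
	 * unplugs memory; registration may legitimately be unsupported
	 * (e.g. in legacy memory mode), which is not treated as an error.
	 */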
	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to register mem event"
					" callback\n");
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}
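
/* Packed-ring variant of the emulated control queue handler: the descriptors
 * of one request are laid out back to back (header, data..., status), so walk
 * forward from idx_hdr and wrap at queue_size.
 */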
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}
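
/* Forward the virtio status byte to the backend. Note the argument width
 * differs: the vhost-user path passes a 64-bit payload, while vhost-vdpa
 * takes the 8-bit status directly.
 */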
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		ret = -ENOTSUP;

	if (ret && ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	pthread_mutex_lock(&dev->mutex);
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64 "\n", ret);
			err = -1;
			goto error;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
				&status);
	} else {
		err = -ENOTSUP;
	}

	if (!err) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (err != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
	}

error:
	pthread_mutex_unlock(&dev->mutex);
	return err;
}