/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

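	/* For a split ring the base is just the next avail index (0 for a
	 * fresh queue). For a packed ring, bit 15 of the value carries the
	 * initial available wrap counter, which starts at 1.
	 */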
	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
	 * last, because vhost depends on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

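	/* Shutdown order: first disable every queue pair, then query each
	 * vring with VHOST_USER_GET_VRING_BASE, which the vhost-user backend
	 * also treats as the request to stop processing that ring.
	 */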
	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could use an invalid flag here, but some backends use
		 * kickfd and callfd as criteria to judge if the device is
		 * alive, so we use real eventfds.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j <= i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

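	/* Note: only dev->queue_pairs (the pairs in use before the event),
	 * not max_queue_pairs, are re-enabled in step 3 below.
	 */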
	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
			   VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
			   VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES \
	(1ULL << VIRTIO_NET_F_MAC | \
	 1ULL << VIRTIO_NET_F_STATUS | \
	 1ULL << VIRTIO_NET_F_MQ | \
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \
	 1ULL << VIRTIO_NET_F_CTRL_VQ | \
	 1ULL << VIRTIO_NET_F_CTRL_RX | \
	 1ULL << VIRTIO_NET_F_CTRL_VLAN | \
	 1ULL << VIRTIO_NET_F_CSUM | \
	 1ULL << VIRTIO_NET_F_HOST_TSO4 | \
	 1ULL << VIRTIO_NET_F_HOST_TSO6 | \
	 1ULL << VIRTIO_NET_F_MRG_RXBUF | \
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \
	 1ULL << VIRTIO_NET_F_GUEST_CSUM | \
	 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
	 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
	 1ULL << VIRTIO_F_IN_ORDER | \
	 1ULL << VIRTIO_F_VERSION_1 | \
	 1ULL << VIRTIO_F_RING_PACKED | \
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES \
	(1ULL << VHOST_USER_PROTOCOL_F_MQ | \
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
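	/* unsupported_features starts as the complement of everything this
	 * driver can handle; it is widened below as devargs disable
	 * individual features, and device_features is masked with it at the
	 * end of this function.
	 */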
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend vhost-user can support all these features.
		 * Note that this could be problematic if some feature is
		 * negotiated but not actually supported by the vhost-user
		 * backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* device does not really need to know anything about CQ,
		 * so if necessary, we just claim to support CQ
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend does not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to register mem event"
					" callback\n");
			return -1;
		}
	}

	return 0;
}

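/*
 * A virtio-user port is normally created through vdev devargs parsed in
 * virtio_user_ethdev.c, which end up as the arguments to
 * virtio_user_dev_init() above. An illustrative example (values arbitrary):
 *
 *   --vdev=net_virtio_user0,path=/tmp/vhost-user.sock,queues=2,queue_size=256
 */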

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

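	/* The control queue is handled by this driver itself rather than by
	 * the backend, so descriptor addresses here are process-local
	 * virtual addresses and can be dereferenced directly.
	 */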
	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
			vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
							     vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

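/*
 * Device status plumbing below: vhost-user backends receive the status via
 * VHOST_USER_SET_STATUS / VHOST_USER_GET_STATUS (which only take effect when
 * the VHOST_USER_PROTOCOL_F_STATUS protocol feature was negotiated),
 * vhost-vdpa backends use the equivalent ioctls, and other backends skip
 * these calls entirely.
 */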

int
virtio_user_send_status_update(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		return 0;

	if (ret) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
		return -1;
	}

	return 0;
}

int
virtio_user_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64 "\n", ret);
			return -1;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
					     &status);
	} else {
		return 0;
	}

	if (err) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
		return -1;
	}

	dev->status = status;
	PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	return 0;
}