/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all the per-virtqueue messages, make sure
	 * VHOST_USER_SET_VRING_CALL comes first, because vhost depends on
	 * this message to allocate the virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		/* For a packed ring, bit 15 carries the initial avail
		 * wrap counter.
		 */
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all the per-virtqueue messages, make sure
	 * VHOST_USER_SET_VRING_KICK comes last, because vhost depends on
	 * this message to judge whether virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;

	return S_ISSOCK(sb.st_mode);
}

int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	/* Step 1: negotiate protocol features & set features */
	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as the MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not really need to
	 * know about it.
	 */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	/* Strip VIRTIO_NET_F_STATUS, as it is emulated by the frontend */
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could pass an invalid fd here, but some backends use
		 * kickfd and callfd as the criteria to judge whether the
		 * device is alive, so real eventfds are used in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		/* only close the fds that were successfully created */
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For a virtio vdev, there is no need to read the eventfd counter
	 * to clear the interrupt.
	 */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (access(dev->path, F_OK) == 0 &&
		    !is_vhost_user_by_type(dev->path)) {
			PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (is_vhost_user_by_type(dev->path)) {
			dev->ops = &virtio_ops_user;
		} else {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter the features provided by the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK)

int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (!is_vhost_user_by_type(dev->path))
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend that vhost-user supports all of these
		 * features. Note that this could be problematic if a feature
		 * is negotiated here but turns out not to be supported by
		 * the vhost-user backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* The device does not really need to know anything about
		 * the CQ, so if necessary, we just claim to support it.
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (is_vhost_user_by_type(dev->path))
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
					    virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR,
				"Failed to register mem event callback\n");
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr =
		status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}