/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
	 * comes first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge whether
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;

	return S_ISSOCK(sb.st_mode);
}
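
/*
 * Bring the vhost backend up: create the virtqueues, send the negotiated
 * features, share the memory table, kick the queues and enable the first
 * queue pair.  The whole sequence runs under the EAL memory read lock and
 * the device mutex so that a memory hotplug event cannot change the memory
 * table while it is being shared with the backend.
 */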
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	/* Step 1: negotiate protocol features & set features */
	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not need to know about it */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the 1st queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}

int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore the wrong mac, use random mac */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}
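
/*
 * Allocate one eventfd pair per virtqueue: callfd is used by the backend to
 * interrupt the frontend, kickfd by the frontend to notify the backend.
 * Slots beyond max_queue_pairs * 2 are set to -1.
 */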
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could use an invalid flag here, but some backends use
		 * kickfd and callfd as the criteria to judge whether the
		 * device is alive, so we use real eventfds in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		/* Only close the eventfds that were actually created. */
		for (j = 0; j < i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}
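
/*
 * Pick the backend implementation based on the path: a UNIX socket selects
 * the vhost-user ops, anything else falls back to vhost-kernel, for which a
 * vhostfd/tapfd pair is reserved per queue pair.  Server mode only works
 * with vhost-user.
 */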
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (access(dev->path, F_OK) == 0 &&
		    !is_vhost_user_by_type(dev->path)) {
			PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (is_vhost_user_by_type(dev->path)) {
			dev->ops = &virtio_ops_user;
		} else {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use below macro to filter features from vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
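
/*
 * Initialize the virtio_user device: set up the backend, query its features
 * (or assume VIRTIO_USER_SUPPORTED_FEATURES in server mode, before a client
 * connects), and register the memory event callback so that the memory table
 * can be refreshed on hotplug.  The final feature set is computed as:
 *
 *   device_features = (backend_features | frontend_features)
 *                     & ~unsupported_features
 *
 * where unsupported_features starts as ~VIRTIO_USER_SUPPORTED_FEATURES and
 * grows as per-device options (mrg_rxbuf, in_order, packed_vq, cq, mac)
 * are disabled below.
 */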
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (!is_vhost_user_by_type(dev->path))
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend vhost-user can support all these features.
		 * Note that this could be problematic if some feature is
		 * negotiated but not actually supported by the vhost-user
		 * backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* device does not really need to know anything about CQ,
		 * so if necessary, we just claim to support CQ
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, we add it explicitly */
	if (is_vhost_user_by_type(dev->path))
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
					    virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to register mem event"
					" callback\n");
			return -1;
		}
	}

	return 0;
}

void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}
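
/*
 * Handle the VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET control command: enable the
 * first q_pairs queue pairs and disable the rest.  The return value is
 * written back as the virtio_net_ctrl_ack status by the control-queue
 * handlers below.
 */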
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
			vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}
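
/*
 * Drain the packed control virtqueue: for every available descriptor chain,
 * process the control message, then write the descriptor flags back with a
 * store-release (AVAIL and USED both matching the wrap counter) to hand the
 * chain back, advancing used_idx and toggling the wrap counter when the
 * ring wraps.
 */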
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

int
virtio_user_send_status_update(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	/* Vhost-user only for now */
	if (!is_vhost_user_by_type(dev->path))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
		return 0;

	ret = dev->ops->send_request(dev, VHOST_USER_SET_STATUS, &arg);
	if (ret) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
		return -1;
	}

	return 0;
}

int
virtio_user_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	int err;

	/* Vhost-user only for now */
	if (!is_vhost_user_by_type(dev->path))
		return 0;

	if (!(dev->protocol_features & (1UL << VHOST_USER_PROTOCOL_F_STATUS)))
		return 0;

	err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
	if (err) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
		return -1;
	}
	if (ret > UINT8_MAX) {
		PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS response 0x%" PRIx64 "\n", ret);
		return -1;
	}

	dev->status = ret;
	PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	return 0;
}