/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/un.h>
#include <string.h>
#include <errno.h>

#include <rte_string_fns.h>
#include <rte_fbarray.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* The version of the protocol we support */
#define VHOST_USER_VERSION    0x1

#define VHOST_MEMORY_MAX_NREGIONS 8
struct vhost_memory {
	uint32_t nregions;
	uint32_t padding;
	struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
};

struct vhost_user_msg {
	enum vhost_user_request request;

#define VHOST_USER_VERSION_MASK     0x3
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
	uint32_t flags;
	uint32_t size; /* the following payload size */
	union {
#define VHOST_USER_VRING_IDX_MASK   0xff
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
		uint64_t u64;
		struct vhost_vring_state state;
		struct vhost_vring_addr addr;
		struct vhost_memory memory;
	} payload;
	int fds[VHOST_MEMORY_MAX_NREGIONS];
} __rte_packed;

#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
#define VHOST_USER_PAYLOAD_SIZE \
	(sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)

static int
vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num)
{
	int r;
	struct msghdr msgh;
	struct iovec iov;
	size_t fd_size = fd_num * sizeof(int);
	char control[CMSG_SPACE(fd_size)];
	struct cmsghdr *cmsg;

	memset(&msgh, 0, sizeof(msgh));
	memset(control, 0, sizeof(control));

	iov.iov_base = (uint8_t *)msg;
	iov.iov_len = VHOST_USER_HDR_SIZE + msg->size;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msgh);
	cmsg->cmsg_len = CMSG_LEN(fd_size);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmsg), fds, fd_size);

	do {
		r = sendmsg(fd, &msgh, 0);
	} while (r < 0 && errno == EINTR);

	if (r < 0)
		PMD_DRV_LOG(ERR, "Failed to send msg: %s", strerror(errno));

	return r;
}

static int
vhost_user_read(int fd, struct vhost_user_msg *msg)
{
	uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
	int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload;

	ret = recv(fd, (void *)msg, sz_hdr, 0);
	if (ret < sz_hdr) {
		PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.",
			ret, sz_hdr);
		goto fail;
	}

	/* validate msg flags */
	if (msg->flags != (valid_flags)) {
		PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.",
			msg->flags, valid_flags);
		goto fail;
	}

	sz_payload = msg->size;

	if ((size_t)sz_payload > sizeof(msg->payload))
		goto fail;

	if (sz_payload) {
		ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
		if (ret < sz_payload) {
			PMD_DRV_LOG(ERR,
				"Failed to recv msg payload: %d instead of %d.",
				ret, msg->size);
			goto fail;
		}
	}

	return 0;

fail:
	return -1;
}
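
/*
 * Read back and validate the reply-ack for a request that was sent with
 * VHOST_USER_NEED_REPLY_MASK set: the reply must match the original request
 * type and carry a u64 payload, where any non-zero value means the backend
 * rejected the request.
 */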
static int
vhost_user_check_reply_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
{
	enum vhost_user_request req = msg->request;
	int ret;

	if (!(msg->flags & VHOST_USER_NEED_REPLY_MASK))
		return 0;

	ret = vhost_user_read(dev->vhostfd, msg);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to read reply-ack");
		return -1;
	}

	if (req != msg->request) {
		PMD_DRV_LOG(ERR, "Unexpected reply-ack request type (%d)", msg->request);
		return -1;
	}

	if (msg->size != sizeof(msg->payload.u64)) {
		PMD_DRV_LOG(ERR, "Unexpected reply-ack payload size (%u)", msg->size);
		return -1;
	}

	if (msg->payload.u64) {
		PMD_DRV_LOG(ERR, "Slave replied NACK to request type (%d)", msg->request);
		return -1;
	}

	return 0;
}

static int
vhost_user_set_owner(struct virtio_user_dev *dev)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_OWNER,
		.flags = VHOST_USER_VERSION,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set owner");
		return -1;
	}

	return 0;
}

static int
vhost_user_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_GET_FEATURES,
		.flags = VHOST_USER_VERSION,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0)
		goto err;

	ret = vhost_user_read(dev->vhostfd, &msg);
	if (ret < 0)
		goto err;

	if (msg.request != VHOST_USER_GET_FEATURES) {
		PMD_DRV_LOG(ERR, "Unexpected request type (%d)", msg.request);
		goto err;
	}

	if (msg.size != sizeof(*features)) {
		PMD_DRV_LOG(ERR, "Unexpected payload size (%u)", msg.size);
		goto err;
	}

	*features = msg.payload.u64;

	return 0;
err:
	PMD_DRV_LOG(ERR, "Failed to get backend features");

	return -1;
}

static int
vhost_user_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_FEATURES,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(features),
		.payload.u64 = features,
	};

	msg.payload.u64 |= dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set features");
		return -1;
	}

	return 0;
}

static int
vhost_user_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_GET_PROTOCOL_FEATURES,
		.flags = VHOST_USER_VERSION,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0)
		goto err;

	ret = vhost_user_read(dev->vhostfd, &msg);
	if (ret < 0)
		goto err;

	if (msg.request != VHOST_USER_GET_PROTOCOL_FEATURES) {
		PMD_DRV_LOG(ERR, "Unexpected request type (%d)", msg.request);
		goto err;
	}

	if (msg.size != sizeof(*features)) {
		PMD_DRV_LOG(ERR, "Unexpected payload size (%u)", msg.size);
		goto err;
	}

	*features = msg.payload.u64;

	return 0;
err:
	PMD_DRV_LOG(ERR, "Failed to get backend protocol features");

	return -1;
}

static int
vhost_user_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_PROTOCOL_FEATURES,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(features),
		.payload.u64 = features,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set protocol features");
		return -1;
	}

	return 0;
}
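
/*
 * Helpers used to build the VHOST_USER_SET_MEM_TABLE payload: the memseg
 * walk below groups memory segments by the file descriptor backing them,
 * extending an existing region when a segment shares its fd and creating a
 * new one (up to VHOST_MEMORY_MAX_NREGIONS) otherwise.
 */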
struct walk_arg {
	struct vhost_memory *vm;
	int *fds;
	int region_nr;
};

static int
update_memory_region(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct walk_arg *wa = arg;
	struct vhost_memory_region *mr;
	uint64_t start_addr, end_addr;
	size_t offset;
	int i, fd;

	fd = rte_memseg_get_fd_thread_unsafe(ms);
	if (fd < 0) {
		PMD_DRV_LOG(ERR, "Failed to get fd, ms=%p rte_errno=%d",
			ms, rte_errno);
		return -1;
	}

	if (rte_memseg_get_fd_offset_thread_unsafe(ms, &offset) < 0) {
		PMD_DRV_LOG(ERR, "Failed to get offset, ms=%p rte_errno=%d",
			ms, rte_errno);
		return -1;
	}

	start_addr = (uint64_t)(uintptr_t)ms->addr;
	end_addr = start_addr + ms->len;

	for (i = 0; i < wa->region_nr; i++) {
		if (wa->fds[i] != fd)
			continue;

		mr = &wa->vm->regions[i];

		if (mr->userspace_addr + mr->memory_size < end_addr)
			mr->memory_size = end_addr - mr->userspace_addr;

		if (mr->userspace_addr > start_addr) {
			mr->userspace_addr = start_addr;
			mr->guest_phys_addr = start_addr;
		}

		if (mr->mmap_offset > offset)
			mr->mmap_offset = offset;

		PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
			" addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
			mr->mmap_offset, mr->userspace_addr,
			mr->memory_size);

		return 0;
	}

	if (i >= VHOST_MEMORY_MAX_NREGIONS) {
		PMD_DRV_LOG(ERR, "Too many memory regions");
		return -1;
	}

	mr = &wa->vm->regions[i];
	wa->fds[i] = fd;

	mr->guest_phys_addr = start_addr;
	mr->userspace_addr = start_addr;
	mr->memory_size = ms->len;
	mr->mmap_offset = offset;

	PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
		" addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
		mr->mmap_offset, mr->userspace_addr,
		mr->memory_size);

	wa->region_nr++;

	return 0;
}

static int
vhost_user_set_memory_table(struct virtio_user_dev *dev)
{
	struct walk_arg wa;
	int fds[VHOST_MEMORY_MAX_NREGIONS];
	int ret, fd_num;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_MEM_TABLE,
		.flags = VHOST_USER_VERSION,
	};

	if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
		msg.flags |= VHOST_USER_NEED_REPLY_MASK;

	wa.region_nr = 0;
	wa.vm = &msg.payload.memory;
	wa.fds = fds;

	/*
	 * The memory lock has already been taken by memory subsystem
	 * or virtio_user_start_device().
	 */
	ret = rte_memseg_walk_thread_unsafe(update_memory_region, &wa);
	if (ret < 0)
		goto err;

	fd_num = wa.region_nr;
	msg.payload.memory.nregions = wa.region_nr;
	msg.payload.memory.padding = 0;

	msg.size = sizeof(msg.payload.memory.nregions);
	msg.size += sizeof(msg.payload.memory.padding);
	msg.size += fd_num * sizeof(struct vhost_memory_region);

	ret = vhost_user_write(dev->vhostfd, &msg, fds, fd_num);
	if (ret < 0)
		goto err;

	return vhost_user_check_reply_ack(dev, &msg);
err:
	PMD_DRV_LOG(ERR, "Failed to set memory table");
	return -1;
}

static int
vhost_user_set_vring(struct virtio_user_dev *dev, enum vhost_user_request req,
		struct vhost_vring_state *state)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = req,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(*state),
		.payload.state = *state,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set vring state (request %d)", req);
		return -1;
	}

	return 0;
}

static int
vhost_user_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_user_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, state);
}

static int
vhost_user_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_user_set_vring(dev, VHOST_USER_SET_VRING_NUM, state);
}

static int
vhost_user_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_user_set_vring(dev, VHOST_USER_SET_VRING_BASE, state);
}

static int
vhost_user_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	int ret;
	struct vhost_user_msg msg;
	unsigned int index = state->index;

	ret = vhost_user_set_vring(dev, VHOST_USER_GET_VRING_BASE, state);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send request");
		goto err;
	}

	ret = vhost_user_read(dev->vhostfd, &msg);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to read reply");
		goto err;
	}

	if (msg.request != VHOST_USER_GET_VRING_BASE) {
		PMD_DRV_LOG(ERR, "Unexpected request type (%d)", msg.request);
		goto err;
	}

	if (msg.size != sizeof(*state)) {
		PMD_DRV_LOG(ERR, "Unexpected payload size (%u)", msg.size);
		goto err;
	}

	if (msg.payload.state.index != index) {
		PMD_DRV_LOG(ERR, "Unexpected ring index (%u)", state->index);
		goto err;
	}

	*state = msg.payload.state;

	return 0;
err:
	PMD_DRV_LOG(ERR, "Failed to get vring base");
	return -1;
}
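
/*
 * Vring file requests (VHOST_USER_SET_VRING_KICK/CALL) carry the vring
 * index in the u64 payload and pass the eventfd as ancillary data; when no
 * descriptor is supplied (fd < 0), VHOST_USER_VRING_NOFD_MASK is set and no
 * fd is attached to the message.
 */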
static int
vhost_user_set_vring_file(struct virtio_user_dev *dev, enum vhost_user_request req,
		struct vhost_vring_file *file)
{
	int ret;
	int fd = file->fd;
	int num_fd = 0;
	struct vhost_user_msg msg = {
		.request = req,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.u64),
		.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
	};

	if (fd >= 0)
		num_fd++;
	else
		msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;

	ret = vhost_user_write(dev->vhostfd, &msg, &fd, num_fd);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set vring file (request %d)", req);
		return -1;
	}

	return 0;
}

static int
vhost_user_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_user_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int
vhost_user_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_user_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int
vhost_user_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_VRING_ADDR,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(*addr),
		.payload.addr = *addr,
	};

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send vring addresses");
		return -1;
	}

	return 0;
}
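
/*
 * GET_STATUS/SET_STATUS can only be used once features have been negotiated
 * and the backend has advertised both VHOST_USER_F_PROTOCOL_FEATURES and
 * VHOST_USER_PROTOCOL_F_STATUS; otherwise these helpers return -ENOTSUP.
 */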
static int
vhost_user_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_GET_STATUS,
		.flags = VHOST_USER_VERSION,
	};

	/*
	 * If features have not been negotiated, we don't know if the backend
	 * supports protocol features
	 */
	if (!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK))
		return -ENOTSUP;

	/* Status protocol feature requires protocol features support */
	if (!(dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
		return -ENOTSUP;

	if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
		return -ENOTSUP;

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send request");
		goto err;
	}

	ret = vhost_user_read(dev->vhostfd, &msg);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to recv reply");
		goto err;
	}

	if (msg.request != VHOST_USER_GET_STATUS) {
		PMD_DRV_LOG(ERR, "Unexpected request type (%d)", msg.request);
		goto err;
	}

	if (msg.size != sizeof(msg.payload.u64)) {
		PMD_DRV_LOG(ERR, "Unexpected payload size (%u)", msg.size);
		goto err;
	}

	*status = (uint8_t)msg.payload.u64;

	return 0;
err:
	PMD_DRV_LOG(ERR, "Failed to get device status");
	return -1;
}

static int
vhost_user_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	struct vhost_user_msg msg = {
		.request = VHOST_USER_SET_STATUS,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.u64),
		.payload.u64 = status,
	};

	/*
	 * If features have not been negotiated, we don't know if the backend
	 * supports protocol features
	 */
	if (!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK))
		return -ENOTSUP;

	/* Status protocol feature requires protocol features support */
	if (!(dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
		return -ENOTSUP;

	if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
		return -ENOTSUP;

	if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
		msg.flags |= VHOST_USER_NEED_REPLY_MASK;

	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send set status request");
		return -1;
	}

	return vhost_user_check_reply_ack(dev, &msg);
}

#define MAX_VIRTIO_USER_BACKLOG 1
static int
virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
{
	int ret;
	int flag;
	int fd = dev->listenfd;

	ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again",
			dev->path, strerror(errno));
		return -1;
	}
	ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
	if (ret < 0)
		return -1;

	flag = fcntl(fd, F_GETFL);
	if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
		PMD_DRV_LOG(ERR, "fcntl failed, %s", strerror(errno));
		return -1;
	}

	return 0;
}

/**
 * Set up environment to talk with a vhost user backend.
 *
 * @return
 *   - (-1) if fail;
 *   - (0) if succeed.
 */
static int
vhost_user_setup(struct virtio_user_dev *dev)
{
	int fd;
	int flag;
	struct sockaddr_un un;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0) {
		PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
		return -1;
	}

	flag = fcntl(fd, F_GETFD);
	if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
		PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno));

	memset(&un, 0, sizeof(un));
	un.sun_family = AF_UNIX;
	strlcpy(un.sun_path, dev->path, sizeof(un.sun_path));

	if (dev->is_server) {
		dev->listenfd = fd;
		if (virtio_user_start_server(dev, &un) < 0) {
			PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
			close(fd);
			return -1;
		}
		dev->vhostfd = -1;
	} else {
		if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
			PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
			close(fd);
			return -1;
		}
		dev->vhostfd = fd;
	}

	return 0;
}

static int
vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
			     uint16_t pair_idx,
			     int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num = enable,
		};

		if (vhost_user_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;
	return 0;
}

struct virtio_user_backend_ops virtio_ops_user = {
	.setup = vhost_user_setup,
	.set_owner = vhost_user_set_owner,
	.get_features = vhost_user_get_features,
	.set_features = vhost_user_set_features,
	.get_protocol_features = vhost_user_get_protocol_features,
	.set_protocol_features = vhost_user_set_protocol_features,
	.set_memory_table = vhost_user_set_memory_table,
	.set_vring_num = vhost_user_set_vring_num,
	.set_vring_base = vhost_user_set_vring_base,
	.get_vring_base = vhost_user_get_vring_base,
	.set_vring_call = vhost_user_set_vring_call,
	.set_vring_kick = vhost_user_set_vring_kick,
	.set_vring_addr = vhost_user_set_vring_addr,
	.get_status = vhost_user_get_status,
	.set_status = vhost_user_set_status,
	.enable_qp = vhost_user_enable_queue_pair
};