1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2023 Red Hat, Inc. 3 */ 4 5 #include <stdint.h> 6 #include <stdio.h> 7 #include <unistd.h> 8 #include <fcntl.h> 9 10 11 #include <linux/vduse.h> 12 #include <linux/virtio_net.h> 13 14 #include <sys/ioctl.h> 15 #include <sys/mman.h> 16 #include <sys/stat.h> 17 18 #include <rte_common.h> 19 20 #include "fd_man.h" 21 #include "iotlb.h" 22 #include "vduse.h" 23 #include "vhost.h" 24 #include "virtio_net_ctrl.h" 25 26 #define VHOST_VDUSE_API_VERSION 0 27 #define VDUSE_CTRL_PATH "/dev/vduse/control" 28 29 struct vduse { 30 struct fdset fdset; 31 }; 32 33 static struct vduse vduse = { 34 .fdset = { 35 .fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} }, 36 .fd_mutex = PTHREAD_MUTEX_INITIALIZER, 37 .fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER, 38 .num = 0 39 }, 40 }; 41 42 static bool vduse_events_thread; 43 44 static const char * const vduse_reqs_str[] = { 45 "VDUSE_GET_VQ_STATE", 46 "VDUSE_SET_STATUS", 47 "VDUSE_UPDATE_IOTLB", 48 }; 49 50 #define vduse_req_id_to_str(id) \ 51 (id < RTE_DIM(vduse_reqs_str) ? 
\ 52 vduse_reqs_str[id] : "Unknown") 53 54 static int 55 vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq) 56 { 57 return ioctl(dev->vduse_dev_fd, VDUSE_VQ_INJECT_IRQ, &vq->index); 58 } 59 60 static void 61 vduse_iotlb_remove_notify(uint64_t addr, uint64_t offset, uint64_t size) 62 { 63 munmap((void *)(uintptr_t)addr, offset + size); 64 } 65 66 static int 67 vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unused) 68 { 69 struct vduse_iotlb_entry entry; 70 uint64_t size, page_size; 71 struct stat stat; 72 void *mmap_addr; 73 int fd, ret; 74 75 entry.start = iova; 76 entry.last = iova + 1; 77 78 ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry); 79 if (ret < 0) { 80 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64 "\n", 81 iova); 82 return -1; 83 } 84 85 fd = ret; 86 87 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "New IOTLB entry:\n"); 88 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64 "\n", 89 (uint64_t)entry.start, (uint64_t)entry.last); 90 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\toffset: %" PRIx64 "\n", (uint64_t)entry.offset); 91 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tfd: %d\n", fd); 92 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tperm: %x\n", entry.perm); 93 94 size = entry.last - entry.start + 1; 95 mmap_addr = mmap(0, size + entry.offset, entry.perm, MAP_SHARED, fd, 0); 96 if (!mmap_addr) { 97 VHOST_LOG_CONFIG(dev->ifname, ERR, 98 "Failed to mmap IOTLB entry for 0x%" PRIx64 "\n", iova); 99 ret = -1; 100 goto close_fd; 101 } 102 103 ret = fstat(fd, &stat); 104 if (ret < 0) { 105 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get page size.\n"); 106 munmap(mmap_addr, entry.offset + size); 107 goto close_fd; 108 } 109 page_size = (uint64_t)stat.st_blksize; 110 111 vhost_user_iotlb_cache_insert(dev, entry.start, (uint64_t)(uintptr_t)mmap_addr, 112 entry.offset, size, page_size, entry.perm); 113 114 ret = 0; 115 close_fd: 116 close(fd); 117 118 return ret; 119 } 120 121 
static struct vhost_backend_ops vduse_backend_ops = { 122 .iotlb_miss = vduse_iotlb_miss, 123 .iotlb_remove_notify = vduse_iotlb_remove_notify, 124 .inject_irq = vduse_inject_irq, 125 }; 126 127 static void 128 vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused) 129 { 130 struct virtio_net *dev = arg; 131 uint64_t buf; 132 int ret; 133 134 ret = read(fd, &buf, sizeof(buf)); 135 if (ret < 0) { 136 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read control queue event: %s\n", 137 strerror(errno)); 138 return; 139 } 140 141 VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue kicked\n"); 142 if (virtio_net_ctrl_handle(dev)) 143 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle ctrl request\n"); 144 } 145 146 static void 147 vduse_vring_setup(struct virtio_net *dev, unsigned int index) 148 { 149 struct vhost_virtqueue *vq = dev->virtqueue[index]; 150 struct vhost_vring_addr *ra = &vq->ring_addrs; 151 struct vduse_vq_info vq_info; 152 struct vduse_vq_eventfd vq_efd; 153 int ret; 154 155 vq_info.index = index; 156 ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_GET_INFO, &vq_info); 157 if (ret) { 158 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get VQ %u info: %s\n", 159 index, strerror(errno)); 160 return; 161 } 162 163 VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index); 164 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num); 165 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr); 166 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr); 167 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr); 168 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index); 169 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready); 170 171 vq->last_avail_idx = vq_info.split.avail_index; 172 vq->size = vq_info.num; 173 vq->ready = true; 174 vq->enabled = vq_info.ready; 175 ra->desc_user_addr = vq_info.desc_addr; 176 
ra->avail_user_addr = vq_info.driver_addr; 177 ra->used_user_addr = vq_info.device_addr; 178 179 vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); 180 if (vq->kickfd < 0) { 181 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s\n", 182 index, strerror(errno)); 183 vq->kickfd = VIRTIO_INVALID_EVENTFD; 184 return; 185 } 186 VHOST_LOG_CONFIG(dev->ifname, INFO, "\tkick fd: %d\n", vq->kickfd); 187 188 vq->shadow_used_split = rte_malloc_socket(NULL, 189 vq->size * sizeof(struct vring_used_elem), 190 RTE_CACHE_LINE_SIZE, 0); 191 vq->batch_copy_elems = rte_malloc_socket(NULL, 192 vq->size * sizeof(struct batch_copy_elem), 193 RTE_CACHE_LINE_SIZE, 0); 194 195 vhost_user_iotlb_rd_lock(vq); 196 if (vring_translate(dev, vq)) 197 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n", 198 index); 199 200 if (vhost_enable_guest_notification(dev, vq, 0)) 201 VHOST_LOG_CONFIG(dev->ifname, ERR, 202 "Failed to disable guest notifications on vring %d\n", 203 index); 204 vhost_user_iotlb_rd_unlock(vq); 205 206 vq_efd.index = index; 207 vq_efd.fd = vq->kickfd; 208 209 ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd); 210 if (ret) { 211 VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s\n", 212 index, strerror(errno)); 213 close(vq->kickfd); 214 vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; 215 return; 216 } 217 218 if (vq == dev->cvq) { 219 ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev); 220 if (ret) { 221 VHOST_LOG_CONFIG(dev->ifname, ERR, 222 "Failed to setup kickfd handler for VQ %u: %s\n", 223 index, strerror(errno)); 224 vq_efd.fd = VDUSE_EVENTFD_DEASSIGN; 225 ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd); 226 close(vq->kickfd); 227 vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; 228 } 229 fdset_pipe_notify(&vduse.fdset); 230 vhost_enable_guest_notification(dev, vq, 1); 231 VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl queue event handler installed\n"); 232 } 233 } 
/*
 * Tear down virtqueue @index, undoing vduse_vring_setup(): remove the
 * control-queue handler from the fdset, deassign and close the kickfd,
 * invalidate the vring translation and release the metadata buffers.
 */
static void
vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[index];
	struct vduse_vq_eventfd vq_efd;
	int ret;

	/* The control queue's kickfd was registered with the fdset at
	 * setup time; unregister it before closing the fd.
	 */
	if (vq == dev->cvq && vq->kickfd >= 0) {
		fdset_del(&vduse.fdset, vq->kickfd);
		fdset_pipe_notify(&vduse.fdset);
	}

	vq_efd.index = index;
	vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;

	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
	if (ret)
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s\n",
				index, strerror(errno));

	close(vq->kickfd);
	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

	vring_invalidate(dev, vq);

	rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = NULL;

	rte_free(vq->shadow_used_split);
	vq->shadow_used_split = NULL;

	vq->enabled = false;
	vq->ready = false;
	vq->size = 0;
	vq->last_used_idx = 0;
	vq->last_avail_idx = 0;
}

/*
 * Start the device (called when the driver sets DRIVER_OK): fetch the
 * negotiated features from the kernel, set up all virtqueues and
 * notify the application through its registered callbacks.
 */
static void
vduse_device_start(struct virtio_net *dev)
{
	unsigned int i, ret;

	VHOST_LOG_CONFIG(dev->ifname, INFO, "Starting device...\n");

	dev->notify_ops = vhost_driver_callback_get(dev->ifname);
	if (!dev->notify_ops) {
		VHOST_LOG_CONFIG(dev->ifname, ERR,
				"Failed to get callback ops for driver\n");
		return;
	}

	ret = ioctl(dev->vduse_dev_fd, VDUSE_DEV_GET_FEATURES, &dev->features);
	if (ret) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get features: %s\n",
				strerror(errno));
		return;
	}

	VHOST_LOG_CONFIG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64 "\n",
		dev->features);

	/* The virtio-net header size depends on the negotiated features. */
	if (dev->features &
		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		 (1ULL << VIRTIO_F_VERSION_1) |
		 (1ULL << VIRTIO_F_RING_PACKED))) {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
	}

	for (i = 0; i < dev->nr_vring; i++)
		vduse_vring_setup(dev, i);

	dev->flags |= VIRTIO_DEV_READY;

	if (dev->notify_ops->new_device(dev->vid) == 0)
		dev->flags |= VIRTIO_DEV_RUNNING;

	/* Report ring states for the data queues only; the control queue
	 * is handled internally (see vduse_control_queue_event()).
	 */
	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq == dev->cvq)
			continue;

		if (dev->notify_ops->vring_state_changed)
			dev->notify_ops->vring_state_changed(dev->vid, i, vq->enabled);
	}
}

/*
 * Stop the device (called when the driver clears DRIVER_OK): notify
 * the application, tear down all virtqueues and flush the IOTLB cache.
 */
static void
vduse_device_stop(struct virtio_net *dev)
{
	unsigned int i;

	VHOST_LOG_CONFIG(dev->ifname, INFO, "Stopping device...\n");

	vhost_destroy_device_notify(dev);

	dev->flags &= ~VIRTIO_DEV_READY;

	for (i = 0; i < dev->nr_vring; i++)
		vduse_vring_cleanup(dev, i);

	vhost_user_iotlb_flush_all(dev);
}

/*
 * Handler for requests coming from the VDUSE kernel module on the
 * device fd: VQ state queries, device status changes and IOTLB
 * invalidations. Runs in the fdset dispatcher thread.
 */
static void
vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
{
	struct virtio_net *dev = arg;
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct vhost_virtqueue *vq;
	uint8_t old_status = dev->status;
	int ret;

	memset(&resp, 0, sizeof(resp));

	ret = read(fd, &req, sizeof(req));
	if (ret < 0) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read request: %s\n",
				strerror(errno));
		return;
	} else if (ret < (int)sizeof(req)) {
		/* A short read (including 0) means a truncated request. */
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Incomplete to read request %d\n", ret);
		return;
	}

	VHOST_LOG_CONFIG(dev->ifname, INFO, "New request: %s (%u)\n",
			vduse_req_id_to_str(req.type), req.type);

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* NOTE(review): req.vq_state.index comes from the kernel and
		 * is used unchecked to index dev->virtqueue[] — confirm the
		 * kernel guarantees it is within the configured vq_num.
		 */
		vq = dev->virtqueue[req.vq_state.index];
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u\n",
				req.vq_state.index, vq->last_avail_idx);
		resp.vq_state.split.avail_index = vq->last_avail_idx;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_SET_STATUS:
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnew status: 0x%08x\n",
				req.s.status);
		/* old_status already holds dev->status from its initializer;
		 * re-reading it here is redundant but harmless. It is used
		 * below to detect DRIVER_OK transitions.
		 */
		old_status = dev->status;
		dev->status = req.s.status;
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	case VDUSE_UPDATE_IOTLB:
		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64 "\n",
				(uint64_t)req.iova.start, (uint64_t)req.iova.last);
		/* Drop the cached entries covering the invalidated range;
		 * they will be re-faulted via vduse_iotlb_miss() if needed.
		 */
		vhost_user_iotlb_cache_remove(dev, req.iova.start,
				req.iova.last - req.iova.start + 1);
		resp.result = VDUSE_REQ_RESULT_OK;
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}

	resp.request_id = req.request_id;

	ret = write(dev->vduse_dev_fd, &resp, sizeof(resp));
	if (ret != sizeof(resp)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to write response %s\n",
				strerror(errno));
		return;
	}

	/* Start or stop the device on DRIVER_OK status bit transitions,
	 * after the response has been acknowledged to the kernel.
	 */
	if ((old_status ^ dev->status) & VIRTIO_DEVICE_STATUS_DRIVER_OK) {
		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
			vduse_device_start(dev);
		else
			vduse_device_stop(dev);
	}

	VHOST_LOG_CONFIG(dev->ifname, INFO, "Request %s (%u) handled successfully\n",
			vduse_req_id_to_str(req.type), req.type);
}

/*
 * Create a VDUSE device for @path (expected form "/dev/vduse/<name>"):
 * lazily spawns the events dispatcher thread, creates the device via
 * the VDUSE control chardev, opens its device fd, allocates the vhost
 * device and its virtqueues, and registers the events handler.
 *
 * Returns 0 on success, negative value on failure.
 */
int
vduse_device_create(const char *path, bool compliant_ol_flags)
{
	int control_fd, dev_fd, vid, ret;
	pthread_t fdset_tid;
	uint32_t i, max_queue_pairs, total_queues;
	struct virtio_net *dev;
	struct virtio_net_config vnet_config = {{ 0 }};
	uint64_t ver = VHOST_VDUSE_API_VERSION;
	uint64_t features;
	struct vduse_dev_config *dev_config = NULL;
	/* Device name is the path with the "/dev/vduse/" prefix stripped. */
	const char *name = path + strlen("/dev/vduse/");

	/* If first device, create events dispatcher thread */
	if (vduse_events_thread == false) {
		/**
		 * create a pipe which will be waited by poll and notified to
		 * rebuild the wait list of poll.
		 */
		if (fdset_pipe_init(&vduse.fdset) < 0) {
			VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vduse fdset\n");
			return -1;
		}

		ret = rte_ctrl_thread_create(&fdset_tid, "dpdk-vduse-evt", NULL,
				fdset_event_dispatch, &vduse.fdset);
		if (ret != 0) {
			VHOST_LOG_CONFIG(path, ERR, "failed to create vduse fdset handling thread\n");
			fdset_pipe_uninit(&vduse.fdset);
			return -1;
		}

		vduse_events_thread = true;
	}

	control_fd = open(VDUSE_CTRL_PATH, O_RDWR);
	if (control_fd < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to open %s: %s\n",
				VDUSE_CTRL_PATH, strerror(errno));
		return -1;
	}

	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to set API version: %" PRIu64 ": %s\n",
				ver, strerror(errno));
		ret = -1;
		goto out_ctrl_close;
	}

	/* The device config uses a flexible array member for the
	 * device-specific (virtio-net) config space.
	 */
	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
			sizeof(vnet_config));
	if (!dev_config) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
		ret = -1;
		goto out_ctrl_close;
	}

	ret = rte_vhost_driver_get_features(path, &features);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to get backend features\n");
		goto out_free;
	}

	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to get max queue pairs\n");
		goto out_free;
	}

	VHOST_LOG_CONFIG(path, INFO, "VDUSE max queue pairs: %u\n", max_queue_pairs);
	total_queues = max_queue_pairs * 2;

	/* A single queue pair gets no control queue: mask out CTRL_VQ/MQ.
	 * Otherwise account for the extra control queue.
	 */
	if (max_queue_pairs == 1)
		features &= ~(RTE_BIT64(VIRTIO_NET_F_CTRL_VQ) | RTE_BIT64(VIRTIO_NET_F_MQ));
	else
		total_queues += 1; /* Includes ctrl queue */

	vnet_config.max_virtqueue_pairs = max_queue_pairs;
	/* Zeroing the struct also guarantees the name below is NUL-terminated. */
	memset(dev_config, 0, sizeof(struct vduse_dev_config));

	strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
	dev_config->device_id = VIRTIO_ID_NET;
	dev_config->vendor_id = 0;
	dev_config->features = features;
	dev_config->vq_num = total_queues;
	dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
	dev_config->config_size = sizeof(struct virtio_net_config);
	memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));

	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to create VDUSE device: %s\n",
				strerror(errno));
		goto out_free;
	}

	dev_fd = open(path, O_RDWR);
	if (dev_fd < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to open device %s: %s\n",
				path, strerror(errno));
		ret = -1;
		goto out_dev_close;
	}

	/* Non-blocking so the dispatcher thread never stalls on this fd. */
	ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to set chardev as non-blocking: %s\n",
				strerror(errno));
		goto out_dev_close;
	}

	vid = vhost_new_device(&vduse_backend_ops);
	if (vid < 0) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to create new Vhost device\n");
		ret = -1;
		goto out_dev_close;
	}

	dev = get_device(vid);
	if (!dev) {
		ret = -1;
		goto out_dev_close;
	}

	strncpy(dev->ifname, path, IF_NAME_SZ - 1);
	dev->vduse_ctrl_fd = control_fd;
	dev->vduse_dev_fd = dev_fd;
	vhost_setup_virtio_net(dev->vid, true, compliant_ol_flags, true, true);

	for (i = 0; i < total_queues; i++) {
		struct vduse_vq_config vq_cfg = { 0 };

		ret = alloc_vring_queue(dev, i);
		if (ret) {
			VHOST_LOG_CONFIG(name, ERR, "Failed to alloc vring %d metadata\n", i);
			goto out_dev_destroy;
		}

		vq_cfg.index = i;
		vq_cfg.max_size = 1024;

		ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg);
		if (ret) {
			VHOST_LOG_CONFIG(name, ERR, "Failed to set-up VQ %d\n", i);
			goto out_dev_destroy;
		}
	}

	/* The control queue, when present, is the last one (index 2*pairs).
	 * NOTE(review): with a single queue pair this slot was never
	 * allocated above (total_queues == 2) — confirm dev->virtqueue[2]
	 * is NULL-initialized so dev->cvq ends up NULL in that case.
	 */
	dev->cvq = dev->virtqueue[max_queue_pairs * 2];

	ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
	if (ret) {
		VHOST_LOG_CONFIG(name, ERR, "Failed to add fd %d to vduse fdset\n",
				dev->vduse_dev_fd);
		goto out_dev_destroy;
	}
	/* Wake the dispatcher so it rebuilds its poll list with the new fd. */
	fdset_pipe_notify(&vduse.fdset);

	free(dev_config);

	return 0;

out_dev_destroy:
	vhost_destroy_device(vid);
out_dev_close:
	if (dev_fd >= 0)
		close(dev_fd);
	ioctl(control_fd, VDUSE_DESTROY_DEV, name);
out_free:
	free(dev_config);
out_ctrl_close:
	close(control_fd);

	return ret;
}

/*
 * Destroy the VDUSE device created for @path: stop it, remove its fd
 * from the fdset, destroy the kernel-side device and release the vhost
 * device.
 *
 * Returns 0 on success, -1 if no device matches @path.
 */
int
vduse_device_destroy(const char *path)
{
	const char *name = path + strlen("/dev/vduse/");
	struct virtio_net *dev;
	int vid, ret;

	/* Look up the device whose ifname matches @path. */
	for (vid = 0; vid < RTE_MAX_VHOST_DEVICE; vid++) {
		dev = vhost_devices[vid];

		if (dev == NULL)
			continue;

		if (!strcmp(path, dev->ifname))
			break;
	}

	if (vid == RTE_MAX_VHOST_DEVICE)
		return -1;

	vduse_device_stop(dev);

	fdset_del(&vduse.fdset, dev->vduse_dev_fd);
	fdset_pipe_notify(&vduse.fdset);

	if (dev->vduse_dev_fd >= 0) {
		close(dev->vduse_dev_fd);
		dev->vduse_dev_fd = -1;
	}

	if (dev->vduse_ctrl_fd >= 0) {
		ret = ioctl(dev->vduse_ctrl_fd, VDUSE_DESTROY_DEV, name);
		if (ret)
			VHOST_LOG_CONFIG(name, ERR, "Failed to destroy VDUSE device: %s\n",
					strerror(errno));
		close(dev->vduse_ctrl_fd);
		dev->vduse_ctrl_fd = -1;
	}

	vhost_destroy_device(vid);

	return 0;
}