/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES
 * All rights reserved.
 */

/*
 * virtio-fs over vfio-user transport
 */
#include <linux/virtio_fs.h>

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/pci_ids.h"
#include "spdk/fuse_dispatcher.h"
#include "linux/fuse_kernel.h"

#include "vfu_virtio_internal.h"

#define VIRTIO_FS_SUPPORTED_FEATURES 0

struct virtio_fs_endpoint {
	struct vfu_virtio_endpoint virtio;

	/* virtio_fs specific configurations */
	struct spdk_fuse_dispatcher *fuse_disp;
	/* thread on which the FUSE dispatcher was created */
	struct spdk_thread *init_thread;
	struct spdk_io_channel *io_channel;
	struct virtio_fs_config fs_cfg;
	bool destruction_initiated;

	/* virtio_fs ring process poller */
	struct spdk_poller *ring_poller;
};

struct virtio_fs_req {
	volatile uint32_t *status;
	struct virtio_fs_endpoint *endpoint;
	/* KEEP req at last */
	struct vfu_virtio_req req;
};

static inline struct virtio_fs_endpoint *
to_fs_endpoint(struct vfu_virtio_endpoint *virtio_endpoint)
{
	return SPDK_CONTAINEROF(virtio_endpoint, struct virtio_fs_endpoint, virtio);
}

static inline struct virtio_fs_req *
to_fs_request(struct vfu_virtio_req *request)
{
	return SPDK_CONTAINEROF(request, struct virtio_fs_req, req);
}

/* Poll every enabled virtqueue of the device and process newly available descriptors */
static int
vfu_virtio_fs_vring_poll(void *ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = ctx;
	struct vfu_virtio_dev *dev = fs_endpoint->virtio.dev;
	struct vfu_virtio_vq *vq;
	uint32_t i, count = 0;

	if (spdk_unlikely(!virtio_dev_is_started(dev))) {
		return SPDK_POLLER_IDLE;
	}

	if (spdk_unlikely(fs_endpoint->virtio.quiesce_in_progress)) {
		return SPDK_POLLER_IDLE;
	}

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
			continue;
		}

		vfu_virtio_vq_flush_irq(dev, vq);

		if (vq->packed.packed_ring) {
			/* packed vring */
			count += vfu_virtio_dev_process_packed_ring(dev, vq);
		} else {
			/* split vring */
			count += vfu_virtio_dev_process_split_ring(dev, vq);
		}
	}

	return count ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
virtio_fs_start(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->ring_poller) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: starting...\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));

	fs_endpoint->io_channel = spdk_fuse_dispatcher_get_io_channel(fs_endpoint->fuse_disp);
	if (!fs_endpoint->io_channel) {
		SPDK_ERRLOG("%s: failed to get primary IO channel\n",
			    spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
		return -EINVAL;
	}

	fs_endpoint->ring_poller = SPDK_POLLER_REGISTER(vfu_virtio_fs_vring_poll, fs_endpoint, 0);

	return 0;
}

static void
_virtio_fs_stop_msg(void *ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = ctx;

	spdk_poller_unregister(&fs_endpoint->ring_poller);
	spdk_put_io_channel(fs_endpoint->io_channel);
	fs_endpoint->io_channel = NULL;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s is stopped\n",
		      spdk_vfu_get_endpoint_id(fs_endpoint->virtio.endpoint));
}

static int
virtio_fs_stop(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (!fs_endpoint->io_channel) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s stopping\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
	spdk_thread_send_msg(virtio_endpoint->thread, _virtio_fs_stop_msg, fs_endpoint);

	return 0;
}

static void
virtio_fs_req_finish(struct virtio_fs_req *fs_req, uint32_t status)
{
	struct vfu_virtio_req *req = &fs_req->req;

	if (spdk_likely(fs_req->status)) {
		*fs_req->status = status;
		fs_req->status = NULL;
	}

	vfu_virtio_finish_req(req);
}

static void
virtio_fs_fuse_req_done(void *cb_arg, int error)
{
	struct virtio_fs_req *fs_req = cb_arg;

	virtio_fs_req_finish(fs_req, -error);
}

static int
virtio_fs_process_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		      struct vfu_virtio_req *req)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);
	struct virtio_fs_req *fs_req = to_fs_request(req);
	struct iovec *iov;
	const struct fuse_in_header *in;
	uint32_t in_len;
	struct iovec *in_iov, *out_iov;
	int in_iovcnt, out_iovcnt;

	fs_req->endpoint = fs_endpoint;

	in_iov = &req->iovs[0];
	in_iovcnt = 0;

	if (spdk_unlikely(in_iov[0].iov_len < sizeof(*in))) {
		SPDK_ERRLOG("Invalid virtio_fs IN header length %zu\n", in_iov[0].iov_len);
		virtio_fs_req_finish(fs_req, ENOTSUP);
		return -EINVAL;
	}

	/*
	 * Split the descriptor chain into the device-readable part (fuse_in_header plus
	 * request payload, in->len bytes in total) and the device-writable part that will
	 * receive the FUSE reply.
	 */
	in = in_iov->iov_base;
	in_len = 0;
	while (true) {
		/* Don't run past the end of the descriptor chain */
		if (spdk_unlikely(in_iovcnt == req->iovcnt)) {
			SPDK_ERRLOG("Invalid IOV array: %d elements are shorter than the FUSE request length %" PRIu32 "\n",
				    in_iovcnt, in->len);
			virtio_fs_req_finish(fs_req, ENOTSUP);
			return -EINVAL;
		}

		iov = &req->iovs[in_iovcnt];
		in_len += iov->iov_len;
		in_iovcnt++;
		if (in_len == in->len) {
			break;
		} else if (in_len > in->len) {
			SPDK_ERRLOG("Invalid IOV array: length of %d elements (%" PRIu32 ") exceeds the FUSE request length %" PRIu32 "\n",
				    in_iovcnt, in_len, in->len);
			virtio_fs_req_finish(fs_req, ENOTSUP);
			return -EINVAL;
		}
	}

	out_iov = &req->iovs[in_iovcnt];
	out_iovcnt = req->iovcnt - in_iovcnt;

	spdk_fuse_dispatcher_submit_request(fs_endpoint->fuse_disp, fs_endpoint->io_channel,
					    in_iov, in_iovcnt, out_iov, out_iovcnt,
					    virtio_fs_fuse_req_done, fs_req);

	return 0;
}

static uint64_t
virtio_fs_get_supported_features(struct vfu_virtio_endpoint *virtio_endpoint)
{
	uint64_t features;

	features = VIRTIO_FS_SUPPORTED_FEATURES | VIRTIO_HOST_SUPPORTED_FEATURES;

	if (!virtio_endpoint->packed_ring) {
		features &= ~(1ULL << VIRTIO_F_RING_PACKED);
	}

	return features;
}

static struct vfu_virtio_req *
virtio_fs_alloc_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct virtio_fs_req *fs_req;

	/* The trailing space is used for the vfio-user DMA scatter-gather entries */
	fs_req = calloc(1, sizeof(*fs_req) + dma_sg_size() * (VIRTIO_DEV_MAX_IOVS + 1));
	if (!fs_req) {
		return NULL;
	}

	return &fs_req->req;
}

static void
virtio_fs_free_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		   struct vfu_virtio_req *req)
{
	struct virtio_fs_req *fs_req = to_fs_request(req);

	free(fs_req);
}

static int
virtio_fs_get_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				     uint64_t offset, uint64_t count)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);
	uint8_t *fs_cfg;
	uint64_t len;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: getting %" PRIu64 " config bytes at offset %" PRIu64
		      " (total: %zu)\n", spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      count, offset, sizeof(struct virtio_fs_config));

	if (offset >= sizeof(struct virtio_fs_config)) {
		SPDK_WARNLOG("Offset is beyond the config size\n");
		return -EINVAL;
	}

	len = spdk_min(sizeof(struct virtio_fs_config) - offset, count);

	fs_cfg = (uint8_t *)&fs_endpoint->fs_cfg;
	memcpy(buf, fs_cfg + offset, len);

	return 0;
}

static struct vfu_virtio_ops virtio_fs_ops = {
	.get_device_features = virtio_fs_get_supported_features,
	.alloc_req = virtio_fs_alloc_req,
	.free_req = virtio_fs_free_req,
	.exec_request = virtio_fs_process_req,
	.get_config = virtio_fs_get_device_specific_config,
	.start_device = virtio_fs_start,
	.stop_device = virtio_fs_stop,
};

static void _vfu_virtio_fs_fuse_disp_delete(void *cb_arg);

static void
_vfu_virtio_fs_fuse_dispatcher_delete_cpl(void *cb_arg, int error)
{
	struct virtio_fs_endpoint *fs_endpoint = cb_arg;

	if (error) {
		SPDK_ERRLOG("%s: FUSE dispatcher deletion failed with %d. Retrying...\n",
			    spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), error);
		spdk_thread_send_msg(spdk_get_thread(), _vfu_virtio_fs_fuse_disp_delete, fs_endpoint);
		/* A retry has been scheduled, so the dispatcher is not deleted yet */
		return;
	}

	SPDK_NOTICELOG("FUSE dispatcher deleted\n");
	fs_endpoint->fuse_disp = NULL;
}

static void
_vfu_virtio_fs_fuse_disp_delete(void *cb_arg)
{
	struct virtio_fs_endpoint *fs_endpoint = cb_arg;
	int res;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: initiating FUSE dispatcher deletion...\n",
		      spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp));

	res = spdk_fuse_dispatcher_delete(fs_endpoint->fuse_disp, _vfu_virtio_fs_fuse_dispatcher_delete_cpl,
					  fs_endpoint);
	if (res) {
		SPDK_ERRLOG("%s: FUSE dispatcher deletion failed with %d. Retrying...\n",
			    spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), res);
		spdk_thread_send_msg(spdk_get_thread(), _vfu_virtio_fs_fuse_disp_delete, fs_endpoint);
	}
}

static void
fuse_disp_event_cb(enum spdk_fuse_dispatcher_event_type type, struct spdk_fuse_dispatcher *disp,
		   void *event_ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = event_ctx;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: FUSE dispatcher event#%d arrived\n",
		      spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), type);

	switch (type) {
	case SPDK_FUSE_DISP_EVENT_FSDEV_REMOVE:
		SPDK_NOTICELOG("%s: received SPDK_FUSE_DISP_EVENT_FSDEV_REMOVE\n",
			       spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp));
		memset(&fs_endpoint->fs_cfg, 0, sizeof(fs_endpoint->fs_cfg));

		if (fs_endpoint->io_channel) {
			spdk_thread_send_msg(fs_endpoint->virtio.thread, _virtio_fs_stop_msg, fs_endpoint);
		}

		if (fs_endpoint->fuse_disp) {
			spdk_thread_send_msg(fs_endpoint->init_thread, _vfu_virtio_fs_fuse_disp_delete,
					     fs_endpoint);
			fs_endpoint->fuse_disp = NULL;
		}
		break;
	default:
		SPDK_NOTICELOG("%s: unsupported event type %d\n",
			       spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), type);
		break;
	}
}

struct vfu_virtio_fs_add_fsdev_ctx {
	struct spdk_vfu_endpoint *endpoint;
	vfu_virtio_fs_add_fsdev_cpl_cb cb;
	void *cb_arg;
};

static void
fuse_dispatcher_create_cpl(void *cb_arg, struct spdk_fuse_dispatcher *disp)
{
	struct vfu_virtio_fs_add_fsdev_ctx *ctx = cb_arg;
	struct spdk_vfu_endpoint *endpoint = ctx->endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_fs_endpoint *fs_endpoint;

	if (!disp) {
		SPDK_ERRLOG("%s: failed to create SPDK FUSE dispatcher\n",
			    spdk_vfu_get_endpoint_id(endpoint));
		ctx->cb(ctx->cb_arg, -EINVAL);
		free(ctx);
		return;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	fs_endpoint = to_fs_endpoint(virtio_endpoint);

	fs_endpoint->fuse_disp = disp;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: FUSE dispatcher created successfully\n",
		      spdk_fuse_dispatcher_get_fsdev_name(disp));

	ctx->cb(ctx->cb_arg, 0);
	free(ctx);
}

int
vfu_virtio_fs_add_fsdev(const char *name, const char *fsdev_name, const char *tag,
			uint16_t num_queues, uint16_t qsize, bool packed_ring,
			vfu_virtio_fs_add_fsdev_cpl_cb cb, void *cb_arg)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_fs_endpoint *fs_endpoint;
	struct vfu_virtio_fs_add_fsdev_ctx *ctx;
	size_t tag_len;
	int ret;

	if (!name || !fsdev_name || !tag) {
		SPDK_ERRLOG("name, fsdev_name and tag are mandatory\n");
		return -EINVAL;
	}

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->fuse_disp) {
		SPDK_ERRLOG("%s: FUSE dispatcher already exists\n", spdk_vfu_get_endpoint_id(endpoint));
		return -EEXIST;
	}

	tag_len = strlen(tag);
	if (tag_len > sizeof(fs_endpoint->fs_cfg.tag)) {
		SPDK_ERRLOG("%s: tag is too long (%s, %zu > %zu)\n", spdk_vfu_get_endpoint_id(endpoint), tag,
			    tag_len, sizeof(fs_endpoint->fs_cfg.tag));
		return -EINVAL;
	}

	if (num_queues && (num_queues <= VIRTIO_DEV_MAX_VQS)) {
		fs_endpoint->virtio.num_queues = num_queues;
	}
	if (qsize && (qsize <= VIRTIO_VQ_MAX_SIZE)) {
		fs_endpoint->virtio.qsize = qsize;
	}
	fs_endpoint->virtio.packed_ring = packed_ring;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: add fsdev %s, tag=%s, num_queues %u, qsize %u, packed ring %s\n",
		      spdk_vfu_get_endpoint_id(endpoint), fsdev_name, tag, fs_endpoint->virtio.num_queues,
		      fs_endpoint->virtio.qsize, packed_ring ? "enabled" : "disabled");

	/* Update config */
	memset(&fs_endpoint->fs_cfg, 0, sizeof(fs_endpoint->fs_cfg));
	/* Exclude the hiprio queue */
	fs_endpoint->fs_cfg.num_request_queues = fs_endpoint->virtio.num_queues - 1;
	memcpy(fs_endpoint->fs_cfg.tag, tag, tag_len);
	fs_endpoint->init_thread = spdk_get_thread();

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate context\n");
		return -ENOMEM;
	}

	ctx->endpoint = endpoint;
	ctx->cb = cb;
	ctx->cb_arg = cb_arg;

	ret = spdk_fuse_dispatcher_create(fsdev_name, fuse_disp_event_cb, fs_endpoint,
					  fuse_dispatcher_create_cpl, ctx);
	if (ret) {
		SPDK_ERRLOG("Failed to create SPDK FUSE dispatcher for %s (err=%d)\n",
			    fsdev_name, ret);
		free(ctx);
		return ret;
	}

	return 0;
}

static void *
vfu_virtio_fs_endpoint_init(struct spdk_vfu_endpoint *endpoint,
			    char *basename, const char *endpoint_name)
{
	struct virtio_fs_endpoint *fs_endpoint;
	int ret;

	fs_endpoint = calloc(1, sizeof(*fs_endpoint));
	if (!fs_endpoint) {
		return NULL;
	}

	ret = vfu_virtio_endpoint_setup(&fs_endpoint->virtio, endpoint, basename, endpoint_name,
					&virtio_fs_ops);
	if (ret) {
		SPDK_ERRLOG("Failed to set up endpoint %s\n", endpoint_name);
		free(fs_endpoint);
		return NULL;
	}

	return (void *)&fs_endpoint->virtio;
}

static int
vfu_virtio_fs_endpoint_destruct(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->fuse_disp) {
		if (!fs_endpoint->destruction_initiated) {
			if (fs_endpoint->init_thread == spdk_get_thread()) {
				_vfu_virtio_fs_fuse_disp_delete(fs_endpoint);
			} else {
				spdk_thread_send_msg(fs_endpoint->init_thread, _vfu_virtio_fs_fuse_disp_delete,
						     fs_endpoint);
			}
			fs_endpoint->destruction_initiated = true;
		}
		/* The FUSE dispatcher deletion is asynchronous; ask the caller to retry */
		return -EAGAIN;
	}

	vfu_virtio_endpoint_destruct(&fs_endpoint->virtio);
	free(fs_endpoint);

	return 0;
}

static int
vfu_virtio_fs_get_device_info(struct spdk_vfu_endpoint *endpoint,
			      struct spdk_vfu_pci_device *device_info)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	vfu_virtio_get_device_info(&fs_endpoint->virtio, device_info);
	/* Fill Device ID */
	device_info->id.did = PCI_DEVICE_ID_VIRTIO_FS;

	return 0;
}

static struct spdk_vfu_endpoint_ops vfu_virtio_fs_ops = {
	.name = "virtio_fs",
	.init = vfu_virtio_fs_endpoint_init,
	.get_device_info = vfu_virtio_fs_get_device_info,
	.get_vendor_capability = vfu_virtio_get_vendor_capability,
	.post_memory_add = vfu_virtio_post_memory_add,
	.pre_memory_remove = vfu_virtio_pre_memory_remove,
	.reset_device = vfu_virtio_pci_reset_cb,
	.quiesce_device = vfu_virtio_quiesce_cb,
	.destruct = vfu_virtio_fs_endpoint_destruct,
	.attach_device = vfu_virtio_attach_device,
	.detach_device = vfu_virtio_detach_device,
};

static void
__attribute__((constructor)) _vfu_virtio_fs_pci_model_register(void)
{
	spdk_vfu_register_endpoint_ops(&vfu_virtio_fs_ops);
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_fs)
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_fs_data)