/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

/*
 * virtio-fs over vfio-user transport
 */
#include <linux/virtio_fs.h>

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/pci_ids.h"
#include "spdk/fuse_dispatcher.h"
#include "linux/fuse_kernel.h"

#include "vfu_virtio_internal.h"

#define VIRTIO_FS_SUPPORTED_FEATURES 0

struct virtio_fs_endpoint {
	struct vfu_virtio_endpoint virtio;

	/* virtio_fs specific configurations */
	struct spdk_fuse_dispatcher *fuse_disp;
	struct spdk_thread *init_thread;
	struct spdk_io_channel *io_channel;
	struct virtio_fs_config	fs_cfg;

	/* virtio_fs ring process poller */
	struct spdk_poller *ring_poller;
};

struct virtio_fs_req {
	volatile uint32_t *status;
	struct virtio_fs_endpoint *endpoint;
	/* KEEP req at last: virtio_fs_alloc_req() over-allocates so the DMA SG entries can follow req in memory */
	struct vfu_virtio_req req;
};

static inline struct virtio_fs_endpoint *
to_fs_endpoint(struct vfu_virtio_endpoint *virtio_endpoint)
{
	return SPDK_CONTAINEROF(virtio_endpoint, struct virtio_fs_endpoint, virtio);
}

static inline struct virtio_fs_req *
to_fs_request(struct vfu_virtio_req *request)
{
	return SPDK_CONTAINEROF(request, struct virtio_fs_req, req);
}

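/*
 * Ring poller: walks every enabled, active virtqueue of a started device, flushes
 * pending interrupts and processes new descriptors (split or packed layout).
 * Returns BUSY when any requests were handled, IDLE otherwise.
 */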
static int
vfu_virtio_fs_vring_poll(void *ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = ctx;
	struct vfu_virtio_dev *dev = fs_endpoint->virtio.dev;
	struct vfu_virtio_vq *vq;
	uint32_t i, count = 0;

	if (spdk_unlikely(!virtio_dev_is_started(dev))) {
		return SPDK_POLLER_IDLE;
	}

	if (spdk_unlikely(fs_endpoint->virtio.quiesce_in_progress)) {
		return SPDK_POLLER_IDLE;
	}

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
			continue;
		}

		vfu_virtio_vq_flush_irq(dev, vq);

		if (vq->packed.packed_ring) {
			/* packed vring */
			count += vfu_virtio_dev_process_packed_ring(dev, vq);
		} else {
			/* split vring */
			count += vfu_virtio_dev_process_split_ring(dev, vq);
		}
	}

	return count ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
virtio_fs_start(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->ring_poller) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: starting...\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
	fs_endpoint->io_channel = spdk_fuse_dispatcher_get_io_channel(fs_endpoint->fuse_disp);
	if (!fs_endpoint->io_channel) {
		SPDK_ERRLOG("%s: failed to get primary IO channel\n",
			    spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
		return -EINVAL;
	}

	fs_endpoint->ring_poller = SPDK_POLLER_REGISTER(vfu_virtio_fs_vring_poll, fs_endpoint, 0);
	return 0;
}

static void
_virtio_fs_stop_msg(void *ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = ctx;

	spdk_poller_unregister(&fs_endpoint->ring_poller);
	spdk_put_io_channel(fs_endpoint->io_channel);

	fs_endpoint->io_channel = NULL;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s is stopped\n",
		      spdk_vfu_get_endpoint_id(fs_endpoint->virtio.endpoint));
}

static int
virtio_fs_stop(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (!fs_endpoint->io_channel) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s stopping\n", spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
	spdk_thread_send_msg(virtio_endpoint->thread, _virtio_fs_stop_msg, fs_endpoint);
	return 0;
}

static void
virtio_fs_req_finish(struct virtio_fs_req *fs_req, uint32_t status)
{
	struct vfu_virtio_req *req = &fs_req->req;

	if (spdk_likely(fs_req->status)) {
		*fs_req->status = status;
		fs_req->status = NULL;
	}

	vfu_virtio_finish_req(req);
}

static void
virtio_fs_fuse_req_done(void *cb_arg, int error)
{
	struct virtio_fs_req *fs_req = cb_arg;

	virtio_fs_req_finish(fs_req, -error);
}

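/*
 * A virtio-fs request is laid out as a device-readable part (struct fuse_in_header
 * followed by the request payload) and a device-writable part for the reply.
 * fuse_in_header.len holds the total readable length, so it is used below to split
 * req->iovs into the IN vector handed to the FUSE dispatcher and the OUT vector
 * it fills with the response.
 */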
static int
virtio_fs_process_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		      struct vfu_virtio_req *req)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);
	struct virtio_fs_req *fs_req = to_fs_request(req);
	struct iovec *iov;
	const struct fuse_in_header *in;
	uint32_t in_len;
	struct iovec *in_iov, *out_iov;
	int in_iovcnt, out_iovcnt;

	fs_req->endpoint = fs_endpoint;

	in_iov = &req->iovs[0];
	in_iovcnt = 0;

	if (spdk_unlikely(in_iov[0].iov_len < sizeof(*in))) {
		SPDK_ERRLOG("Invalid virtio_fs IN header length %zu\n", in_iov[0].iov_len);
		virtio_fs_req_finish(fs_req, ENOTSUP);
		return -EINVAL;
	}

	in = in_iov->iov_base;
	in_len = 0;
	while (true) {
		iov = &req->iovs[in_iovcnt];
		in_len += iov->iov_len;
		in_iovcnt++;
		if (in_len == in->len) {
			break;
		} else if (in_len > in->len) {
			SPDK_ERRLOG("Invalid IOV array: accumulated length of %d elements (%" PRIu32 ") exceeds header length %" PRIu32 "\n",
				    in_iovcnt, in_len, in->len);
			virtio_fs_req_finish(fs_req, ENOTSUP);
			return -EINVAL;
		}
	}

	out_iov = &req->iovs[in_iovcnt];
	out_iovcnt = req->iovcnt - in_iovcnt;

	spdk_fuse_dispatcher_submit_request(fs_endpoint->fuse_disp, fs_endpoint->io_channel,
					    in_iov, in_iovcnt, out_iov, out_iovcnt,
					    virtio_fs_fuse_req_done, fs_req);
	return 0;
}

static uint64_t
virtio_fs_get_supported_features(struct vfu_virtio_endpoint *virtio_endpoint)
{
	uint64_t features;

	features = VIRTIO_FS_SUPPORTED_FEATURES | VIRTIO_HOST_SUPPORTED_FEATURES;

	if (!virtio_endpoint->packed_ring) {
		features &= ~(1ULL << VIRTIO_F_RING_PACKED);
	}

	return features;
}

static struct vfu_virtio_req *
virtio_fs_alloc_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct virtio_fs_req *fs_req;

	/* Over-allocate so the DMA SG entries used by the vfu_virtio layer fit behind the embedded req */
	fs_req = calloc(1, sizeof(*fs_req) + dma_sg_size() * (VIRTIO_DEV_MAX_IOVS + 1));
	if (!fs_req) {
		return NULL;
	}

	return &fs_req->req;
}

static void
virtio_fs_free_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		   struct vfu_virtio_req *req)
{
	struct virtio_fs_req *fs_req = to_fs_request(req);

	free(fs_req);
}

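/*
 * Device-specific config space accessor: serves reads of struct virtio_fs_config
 * (tag, num_request_queues) from the copy cached in the endpoint, honoring partial
 * reads at an arbitrary offset.
 */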
static int
virtio_fs_get_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				     uint64_t offset, uint64_t count)
{
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);
	uint8_t *fs_cfg;
	uint64_t len;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: getting %" PRIu64 " config bytes at offset %" PRIu64
		      " (total: %zu)\n", spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      count, offset, sizeof(struct virtio_fs_config));

	if (offset >= sizeof(struct virtio_fs_config)) {
		SPDK_WARNLOG("Offset is beyond the config size\n");
		return -EINVAL;
	}

	len = spdk_min(sizeof(struct virtio_fs_config) - offset, count);

	fs_cfg = (uint8_t *)&fs_endpoint->fs_cfg;
	memcpy(buf, fs_cfg + offset, len);

	return 0;
}

static struct vfu_virtio_ops virtio_fs_ops = {
	.get_device_features = virtio_fs_get_supported_features,
	.alloc_req = virtio_fs_alloc_req,
	.free_req = virtio_fs_free_req,
	.exec_request = virtio_fs_process_req,
	.get_config = virtio_fs_get_device_specific_config,
	.start_device = virtio_fs_start,
	.stop_device = virtio_fs_stop,
};

static void _vfu_virtio_fs_fuse_disp_delete(void *cb_arg);

static void
_vfu_virtio_fs_fuse_dispatcher_delete_cpl(void *cb_arg, int error)
{
	struct spdk_fuse_dispatcher *fuse_disp = cb_arg;

	if (error) {
		SPDK_ERRLOG("%s: FUSE dispatcher deletion failed with %d. Retrying...\n",
			    spdk_fuse_dispatcher_get_fsdev_name(fuse_disp), error);
		spdk_thread_send_msg(spdk_get_thread(), _vfu_virtio_fs_fuse_disp_delete, fuse_disp);
		return;
	}

	SPDK_NOTICELOG("FUSE dispatcher deleted\n");
}

static void
_vfu_virtio_fs_fuse_disp_delete(void *cb_arg)
{
	struct spdk_fuse_dispatcher *fuse_disp = cb_arg;
	int res;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: initiating FUSE dispatcher deletion...\n",
		      spdk_fuse_dispatcher_get_fsdev_name(fuse_disp));

	res = spdk_fuse_dispatcher_delete(fuse_disp, _vfu_virtio_fs_fuse_dispatcher_delete_cpl, fuse_disp);
	if (res) {
		SPDK_ERRLOG("%s: FUSE dispatcher deletion failed with %d. Retrying...\n",
			    spdk_fuse_dispatcher_get_fsdev_name(fuse_disp), res);
		spdk_thread_send_msg(spdk_get_thread(), _vfu_virtio_fs_fuse_disp_delete, fuse_disp);
	}
}

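/*
 * FUSE dispatcher event callback. On fsdev hot-remove the endpoint clears the cached
 * device config, stops the ring poller and IO channel on the virtio thread, and
 * schedules the dispatcher deletion back on the thread that created it.
 */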
static void
fuse_disp_event_cb(enum spdk_fuse_dispatcher_event_type type, struct spdk_fuse_dispatcher *disp,
		   void *event_ctx)
{
	struct virtio_fs_endpoint *fs_endpoint = event_ctx;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: FUSE dispatcher event#%d arrived\n",
		      spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), type);

	switch (type) {
	case SPDK_FUSE_DISP_EVENT_FSDEV_REMOVE:
		SPDK_NOTICELOG("%s: received SPDK_FUSE_DISP_EVENT_FSDEV_REMOVE\n",
			       spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp));
		memset(&fs_endpoint->fs_cfg, 0, sizeof(fs_endpoint->fs_cfg));

		if (fs_endpoint->io_channel) {
			spdk_thread_send_msg(fs_endpoint->virtio.thread, _virtio_fs_stop_msg, fs_endpoint);
		}

		if (fs_endpoint->fuse_disp) {
			spdk_thread_send_msg(fs_endpoint->init_thread, _vfu_virtio_fs_fuse_disp_delete,
					     fs_endpoint->fuse_disp);
			fs_endpoint->fuse_disp = NULL;
		}
		break;
	default:
		SPDK_NOTICELOG("%s: unsupported event type %d\n",
			       spdk_fuse_dispatcher_get_fsdev_name(fs_endpoint->fuse_disp), type);
		break;
	}
}

struct vfu_virtio_fs_add_fsdev_ctx {
	struct spdk_vfu_endpoint *endpoint;
	vfu_virtio_fs_add_fsdev_cpl_cb cb;
	void *cb_arg;
};

static void
fuse_dispatcher_create_cpl(void *cb_arg, struct spdk_fuse_dispatcher *disp)
{
	struct vfu_virtio_fs_add_fsdev_ctx *ctx = cb_arg;
	struct spdk_vfu_endpoint *endpoint = ctx->endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_fs_endpoint *fs_endpoint;

	if (!disp) {
		SPDK_ERRLOG("%s: failed to create SPDK FUSE dispatcher\n",
			    spdk_vfu_get_endpoint_id(endpoint));
		ctx->cb(ctx->cb_arg, -EINVAL);
		free(ctx);
		return;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	fs_endpoint = to_fs_endpoint(virtio_endpoint);

	fs_endpoint->fuse_disp = disp;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: FUSE dispatcher created successfully\n",
		      spdk_fuse_dispatcher_get_fsdev_name(disp));

	ctx->cb(ctx->cb_arg, 0);
	free(ctx);
}

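/*
 * Attach an fsdev to an existing vfio-user endpoint: validates the tag against the
 * virtio_fs_config tag field, applies the optional queue count/size overrides, fills
 * the device config and asynchronously creates the FUSE dispatcher that will serve
 * the requests.
 */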
int
vfu_virtio_fs_add_fsdev(const char *name, const char *fsdev_name, const char *tag,
			uint16_t num_queues, uint16_t qsize, bool packed_ring,
			vfu_virtio_fs_add_fsdev_cpl_cb cb, void *cb_arg)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_fs_endpoint *fs_endpoint;
	struct vfu_virtio_fs_add_fsdev_ctx *ctx;
	size_t tag_len;
	int ret;

	if (!name || !fsdev_name || !tag) {
		SPDK_ERRLOG("name, fsdev_name and tag are mandatory\n");
		return -EINVAL;
	}

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->fuse_disp) {
		SPDK_ERRLOG("%s: FUSE dispatcher already exists\n", spdk_vfu_get_endpoint_id(endpoint));
		return -EEXIST;
	}

	tag_len = strlen(tag);
	if (tag_len > sizeof(fs_endpoint->fs_cfg.tag)) {
		SPDK_ERRLOG("%s: tag is too long (%s, %zu > %zu)\n", spdk_vfu_get_endpoint_id(endpoint), tag,
			    tag_len, sizeof(fs_endpoint->fs_cfg.tag));
		return -EINVAL;
	}

	if (num_queues && (num_queues <= VIRTIO_DEV_MAX_VQS)) {
		fs_endpoint->virtio.num_queues = num_queues;
	}
	if (qsize && (qsize <= VIRTIO_VQ_MAX_SIZE)) {
		fs_endpoint->virtio.qsize = qsize;
	}
	fs_endpoint->virtio.packed_ring = packed_ring;

	SPDK_DEBUGLOG(vfu_virtio_fs, "%s: add fsdev %s, tag=%s, num_queues %u, qsize %u, packed ring %s\n",
		      spdk_vfu_get_endpoint_id(endpoint), fsdev_name, tag, fs_endpoint->virtio.num_queues,
		      fs_endpoint->virtio.qsize, packed_ring ? "enabled" : "disabled");

	/* Update config */
	memset(&fs_endpoint->fs_cfg, 0, sizeof(fs_endpoint->fs_cfg));
	/* Exclude the hiprio queue from the request queue count */
	fs_endpoint->fs_cfg.num_request_queues = fs_endpoint->virtio.num_queues - 1;
	memcpy(fs_endpoint->fs_cfg.tag, tag, tag_len);
	fs_endpoint->init_thread = spdk_get_thread();

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate context\n");
		return -ENOMEM;
	}

	ctx->endpoint = endpoint;
	ctx->cb = cb;
	ctx->cb_arg = cb_arg;

	ret = spdk_fuse_dispatcher_create(fsdev_name, fuse_disp_event_cb, fs_endpoint,
					  fuse_dispatcher_create_cpl, ctx);
	if (ret) {
		SPDK_ERRLOG("Failed to create SPDK FUSE dispatcher for %s (err=%d)\n",
			    fsdev_name, ret);
		free(ctx);
		return ret;
	}

	return 0;
}

static void *
vfu_virtio_fs_endpoint_init(struct spdk_vfu_endpoint *endpoint,
			    char *basename, const char *endpoint_name)
{
	struct virtio_fs_endpoint *fs_endpoint;
	int ret;

	fs_endpoint = calloc(1, sizeof(*fs_endpoint));
	if (!fs_endpoint) {
		return NULL;
	}

	ret = vfu_virtio_endpoint_setup(&fs_endpoint->virtio, endpoint, basename, endpoint_name,
					&virtio_fs_ops);
	if (ret) {
		SPDK_ERRLOG("Failed to set up endpoint %s\n", endpoint_name);
		free(fs_endpoint);
		return NULL;
	}

	return (void *)&fs_endpoint->virtio;
}

static int
vfu_virtio_fs_endpoint_destruct(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	if (fs_endpoint->fuse_disp) {
		if (fs_endpoint->init_thread == spdk_get_thread()) {
			_vfu_virtio_fs_fuse_disp_delete(fs_endpoint->fuse_disp);
		} else {
			/* The dispatcher must be deleted on the thread that created it */
			spdk_thread_send_msg(fs_endpoint->init_thread, _vfu_virtio_fs_fuse_disp_delete,
					     fs_endpoint->fuse_disp);
		}
		fs_endpoint->fuse_disp = NULL;
	}

	vfu_virtio_endpoint_destruct(&fs_endpoint->virtio);
	free(fs_endpoint);

	return 0;
}

static int
vfu_virtio_fs_get_device_info(struct spdk_vfu_endpoint *endpoint,
			      struct spdk_vfu_pci_device *device_info)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_fs_endpoint *fs_endpoint = to_fs_endpoint(virtio_endpoint);

	vfu_virtio_get_device_info(&fs_endpoint->virtio, device_info);
	/* Fill Device ID */
	device_info->id.did = PCI_DEVICE_ID_VIRTIO_FS;

	return 0;
}

static struct spdk_vfu_endpoint_ops vfu_virtio_fs_ops = {
	.name = "virtio_fs",
	.init = vfu_virtio_fs_endpoint_init,
	.get_device_info = vfu_virtio_fs_get_device_info,
	.get_vendor_capability = vfu_virtio_get_vendor_capability,
	.post_memory_add = vfu_virtio_post_memory_add,
	.pre_memory_remove = vfu_virtio_pre_memory_remove,
	.reset_device = vfu_virtio_pci_reset_cb,
	.quiesce_device = vfu_virtio_quiesce_cb,
	.destruct = vfu_virtio_fs_endpoint_destruct,
	.attach_device = vfu_virtio_attach_device,
	.detach_device = vfu_virtio_detach_device,
};

static void
__attribute__((constructor)) _vfu_virtio_fs_pci_model_register(void)
{
	spdk_vfu_register_endpoint_ops(&vfu_virtio_fs_ops);
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_fs)
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_fs_data)