/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define FUSE_USE_VERSION 31

#include <fuse3/cuse_lowlevel.h>

#include <linux/nvme_ioctl.h>
#include <linux/fs.h>

#include "nvme_internal.h"
#include "nvme_io_msg.h"
#include "nvme_cuse.h"

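/*
 * One CUSE character device node: either the controller node itself
 * (nsid == 0, e.g. /dev/spdk/nvme0) or one of its namespace nodes
 * (e.g. /dev/spdk/nvme0n1). Namespace devices live in the controller
 * device's ns_devices array and point back to it via ctrlr_device.
 */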
struct cuse_device {
	bool				is_started;

	char				dev_name[128];
	uint32_t			index;
	int				claim_fd;
	char				lock_name[64];

	struct spdk_nvme_ctrlr		*ctrlr;		/**< NVMe controller */
	uint32_t			nsid;		/**< NVMe namespace id, or 0 */

	pthread_t			tid;
	struct fuse_session		*session;

	struct cuse_device		*ctrlr_device;
	struct cuse_device		*ns_devices;	/**< Array of cuse ns devices */

	TAILQ_ENTRY(cuse_device)	tailq;
};

static pthread_mutex_t g_cuse_mtx = PTHREAD_MUTEX_INITIALIZER;
static TAILQ_HEAD(, cuse_device) g_ctrlr_ctx_head = TAILQ_HEAD_INITIALIZER(g_ctrlr_ctx_head);
static struct spdk_bit_array *g_ctrlr_started;

struct cuse_io_ctx {
	struct spdk_nvme_cmd		nvme_cmd;
	enum spdk_nvme_data_transfer	data_transfer;

	uint64_t			lba;
	uint32_t			lba_count;

	void				*data;
	int				data_len;

	fuse_req_t			req;
};

static void
cuse_io_ctx_free(struct cuse_io_ctx *ctx)
{
	spdk_free(ctx->data);
	free(ctx);
}

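/*
 * CUSE delivers ioctl buffers iteratively: on the first pass out_bufsz may
 * be 0, in which case the handler must describe the output buffer it needs
 * via fuse_reply_ioctl_retry() and return. The kernel then re-issues the
 * ioctl with that buffer mapped. This macro performs the retry step for
 * handlers that return a single fixed-size value.
 */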
#define FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, val)		\
	if (out_bufsz == 0) {						\
		struct iovec out_iov;					\
		out_iov.iov_base = (void *)arg;				\
		out_iov.iov_len = sizeof(val);				\
		fuse_reply_ioctl_retry(req, NULL, 0, &out_iov, 1);	\
		return;							\
	}

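/*
 * Completion callback for passthru commands. For host-to-controller and
 * non-data commands only the NVMe status code is relayed. For
 * controller-to-host transfers, cdw0 of the completion (the ioctl
 * "result" field) and, if present, the read payload are copied back to
 * the caller.
 */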
static void
cuse_nvme_passthru_cmd_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = arg;
	struct iovec out_iov[2];
	struct spdk_nvme_cpl _cpl;

	if (ctx->data_transfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ||
	    ctx->data_transfer == SPDK_NVME_DATA_NONE) {
		fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, NULL, 0);
	} else {
		memcpy(&_cpl, cpl, sizeof(struct spdk_nvme_cpl));

		out_iov[0].iov_base = &_cpl.cdw0;
		out_iov[0].iov_len = sizeof(_cpl.cdw0);

		if (ctx->data_len > 0) {
			out_iov[1].iov_base = ctx->data;
			out_iov[1].iov_len = ctx->data_len;
			fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, out_iov, 2);
		} else {
			fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, out_iov, 1);
		}
	}

	cuse_io_ctx_free(ctx);
}

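/*
 * Executed on the controller's io_msg thread. A zero nsid means an admin
 * command; otherwise the command is submitted as a raw I/O command on the
 * dedicated external_io_msgs_qpair.
 */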
static void
cuse_nvme_passthru_cmd_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;

	if (nsid != 0) {
		rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, ctrlr->external_io_msgs_qpair, &ctx->nvme_cmd, ctx->data,
						ctx->data_len, cuse_nvme_passthru_cmd_cb, (void *)ctx);
	} else {
		rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &ctx->nvme_cmd, ctx->data, ctx->data_len,
						   cuse_nvme_passthru_cmd_cb, (void *)ctx);
	}
	if (rc < 0) {
		fuse_reply_err(ctx->req, EINVAL);
		cuse_io_ctx_free(ctx);
	}
}

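/*
 * Translate a userspace nvme_passthru_cmd into an spdk_nvme_cmd, stage any
 * host-to-controller payload in a DMA-able buffer, and queue the context
 * for execution on the controller's io_msg thread.
 */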
static void
cuse_nvme_passthru_cmd_send(fuse_req_t req, struct nvme_passthru_cmd *passthru_cmd,
			    const void *data, int cmd)
{
	struct cuse_io_ctx *ctx;
	struct cuse_device *cuse_device = fuse_req_userdata(req);
	int rv;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for cuse_io_ctx\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->data_transfer = spdk_nvme_opc_get_data_transfer(passthru_cmd->opcode);

	memset(&ctx->nvme_cmd, 0, sizeof(ctx->nvme_cmd));
	ctx->nvme_cmd.opc = passthru_cmd->opcode;
	ctx->nvme_cmd.nsid = passthru_cmd->nsid;
	ctx->nvme_cmd.cdw10 = passthru_cmd->cdw10;
	ctx->nvme_cmd.cdw11 = passthru_cmd->cdw11;
	ctx->nvme_cmd.cdw12 = passthru_cmd->cdw12;
	ctx->nvme_cmd.cdw13 = passthru_cmd->cdw13;
	ctx->nvme_cmd.cdw14 = passthru_cmd->cdw14;
	ctx->nvme_cmd.cdw15 = passthru_cmd->cdw15;

	ctx->data_len = passthru_cmd->data_len;

	if (ctx->data_len > 0) {
		ctx->data = spdk_malloc(ctx->data_len, 4096, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->data) {
			SPDK_ERRLOG("Cannot allocate memory for data\n");
			fuse_reply_err(req, ENOMEM);
			free(ctx);
			return;
		}
		if (data != NULL) {
			memcpy(ctx->data, data, ctx->data_len);
		}
	}

	if ((unsigned int)cmd != NVME_IOCTL_ADMIN_CMD) {
		/* Send NS for IO IOCTLs */
		rv = nvme_io_msg_send(cuse_device->ctrlr, passthru_cmd->nsid, cuse_nvme_passthru_cmd_execute, ctx);
	} else {
		/* NS == 0 for Admin IOCTLs */
		rv = nvme_io_msg_send(cuse_device->ctrlr, 0, cuse_nvme_passthru_cmd_execute, ctx);
	}
	if (rv) {
		SPDK_ERRLOG("Cannot send io msg to the controller\n");
		fuse_reply_err(req, -rv);
		cuse_io_ctx_free(ctx);
		return;
	}
}

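/*
 * Handler for NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD. The command
 * struct and its data buffer arrive in up to two retry rounds: first the
 * nvme_passthru_cmd itself, then (for host-to-controller transfers) the
 * payload it points at.
 *
 * Userspace drives this exactly like the kernel NVMe driver's ioctl, e.g.
 * (illustrative sketch, error handling omitted):
 *
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode = 0x06,			// Identify
 *		.addr = (uintptr_t)buf,
 *		.data_len = 4096,
 *		.cdw10 = 1,			// CNS: identify controller
 *	};
 *	fd = open("/dev/spdk/nvme0", O_RDWR);
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */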
static void
cuse_nvme_passthru_cmd(fuse_req_t req, int cmd, void *arg,
		       struct fuse_file_info *fi, unsigned flags,
		       const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	struct nvme_passthru_cmd *passthru_cmd;
	struct iovec in_iov[2], out_iov[2];

	in_iov[0].iov_base = (void *)arg;
	in_iov[0].iov_len = sizeof(*passthru_cmd);
	if (in_bufsz == 0) {
		fuse_reply_ioctl_retry(req, in_iov, 1, NULL, 0);
		return;
	}

	passthru_cmd = (struct nvme_passthru_cmd *)in_buf;

	switch (spdk_nvme_opc_get_data_transfer(passthru_cmd->opcode)) {
	case SPDK_NVME_DATA_HOST_TO_CONTROLLER:
		if (passthru_cmd->addr != 0) {
			in_iov[1].iov_base = (void *)passthru_cmd->addr;
			in_iov[1].iov_len = passthru_cmd->data_len;
			if (in_bufsz == sizeof(*passthru_cmd)) {
				fuse_reply_ioctl_retry(req, in_iov, 2, NULL, 0);
				return;
			}
			cuse_nvme_passthru_cmd_send(req, passthru_cmd, in_buf + sizeof(*passthru_cmd), cmd);
		} else {
			cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, cmd);
		}
		return;
	case SPDK_NVME_DATA_NONE:
	case SPDK_NVME_DATA_CONTROLLER_TO_HOST:
		if (out_bufsz == 0) {
			out_iov[0].iov_base = &((struct nvme_passthru_cmd *)arg)->result;
			out_iov[0].iov_len = sizeof(uint32_t);
			if (passthru_cmd->data_len > 0) {
				out_iov[1].iov_base = (void *)passthru_cmd->addr;
				out_iov[1].iov_len = passthru_cmd->data_len;
				fuse_reply_ioctl_retry(req, in_iov, 1, out_iov, 2);
			} else {
				fuse_reply_ioctl_retry(req, in_iov, 1, out_iov, 1);
			}
			return;
		}

		cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, cmd);

		return;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		fuse_reply_err(req, EINVAL);
		return;
	}
}

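/*
 * Reset handlers, run on the io_msg thread. The SPDK reset calls are
 * synchronous, so the fuse reply is sent only once the reset has finished.
 */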
static void
cuse_nvme_reset_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	fuse_req_t req = arg;

	rc = spdk_nvme_ctrlr_reset(ctrlr);
	if (rc) {
		/* rc is a negative errno; fuse_reply_err() expects a positive value */
		fuse_reply_err(req, -rc);
		return;
	}

	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_subsys_reset_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	fuse_req_t req = arg;

	rc = spdk_nvme_ctrlr_reset_subsystem(ctrlr);
	if (rc) {
		fuse_reply_err(req, -rc);
		return;
	}

	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_reset(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rv;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	if (cuse_device->nsid) {
		SPDK_ERRLOG("Namespace reset not supported\n");
		fuse_reply_err(req, EINVAL);
		return;
	}

	if (cmd == NVME_IOCTL_SUBSYS_RESET) {
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_SUBSYS_RESET\n");
		rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_subsys_reset_execute,
				      (void *)req);
	} else {
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_RESET\n");
		rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_reset_execute, (void *)req);
	}
	if (rv) {
		SPDK_ERRLOG("Cannot send reset\n");
		fuse_reply_err(req, EINVAL);
	}
}

static void
cuse_nvme_rescan_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	fuse_req_t req = arg;

	nvme_ctrlr_update_namespaces(ctrlr);
	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_rescan(fuse_req_t req, int cmd, void *arg,
		 struct fuse_file_info *fi, unsigned flags,
		 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rv;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	if (cuse_device->nsid) {
		SPDK_ERRLOG("Namespace rescan not supported\n");
		fuse_reply_err(req, EINVAL);
		return;
	}

	rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_rescan_execute, (void *)req);
	if (rv) {
		SPDK_ERRLOG("Cannot send rescan\n");
		fuse_reply_err(req, EINVAL);
	}
}

/*****************************************************************************
 * Namespace IO requests
 */

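/*
 * Read/write paths for NVME_IOCTL_SUBMIT_IO. nvme_user_io follows the
 * NVMe convention of a zero-based block count, hence "nblocks + 1" when
 * sizing transfers below.
 */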
static void
cuse_nvme_submit_io_write_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = (struct cuse_io_ctx *)ref;

	fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, NULL, 0);

	cuse_io_ctx_free(ctx);
}

static void
cuse_nvme_submit_io_write_cb(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;
	struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

	rc = spdk_nvme_ns_cmd_write(ns, ctrlr->external_io_msgs_qpair, ctx->data,
				    ctx->lba, /* LBA start */
				    ctx->lba_count, /* number of LBAs */
				    cuse_nvme_submit_io_write_done, ctx, 0);

	if (rc != 0) {
		SPDK_ERRLOG("write failed: rc = %d\n", rc);
		/* rc is a negative errno; fuse_reply_err() expects a positive value */
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void
cuse_nvme_submit_io_write(struct cuse_device *cuse_device, fuse_req_t req, int cmd, void *arg,
			  struct fuse_file_info *fi, unsigned flags, uint32_t block_size,
			  const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	const struct nvme_user_io *user_io = in_buf;
	struct cuse_io_ctx *ctx;
	int rc;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for context\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->lba = user_io->slba;
	ctx->lba_count = user_io->nblocks + 1;
	ctx->data_len = ctx->lba_count * block_size;

	ctx->data = spdk_zmalloc(ctx->data_len, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (ctx->data == NULL) {
		SPDK_ERRLOG("Write buffer allocation failed\n");
		fuse_reply_err(ctx->req, ENOMEM);
		free(ctx);
		return;
	}

	memcpy(ctx->data, in_buf + sizeof(*user_io), ctx->data_len);

	rc = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_submit_io_write_cb,
			      ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Cannot send write io\n");
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
	}
}

static void
cuse_nvme_submit_io_read_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = (struct cuse_io_ctx *)ref;
	struct iovec iov;

	iov.iov_base = ctx->data;
	iov.iov_len = ctx->data_len;

	fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, &iov, 1);

	cuse_io_ctx_free(ctx);
}

static void
cuse_nvme_submit_io_read_cb(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;
	struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

	rc = spdk_nvme_ns_cmd_read(ns, ctrlr->external_io_msgs_qpair, ctx->data,
				   ctx->lba, /* LBA start */
				   ctx->lba_count, /* number of LBAs */
				   cuse_nvme_submit_io_read_done, ctx, 0);

	if (rc != 0) {
		SPDK_ERRLOG("read failed: rc = %d\n", rc);
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void
cuse_nvme_submit_io_read(struct cuse_device *cuse_device, fuse_req_t req, int cmd, void *arg,
			 struct fuse_file_info *fi, unsigned flags, uint32_t block_size,
			 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rc;
	struct cuse_io_ctx *ctx;
	const struct nvme_user_io *user_io = in_buf;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for context\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->lba = user_io->slba;
	ctx->lba_count = user_io->nblocks + 1;

	ctx->data_len = ctx->lba_count * block_size;
	ctx->data = spdk_zmalloc(ctx->data_len, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (ctx->data == NULL) {
		SPDK_ERRLOG("Read buffer allocation failed\n");
		fuse_reply_err(ctx->req, ENOMEM);
		free(ctx);
		return;
	}

	rc = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_submit_io_read_cb, ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Cannot send read io\n");
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
	}
}

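/*
 * Dispatch NVME_IOCTL_SUBMIT_IO. As in the passthru path, a read or write
 * may need one fuse_reply_ioctl_retry() round to map the data buffer
 * before the command can be issued.
 */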
static void
cuse_nvme_submit_io(fuse_req_t req, int cmd, void *arg,
		    struct fuse_file_info *fi, unsigned flags,
		    const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	const struct nvme_user_io *user_io;
	struct iovec in_iov[2], out_iov;
	struct cuse_device *cuse_device = fuse_req_userdata(req);
	struct spdk_nvme_ns *ns;
	uint32_t block_size;

	in_iov[0].iov_base = (void *)arg;
	in_iov[0].iov_len = sizeof(*user_io);
	if (in_bufsz == 0) {
		fuse_reply_ioctl_retry(req, in_iov, 1, NULL, 0);
		return;
	}

	user_io = in_buf;

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	block_size = spdk_nvme_ns_get_sector_size(ns);

	switch (user_io->opcode) {
	case SPDK_NVME_OPC_READ:
		out_iov.iov_base = (void *)user_io->addr;
		out_iov.iov_len = (user_io->nblocks + 1) * block_size;
		if (out_bufsz == 0) {
			fuse_reply_ioctl_retry(req, in_iov, 1, &out_iov, 1);
			return;
		}

		cuse_nvme_submit_io_read(cuse_device, req, cmd, arg, fi, flags,
					 block_size, in_buf, in_bufsz, out_bufsz);
		break;
	case SPDK_NVME_OPC_WRITE:
		in_iov[1].iov_base = (void *)user_io->addr;
		in_iov[1].iov_len = (user_io->nblocks + 1) * block_size;
		if (in_bufsz == sizeof(*user_io)) {
			fuse_reply_ioctl_retry(req, in_iov, 2, NULL, 0);
			return;
		}

		cuse_nvme_submit_io_write(cuse_device, req, cmd, arg, fi, flags,
					  block_size, in_buf, in_bufsz, out_bufsz);
		break;
	default:
		SPDK_ERRLOG("SUBMIT_IO: opc:%d not valid\n", user_io->opcode);
		fuse_reply_err(req, EINVAL);
		return;
	}
}

/*****************************************************************************
 * Other namespace IOCTLs
 */
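/*
 * Minimal emulation of the block-device ioctls commonly issued against
 * nvme namespace nodes, answered from the namespace data SPDK already
 * holds in memory.
 */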
static void
cuse_blkgetsize64(fuse_req_t req, int cmd, void *arg,
		  struct fuse_file_info *fi, unsigned flags,
		  const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	uint64_t size;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, size);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	/* BLKGETSIZE64 reports the device size in bytes */
	size = spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &size, sizeof(size));
}

static void
cuse_blkpbszget(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int pbsz;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, pbsz);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	pbsz = spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &pbsz, sizeof(pbsz));
}

static void
cuse_blkgetsize(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	long size;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, size);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);

	/* return size in 512-byte blocks */
	size = spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns) / 512;
	fuse_reply_ioctl(req, 0, &size, sizeof(size));
}

static void
cuse_blkgetsectorsize(fuse_req_t req, int cmd, void *arg,
		      struct fuse_file_info *fi, unsigned flags,
		      const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int ssize;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, ssize);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	ssize = spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &ssize, sizeof(ssize));
}

static void
cuse_getid(fuse_req_t req, int cmd, void *arg,
	   struct fuse_file_info *fi, unsigned flags,
	   const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	fuse_reply_ioctl(req, cuse_device->nsid, NULL, 0);
}

static void
cuse_ctrlr_ioctl(fuse_req_t req, int cmd, void *arg,
		 struct fuse_file_info *fi, unsigned flags,
		 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (flags & FUSE_IOCTL_COMPAT) {
		fuse_reply_err(req, ENOSYS);
		return;
	}

	switch ((unsigned int)cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ADMIN_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_RESET:
	case NVME_IOCTL_SUBSYS_RESET:
		cuse_nvme_reset(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_RESCAN:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_RESCAN\n");
		cuse_nvme_rescan(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	default:
		SPDK_ERRLOG("Unsupported IOCTL 0x%X.\n", cmd);
		fuse_reply_err(req, EINVAL);
	}
}

static void
cuse_ns_ioctl(fuse_req_t req, int cmd, void *arg,
	      struct fuse_file_info *fi, unsigned flags,
	      const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (flags & FUSE_IOCTL_COMPAT) {
		fuse_reply_err(req, ENOSYS);
		return;
	}

	switch ((unsigned int)cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ADMIN_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_SUBMIT_IO:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_SUBMIT_IO\n");
		cuse_nvme_submit_io(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_IO_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_IO_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_ID:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ID\n");
		cuse_getid(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKPBSZGET:
		SPDK_DEBUGLOG(nvme_cuse, "BLKPBSZGET\n");
		cuse_blkpbszget(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKSSZGET:
		SPDK_DEBUGLOG(nvme_cuse, "BLKSSZGET\n");
		cuse_blkgetsectorsize(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKGETSIZE:
		SPDK_DEBUGLOG(nvme_cuse, "BLKGETSIZE\n");
		/* Returns the device size as a number of 512-byte blocks (returns pointer to long) */
		cuse_blkgetsize(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKGETSIZE64:
		SPDK_DEBUGLOG(nvme_cuse, "BLKGETSIZE64\n");
		/* Returns the device size in bytes (returns pointer to uint64_t) */
		cuse_blkgetsize64(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	default:
		SPDK_ERRLOG("Unsupported IOCTL 0x%X.\n", cmd);
		fuse_reply_err(req, EINVAL);
	}
}

/*****************************************************************************
 * CUSE threads initialization.
 */

static void
cuse_open(fuse_req_t req, struct fuse_file_info *fi)
{
	fuse_reply_open(req, fi);
}

static const struct cuse_lowlevel_ops cuse_ctrlr_clop = {
	.open		= cuse_open,
	.ioctl		= cuse_ctrlr_ioctl,
};

static const struct cuse_lowlevel_ops cuse_ns_clop = {
	.open		= cuse_open,
	.ioctl		= cuse_ns_ioctl,
};

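/*
 * Per-device FUSE session loop. The session fd is polled with a timeout so
 * that fuse_session_exit(), called from another thread during teardown, is
 * noticed even when no requests are arriving.
 */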
static void *
cuse_thread(void *arg)
{
	struct cuse_device *cuse_device = arg;
	char *cuse_argv[] = { "cuse", "-f" };
	int cuse_argc = SPDK_COUNTOF(cuse_argv);
	char devname_arg[128 + 8];
	const char *dev_info_argv[] = { devname_arg };
	struct cuse_info ci;
	int multithreaded;
	int rc;
	struct fuse_buf buf = { .mem = NULL };
	struct pollfd fds;
	int timeout_msecs = 500;

	spdk_unaffinitize_thread();

	snprintf(devname_arg, sizeof(devname_arg), "DEVNAME=%s", cuse_device->dev_name);

	memset(&ci, 0, sizeof(ci));
	ci.dev_info_argc = 1;
	ci.dev_info_argv = dev_info_argv;
	ci.flags = CUSE_UNRESTRICTED_IOCTL;

	if (cuse_device->nsid) {
		cuse_device->session = cuse_lowlevel_setup(cuse_argc, cuse_argv, &ci, &cuse_ns_clop,
				       &multithreaded, cuse_device);
	} else {
		cuse_device->session = cuse_lowlevel_setup(cuse_argc, cuse_argv, &ci, &cuse_ctrlr_clop,
				       &multithreaded, cuse_device);
	}
	if (!cuse_device->session) {
		SPDK_ERRLOG("Cannot create cuse session\n");
		goto err;
	}

	SPDK_NOTICELOG("fuse session for device %s created\n", cuse_device->dev_name);

	/* Receive and process fuse requests */
	fds.fd = fuse_session_fd(cuse_device->session);
	fds.events = POLLIN;
	while (!fuse_session_exited(cuse_device->session)) {
		rc = poll(&fds, 1, timeout_msecs);
		if (rc <= 0) {
			continue;
		}
		rc = fuse_session_receive_buf(cuse_device->session, &buf);
		if (rc > 0) {
			fuse_session_process_buf(cuse_device->session, &buf);
		}
	}
	free(buf.mem);
	fuse_session_reset(cuse_device->session);
	cuse_lowlevel_teardown(cuse_device->session);
err:
	pthread_exit(NULL);
}

/*****************************************************************************
 * CUSE devices management
 */

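/* Start the CUSE thread for one namespace node; a no-op if it is already running. */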
static int
cuse_nvme_ns_start(struct cuse_device *ctrlr_device, uint32_t nsid)
{
	struct cuse_device *ns_device;
	int rv;

	ns_device = &ctrlr_device->ns_devices[nsid - 1];
	if (ns_device->is_started) {
		return 0;
	}

	ns_device->ctrlr = ctrlr_device->ctrlr;
	ns_device->ctrlr_device = ctrlr_device;
	ns_device->nsid = nsid;
	rv = snprintf(ns_device->dev_name, sizeof(ns_device->dev_name), "%sn%d",
		      ctrlr_device->dev_name, ns_device->nsid);
	if (rv < 0) {
		SPDK_ERRLOG("Device name too long.\n");
		/* ns_device points into ctrlr_device->ns_devices and is freed
		 * together with it, so it must not be freed individually here. */
		return -ENAMETOOLONG;
	}

	rv = pthread_create(&ns_device->tid, NULL, cuse_thread, ns_device);
	if (rv != 0) {
		SPDK_ERRLOG("pthread_create failed\n");
		return -rv;
	}

	ns_device->is_started = true;

	return 0;
}

static void
cuse_nvme_ns_stop(struct cuse_device *ctrlr_device, uint32_t nsid)
{
	struct cuse_device *ns_device;

	ns_device = &ctrlr_device->ns_devices[nsid - 1];
	if (!ns_device->is_started) {
		return;
	}

	fuse_session_exit(ns_device->session);
	pthread_join(ns_device->tid, NULL);
	ns_device->is_started = false;
}

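/*
 * Claim a controller index by taking a write lock on a lock file under
 * /var/tmp. The owner's pid is stored in the file so a conflicting process
 * can be reported. The fd, and with it the lock, is held until
 * nvme_cuse_unclaim().
 */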
static int
nvme_cuse_claim(struct cuse_device *ctrlr_device, uint32_t index)
{
	int dev_fd;
	int pid;
	void *dev_map;
	struct flock cusedev_lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,
	};

	snprintf(ctrlr_device->lock_name, sizeof(ctrlr_device->lock_name),
		 "/var/tmp/spdk_nvme_cuse_lock_%" PRIu32, index);

	dev_fd = open(ctrlr_device->lock_name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (dev_fd == -1) {
		SPDK_ERRLOG("could not open %s\n", ctrlr_device->lock_name);
		return -errno;
	}

	if (ftruncate(dev_fd, sizeof(int)) != 0) {
		SPDK_ERRLOG("could not truncate %s\n", ctrlr_device->lock_name);
		close(dev_fd);
		return -errno;
	}

	dev_map = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
		       MAP_SHARED, dev_fd, 0);
	if (dev_map == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap dev %s (%d)\n", ctrlr_device->lock_name, errno);
		close(dev_fd);
		return -errno;
	}

	if (fcntl(dev_fd, F_SETLK, &cusedev_lock) != 0) {
		pid = *(int *)dev_map;
		SPDK_ERRLOG("Cannot create lock on device %s, probably"
			    " process %d has claimed it\n", ctrlr_device->lock_name, pid);
		munmap(dev_map, sizeof(int));
		close(dev_fd);
		/* F_SETLK returns unspecified errnos, normalize them */
		return -EACCES;
	}

	*(int *)dev_map = (int)getpid();
	munmap(dev_map, sizeof(int));
	ctrlr_device->claim_fd = dev_fd;
	ctrlr_device->index = index;
	/* Keep dev_fd open to maintain the lock. */
	return 0;
}

static void
nvme_cuse_unclaim(struct cuse_device *ctrlr_device)
{
	close(ctrlr_device->claim_fd);
	ctrlr_device->claim_fd = -1;
	unlink(ctrlr_device->lock_name);
}

static void
cuse_nvme_ctrlr_stop(struct cuse_device *ctrlr_device)
{
	uint32_t i;
	uint32_t num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr_device->ctrlr);

	for (i = 1; i <= num_ns; i++) {
		cuse_nvme_ns_stop(ctrlr_device, i);
	}

	fuse_session_exit(ctrlr_device->session);
	pthread_join(ctrlr_device->tid, NULL);
	TAILQ_REMOVE(&g_ctrlr_ctx_head, ctrlr_device, tailq);
	spdk_bit_array_clear(g_ctrlr_started, ctrlr_device->index);
	if (spdk_bit_array_count_set(g_ctrlr_started) == 0) {
		spdk_bit_array_free(&g_ctrlr_started);
	}
	nvme_cuse_unclaim(ctrlr_device);
	free(ctrlr_device->ns_devices);
	free(ctrlr_device);
}

static int
cuse_nvme_ctrlr_update_namespaces(struct cuse_device *ctrlr_device)
{
	uint32_t nsid;
	uint32_t num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr_device->ctrlr);

	for (nsid = 1; nsid <= num_ns; nsid++) {
		if (!spdk_nvme_ctrlr_is_active_ns(ctrlr_device->ctrlr, nsid)) {
			cuse_nvme_ns_stop(ctrlr_device, nsid);
			continue;
		}

		if (cuse_nvme_ns_start(ctrlr_device, nsid) < 0) {
			SPDK_ERRLOG("Cannot start CUSE namespace device.");
			return -1;
		}
	}

	return 0;
}

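/*
 * Create the CUSE device for a controller: claim the first free index,
 * spawn the controller's fuse thread, then bring up one namespace device
 * per active namespace.
 */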
static int
nvme_cuse_start(struct spdk_nvme_ctrlr *ctrlr)
{
	int rv = 0;
	struct cuse_device *ctrlr_device;
	uint32_t num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);

	SPDK_NOTICELOG("Creating cuse device for controller\n");

	if (g_ctrlr_started == NULL) {
		g_ctrlr_started = spdk_bit_array_create(128);
		if (g_ctrlr_started == NULL) {
			SPDK_ERRLOG("Cannot create bit array\n");
			return -ENOMEM;
		}
	}

	ctrlr_device = (struct cuse_device *)calloc(1, sizeof(struct cuse_device));
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot allocate memory for ctrlr_device.");
		rv = -ENOMEM;
		goto err2;
	}

	ctrlr_device->ctrlr = ctrlr;

	/* Check if device already exists, if not increment index until success */
	ctrlr_device->index = 0;
	while (1) {
		ctrlr_device->index = spdk_bit_array_find_first_clear(g_ctrlr_started, ctrlr_device->index);
		if (ctrlr_device->index == UINT32_MAX) {
			SPDK_ERRLOG("Too many registered controllers\n");
			rv = -ENOSPC;
			goto err2;
		}

		if (nvme_cuse_claim(ctrlr_device, ctrlr_device->index) == 0) {
			break;
		}
		ctrlr_device->index++;
	}
	spdk_bit_array_set(g_ctrlr_started, ctrlr_device->index);
	snprintf(ctrlr_device->dev_name, sizeof(ctrlr_device->dev_name), "spdk/nvme%d",
		 ctrlr_device->index);

	ctrlr_device->ns_devices = (struct cuse_device *)calloc(num_ns, sizeof(struct cuse_device));
	if (ctrlr_device->ns_devices == NULL) {
		SPDK_ERRLOG("Cannot allocate memory for namespace devices.");
		rv = -ENOMEM;
		goto err3;
	}

	rv = pthread_create(&ctrlr_device->tid, NULL, cuse_thread, ctrlr_device);
	if (rv != 0) {
		SPDK_ERRLOG("pthread_create failed\n");
		rv = -rv;
		goto err3;
	}
	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, ctrlr_device, tailq);

	/* Start all active namespaces */
	if (cuse_nvme_ctrlr_update_namespaces(ctrlr_device) < 0) {
		SPDK_ERRLOG("Cannot start CUSE namespace devices.");
		cuse_nvme_ctrlr_stop(ctrlr_device);
		/* cuse_nvme_ctrlr_stop() already released the claim, the bit
		 * array slot and ctrlr_device itself, so do not fall through
		 * to the error labels below. */
		return -1;
	}

	return 0;

err3:
	spdk_bit_array_clear(g_ctrlr_started, ctrlr_device->index);
	nvme_cuse_unclaim(ctrlr_device);
	free(ctrlr_device->ns_devices);
err2:
	free(ctrlr_device);
	if (spdk_bit_array_count_set(g_ctrlr_started) == 0) {
		spdk_bit_array_free(&g_ctrlr_started);
	}
	return rv;
}

static struct cuse_device *
nvme_cuse_get_cuse_ctrlr_device(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device = NULL;

	TAILQ_FOREACH(ctrlr_device, &g_ctrlr_ctx_head, tailq) {
		if (ctrlr_device->ctrlr == ctrlr) {
			break;
		}
	}

	return ctrlr_device;
}

static struct cuse_device *
nvme_cuse_get_cuse_ns_device(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct cuse_device *ctrlr_device = NULL;
	uint32_t num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);

	if (nsid < 1 || nsid > num_ns) {
		return NULL;
	}

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		return NULL;
	}

	if (!ctrlr_device->ns_devices[nsid - 1].is_started) {
		return NULL;
	}

	return &ctrlr_device->ns_devices[nsid - 1];
}

static void
nvme_cuse_stop(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot find associated CUSE device\n");
		pthread_mutex_unlock(&g_cuse_mtx);
		return;
	}

	cuse_nvme_ctrlr_stop(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);
}

static void
nvme_cuse_update(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return;
	}

	cuse_nvme_ctrlr_update_namespaces(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);
}

static struct nvme_io_msg_producer cuse_nvme_io_msg_producer = {
	.name = "cuse",
	.stop = nvme_cuse_stop,
	.update = nvme_cuse_update,
};

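/*
 * Public entry point. The controller must already be attached, and the
 * application must keep servicing it afterwards, since io_msg requests are
 * processed from the admin completion path. A typical caller (illustrative
 * sketch):
 *
 *	ctrlr = spdk_nvme_connect(&trid, NULL, 0);
 *	if (spdk_nvme_cuse_register(ctrlr) == 0) {
 *		// /dev/spdk/nvmeX[nY] nodes are now live; keep calling
 *		// spdk_nvme_ctrlr_process_admin_completions(ctrlr) so that
 *		// queued ioctls get executed.
 *	}
 */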
int
spdk_nvme_cuse_register(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	rc = nvme_io_msg_ctrlr_register(ctrlr, &cuse_nvme_io_msg_producer);
	if (rc) {
		return rc;
	}

	pthread_mutex_lock(&g_cuse_mtx);

	rc = nvme_cuse_start(ctrlr);
	if (rc) {
		nvme_io_msg_ctrlr_unregister(ctrlr, &cuse_nvme_io_msg_producer);
	}

	pthread_mutex_unlock(&g_cuse_mtx);

	return rc;
}

int
spdk_nvme_cuse_unregister(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot find associated CUSE device\n");
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	cuse_nvme_ctrlr_stop(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);

	nvme_io_msg_ctrlr_unregister(ctrlr, &cuse_nvme_io_msg_producer);

	return 0;
}

void
spdk_nvme_cuse_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_cuse_update(ctrlr);
}

int
spdk_nvme_cuse_get_ctrlr_name(struct spdk_nvme_ctrlr *ctrlr, char *name, size_t *size)
{
	struct cuse_device *ctrlr_device;
	size_t req_len;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	req_len = strnlen(ctrlr_device->dev_name, sizeof(ctrlr_device->dev_name));
	if (*size < req_len) {
		*size = req_len;
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENOSPC;
	}
	snprintf(name, req_len + 1, "%s", ctrlr_device->dev_name);

	pthread_mutex_unlock(&g_cuse_mtx);

	return 0;
}

int
spdk_nvme_cuse_get_ns_name(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, char *name, size_t *size)
{
	struct cuse_device *ns_device;
	size_t req_len;

	pthread_mutex_lock(&g_cuse_mtx);

	ns_device = nvme_cuse_get_cuse_ns_device(ctrlr, nsid);
	if (!ns_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	req_len = strnlen(ns_device->dev_name, sizeof(ns_device->dev_name));
	if (*size < req_len) {
		*size = req_len;
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENOSPC;
	}
	snprintf(name, req_len + 1, "%s", ns_device->dev_name);

	pthread_mutex_unlock(&g_cuse_mtx);

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(nvme_cuse)