/* xref: /spdk/lib/nvme/nvme_cuse.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define FUSE_USE_VERSION 31

#include <fuse3/cuse_lowlevel.h>

#include <linux/nvme_ioctl.h>
#include <linux/fs.h>

#include "nvme_internal.h"
#include "nvme_io_msg.h"
#include "nvme_cuse.h"

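/*
 * Per-device CUSE context. One instance exists for the controller character
 * device (nsid == 0) and one for each namespace device; namespace devices
 * keep a back-pointer to their controller device and are linked on its
 * ns_devices list. Each device runs its own fuse session on a dedicated
 * thread (tid).
 */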
struct cuse_device {
	char				dev_name[128];
	uint32_t			index;
	int				claim_fd;
	char				lock_name[64];

	struct spdk_nvme_ctrlr		*ctrlr;		/**< NVMe controller */
	uint32_t			nsid;		/**< NVMe namespace id, or 0 for the controller device */

	pthread_t			tid;
	struct fuse_session		*session;

	struct cuse_device		*ctrlr_device;
	TAILQ_HEAD(, cuse_device)	ns_devices;

	TAILQ_ENTRY(cuse_device)	tailq;
};

static pthread_mutex_t g_cuse_mtx = PTHREAD_MUTEX_INITIALIZER;
static TAILQ_HEAD(, cuse_device) g_ctrlr_ctx_head = TAILQ_HEAD_INITIALIZER(g_ctrlr_ctx_head);
static struct spdk_bit_array *g_ctrlr_started;

struct cuse_io_ctx {
	struct spdk_nvme_cmd		nvme_cmd;
	enum spdk_nvme_data_transfer	data_transfer;

	uint64_t			lba;
	uint32_t			lba_count;

	void				*data;
	int				data_len;

	fuse_req_t			req;
};

static void
cuse_io_ctx_free(struct cuse_io_ctx *ctx)
{
	spdk_free(ctx->data);
	free(ctx);
}

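/*
 * CUSE ioctls are "unrestricted": the kernel copies no user data on its own,
 * so a handler that needs to read or write user memory must first answer
 * with fuse_reply_ioctl_retry(), describing the buffers it wants. The kernel
 * then re-issues the ioctl with those buffers available (in_bufsz/out_bufsz
 * become non-zero). The macro below implements that retry step for ioctls
 * that only write back a single fixed-size value.
 */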
#define FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, val)		\
	if (out_bufsz == 0) {						\
		struct iovec out_iov;					\
		out_iov.iov_base = (void *)arg;				\
		out_iov.iov_len = sizeof(val);				\
		fuse_reply_ioctl_retry(req, NULL, 0, &out_iov, 1);	\
		return;							\
	}

static void
cuse_nvme_passthru_cmd_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = arg;
	struct iovec out_iov[2];
	struct spdk_nvme_cpl _cpl;

	if (ctx->data_transfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ||
	    ctx->data_transfer == SPDK_NVME_DATA_NONE) {
		fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, NULL, 0);
	} else {
		memcpy(&_cpl, cpl, sizeof(struct spdk_nvme_cpl));

		out_iov[0].iov_base = &_cpl.cdw0;
		out_iov[0].iov_len = sizeof(_cpl.cdw0);

		if (ctx->data_len > 0) {
			out_iov[1].iov_base = ctx->data;
			out_iov[1].iov_len = ctx->data_len;
			fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, out_iov, 2);
		} else {
			fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, out_iov, 1);
		}
	}

	cuse_io_ctx_free(ctx);
}

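/*
 * Runs in whichever thread polls the controller (nvme_io_msg_process(); see
 * nvme_io_msg.c), not on the CUSE thread, so it may safely use the
 * controller's dedicated external_io_msgs_qpair. nsid == 0 denotes an admin
 * command.
 */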
static void
cuse_nvme_passthru_cmd_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;

	if (nsid != 0) {
		rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, ctrlr->external_io_msgs_qpair, &ctx->nvme_cmd, ctx->data,
						ctx->data_len, cuse_nvme_passthru_cmd_cb, (void *)ctx);
	} else {
		rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &ctx->nvme_cmd, ctx->data, ctx->data_len,
						   cuse_nvme_passthru_cmd_cb, (void *)ctx);
	}
	if (rc < 0) {
		fuse_reply_err(ctx->req, EINVAL);
		cuse_io_ctx_free(ctx);
	}
}

static void
cuse_nvme_passthru_cmd_send(fuse_req_t req, struct nvme_passthru_cmd *passthru_cmd,
			    const void *data, int cmd)
{
	struct cuse_io_ctx *ctx;
	struct cuse_device *cuse_device = fuse_req_userdata(req);
	int rv;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for cuse_io_ctx\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->data_transfer = spdk_nvme_opc_get_data_transfer(passthru_cmd->opcode);

	memset(&ctx->nvme_cmd, 0, sizeof(ctx->nvme_cmd));
	ctx->nvme_cmd.opc = passthru_cmd->opcode;
	ctx->nvme_cmd.nsid = passthru_cmd->nsid;
	ctx->nvme_cmd.cdw10 = passthru_cmd->cdw10;
	ctx->nvme_cmd.cdw11 = passthru_cmd->cdw11;
	ctx->nvme_cmd.cdw12 = passthru_cmd->cdw12;
	ctx->nvme_cmd.cdw13 = passthru_cmd->cdw13;
	ctx->nvme_cmd.cdw14 = passthru_cmd->cdw14;
	ctx->nvme_cmd.cdw15 = passthru_cmd->cdw15;

	ctx->data_len = passthru_cmd->data_len;

	if (ctx->data_len > 0) {
		ctx->data = spdk_malloc(ctx->data_len, 4096, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->data) {
			SPDK_ERRLOG("Cannot allocate memory for data\n");
			fuse_reply_err(req, ENOMEM);
			free(ctx);
			return;
		}
		if (data != NULL) {
			memcpy(ctx->data, data, ctx->data_len);
		}
	}

	if ((unsigned int)cmd != NVME_IOCTL_ADMIN_CMD) {
		/* Send NS for IO IOCTLs */
		rv = nvme_io_msg_send(cuse_device->ctrlr, passthru_cmd->nsid, cuse_nvme_passthru_cmd_execute, ctx);
	} else {
		/* NS == 0 for Admin IOCTLs */
		rv = nvme_io_msg_send(cuse_device->ctrlr, 0, cuse_nvme_passthru_cmd_execute, ctx);
	}
	if (rv) {
		SPDK_ERRLOG("Cannot send io msg to the controller\n");
		fuse_reply_err(req, -rv);
		cuse_io_ctx_free(ctx);
		return;
	}
}

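/*
 * NVME_IOCTL_ADMIN_CMD / NVME_IOCTL_IO_CMD entry point. The handler may be
 * invoked up to three times for a single ioctl: first to request the
 * nvme_passthru_cmd struct itself, then (depending on the data direction) to
 * request or advertise the command's data buffer, and finally with all
 * buffers available, at which point the command is forwarded to the
 * controller.
 */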
static void
cuse_nvme_passthru_cmd(fuse_req_t req, int cmd, void *arg,
		       struct fuse_file_info *fi, unsigned flags,
		       const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	struct nvme_passthru_cmd *passthru_cmd;
	struct iovec in_iov[2], out_iov[2];

	in_iov[0].iov_base = (void *)arg;
	in_iov[0].iov_len = sizeof(*passthru_cmd);
	if (in_bufsz == 0) {
		fuse_reply_ioctl_retry(req, in_iov, 1, NULL, 0);
		return;
	}

	passthru_cmd = (struct nvme_passthru_cmd *)in_buf;

	switch (spdk_nvme_opc_get_data_transfer(passthru_cmd->opcode)) {
	case SPDK_NVME_DATA_HOST_TO_CONTROLLER:
		if (passthru_cmd->addr != 0) {
			in_iov[1].iov_base = (void *)passthru_cmd->addr;
			in_iov[1].iov_len = passthru_cmd->data_len;
			if (in_bufsz == sizeof(*passthru_cmd)) {
				fuse_reply_ioctl_retry(req, in_iov, 2, NULL, 0);
				return;
			}
			cuse_nvme_passthru_cmd_send(req, passthru_cmd, in_buf + sizeof(*passthru_cmd), cmd);
		} else {
			cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, cmd);
		}
		return;
	case SPDK_NVME_DATA_NONE:
	case SPDK_NVME_DATA_CONTROLLER_TO_HOST:
		if (out_bufsz == 0) {
			out_iov[0].iov_base = &((struct nvme_passthru_cmd *)arg)->result;
			out_iov[0].iov_len = sizeof(uint32_t);
			if (passthru_cmd->data_len > 0) {
				out_iov[1].iov_base = (void *)passthru_cmd->addr;
				out_iov[1].iov_len = passthru_cmd->data_len;
				fuse_reply_ioctl_retry(req, in_iov, 1, out_iov, 2);
			} else {
				fuse_reply_ioctl_retry(req, in_iov, 1, out_iov, 1);
			}
			return;
		}

		cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, cmd);

		return;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		fuse_reply_err(req, EINVAL);
		return;
	}
}

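/*
 * NVME_IOCTL_RESET / NVME_IOCTL_SUBSYS_RESET. The reset itself must not run
 * on the CUSE thread, so it is forwarded through nvme_io_msg_send() like the
 * passthrough commands above.
 */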
static void
cuse_nvme_reset_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	fuse_req_t req = arg;

	rc = spdk_nvme_ctrlr_reset(ctrlr);
	if (rc) {
		/* rc is a negative errno; fuse_reply_err() expects a positive one */
		fuse_reply_err(req, -rc);
		return;
	}

	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_subsys_reset_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	fuse_req_t req = arg;

	rc = spdk_nvme_ctrlr_reset_subsystem(ctrlr);
	if (rc) {
		/* rc is a negative errno; fuse_reply_err() expects a positive one */
		fuse_reply_err(req, -rc);
		return;
	}

	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_reset(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rv;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	if (cuse_device->nsid) {
		SPDK_ERRLOG("Namespace reset not supported\n");
		fuse_reply_err(req, EINVAL);
		return;
	}

	if (cmd == NVME_IOCTL_SUBSYS_RESET) {
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_SUBSYS_RESET\n");
		rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_subsys_reset_execute,
				      (void *)req);
	} else {
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_RESET\n");
		rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_reset_execute, (void *)req);
	}
	if (rv) {
		SPDK_ERRLOG("Cannot send reset\n");
		fuse_reply_err(req, EINVAL);
	}
}

static void
cuse_nvme_rescan_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	fuse_req_t req = arg;

	nvme_ctrlr_update_namespaces(ctrlr);
	fuse_reply_ioctl_iov(req, 0, NULL, 0);
}

static void
cuse_nvme_rescan(fuse_req_t req, int cmd, void *arg,
		 struct fuse_file_info *fi, unsigned flags,
		 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rv;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	if (cuse_device->nsid) {
		SPDK_ERRLOG("Namespace rescan not supported\n");
		fuse_reply_err(req, EINVAL);
		return;
	}

	rv = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_rescan_execute, (void *)req);
	if (rv) {
		SPDK_ERRLOG("Cannot send rescan\n");
		fuse_reply_err(req, EINVAL);
	}
}

/*****************************************************************************
 * Namespace IO requests
 */

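/*
 * NVME_IOCTL_SUBMIT_IO is serviced through an intermediate DMA-able buffer:
 * write payloads are copied from the FUSE request into ctx->data before
 * submission, and read payloads are returned to the caller from ctx->data in
 * the completion callback.
 */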
static void
cuse_nvme_submit_io_write_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = (struct cuse_io_ctx *)ref;

	fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, NULL, 0);

	cuse_io_ctx_free(ctx);
}

static void
cuse_nvme_submit_io_write_cb(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;
	struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

	rc = spdk_nvme_ns_cmd_write(ns, ctrlr->external_io_msgs_qpair, ctx->data,
				    ctx->lba, /* LBA start */
				    ctx->lba_count, /* number of LBAs */
				    cuse_nvme_submit_io_write_done, ctx, 0);

	if (rc != 0) {
		SPDK_ERRLOG("write failed: rc = %d\n", rc);
		/* rc is a negative errno; fuse_reply_err() expects a positive one */
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void
cuse_nvme_submit_io_write(struct cuse_device *cuse_device, fuse_req_t req, int cmd, void *arg,
			  struct fuse_file_info *fi, unsigned flags, uint32_t block_size,
			  const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	const struct nvme_user_io *user_io = in_buf;
	struct cuse_io_ctx *ctx;
	int rc;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for context\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->lba = user_io->slba;
	ctx->lba_count = user_io->nblocks + 1;
	ctx->data_len = ctx->lba_count * block_size;

	ctx->data = spdk_zmalloc(ctx->data_len, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (ctx->data == NULL) {
		SPDK_ERRLOG("Write buffer allocation failed\n");
		fuse_reply_err(ctx->req, ENOMEM);
		free(ctx);
		return;
	}

	memcpy(ctx->data, in_buf + sizeof(*user_io), ctx->data_len);

	rc = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_submit_io_write_cb,
			      ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Cannot send write io\n");
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
	}
}

static void
cuse_nvme_submit_io_read_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct cuse_io_ctx *ctx = (struct cuse_io_ctx *)ref;
	struct iovec iov;

	iov.iov_base = ctx->data;
	iov.iov_len = ctx->data_len;

	fuse_reply_ioctl_iov(ctx->req, cpl->status.sc, &iov, 1);

	cuse_io_ctx_free(ctx);
}

static void
cuse_nvme_submit_io_read_cb(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *arg)
{
	int rc;
	struct cuse_io_ctx *ctx = arg;
	struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

	rc = spdk_nvme_ns_cmd_read(ns, ctrlr->external_io_msgs_qpair, ctx->data,
				   ctx->lba, /* LBA start */
				   ctx->lba_count, /* number of LBAs */
				   cuse_nvme_submit_io_read_done, ctx, 0);

	if (rc != 0) {
		SPDK_ERRLOG("read failed: rc = %d\n", rc);
		/* rc is a negative errno; fuse_reply_err() expects a positive one */
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void
cuse_nvme_submit_io_read(struct cuse_device *cuse_device, fuse_req_t req, int cmd, void *arg,
			 struct fuse_file_info *fi, unsigned flags, uint32_t block_size,
			 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int rc;
	struct cuse_io_ctx *ctx;
	const struct nvme_user_io *user_io = in_buf;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for context\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;
	ctx->lba = user_io->slba;
	ctx->lba_count = user_io->nblocks + 1;

	ctx->data_len = ctx->lba_count * block_size;
	ctx->data = spdk_zmalloc(ctx->data_len, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (ctx->data == NULL) {
		SPDK_ERRLOG("Read buffer allocation failed\n");
		fuse_reply_err(ctx->req, ENOMEM);
		free(ctx);
		return;
	}

	rc = nvme_io_msg_send(cuse_device->ctrlr, cuse_device->nsid, cuse_nvme_submit_io_read_cb, ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Cannot send read io\n");
		fuse_reply_err(ctx->req, -rc);
		cuse_io_ctx_free(ctx);
	}
}

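/*
 * Dispatch for NVME_IOCTL_SUBMIT_IO. nvme_user_io::nblocks is 0-based, hence
 * the "+ 1" when sizing buffers. Reads advertise an output buffer of
 * lba_count * block_size bytes; writes request the payload as a second input
 * buffer appended to the nvme_user_io struct.
 */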
static void
cuse_nvme_submit_io(fuse_req_t req, int cmd, void *arg,
		    struct fuse_file_info *fi, unsigned flags,
		    const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	const struct nvme_user_io *user_io;
	struct iovec in_iov[2], out_iov;
	struct cuse_device *cuse_device = fuse_req_userdata(req);
	struct spdk_nvme_ns *ns;
	uint32_t block_size;

	in_iov[0].iov_base = (void *)arg;
	in_iov[0].iov_len = sizeof(*user_io);
	if (in_bufsz == 0) {
		fuse_reply_ioctl_retry(req, in_iov, 1, NULL, 0);
		return;
	}

	user_io = in_buf;

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	block_size = spdk_nvme_ns_get_sector_size(ns);

	switch (user_io->opcode) {
	case SPDK_NVME_OPC_READ:
		out_iov.iov_base = (void *)user_io->addr;
		out_iov.iov_len = (user_io->nblocks + 1) * block_size;
		if (out_bufsz == 0) {
			fuse_reply_ioctl_retry(req, in_iov, 1, &out_iov, 1);
			return;
		}

		cuse_nvme_submit_io_read(cuse_device, req, cmd, arg, fi, flags,
					 block_size, in_buf, in_bufsz, out_bufsz);
		break;
	case SPDK_NVME_OPC_WRITE:
		in_iov[1].iov_base = (void *)user_io->addr;
		in_iov[1].iov_len = (user_io->nblocks + 1) * block_size;
		if (in_bufsz == sizeof(*user_io)) {
			fuse_reply_ioctl_retry(req, in_iov, 2, NULL, 0);
			return;
		}

		cuse_nvme_submit_io_write(cuse_device, req, cmd, arg, fi, flags,
					  block_size, in_buf, in_bufsz, out_bufsz);
		break;
	default:
		SPDK_ERRLOG("SUBMIT_IO: opc:%d not valid\n", user_io->opcode);
		fuse_reply_err(req, EINVAL);
		return;
	}
}

/*****************************************************************************
 * Other namespace IOCTLs
 */
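/*
 * Minimal emulation of the block-device ioctls that tools commonly issue
 * against /dev/nvmeXnY, reporting sizes the same way the kernel nvme driver
 * would.
 */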
static void
cuse_blkgetsize64(fuse_req_t req, int cmd, void *arg,
		  struct fuse_file_info *fi, unsigned flags,
		  const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	uint64_t size;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, size);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	/* BLKGETSIZE64 returns the device size in bytes */
	size = spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &size, sizeof(size));
}

static void
cuse_blkpbszget(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int pbsz;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, pbsz);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	pbsz = spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &pbsz, sizeof(pbsz));
}

static void
cuse_blkgetsize(fuse_req_t req, int cmd, void *arg,
		struct fuse_file_info *fi, unsigned flags,
		const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	long size;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, size);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);

	/* return size in 512-byte blocks */
	size = spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns) / 512;
	fuse_reply_ioctl(req, 0, &size, sizeof(size));
}

static void
cuse_blkgetsectorsize(fuse_req_t req, int cmd, void *arg,
		      struct fuse_file_info *fi, unsigned flags,
		      const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	int ssize;
	struct spdk_nvme_ns *ns;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	FUSE_REPLY_CHECK_BUFFER(req, arg, out_bufsz, ssize);

	ns = spdk_nvme_ctrlr_get_ns(cuse_device->ctrlr, cuse_device->nsid);
	ssize = spdk_nvme_ns_get_sector_size(ns);
	fuse_reply_ioctl(req, 0, &ssize, sizeof(ssize));
}

static void
cuse_getid(fuse_req_t req, int cmd, void *arg,
	   struct fuse_file_info *fi, unsigned flags,
	   const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	fuse_reply_ioctl(req, cuse_device->nsid, NULL, 0);
}

static void
cuse_ctrlr_ioctl(fuse_req_t req, int cmd, void *arg,
		 struct fuse_file_info *fi, unsigned flags,
		 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (flags & FUSE_IOCTL_COMPAT) {
		fuse_reply_err(req, ENOSYS);
		return;
	}

	switch ((unsigned int)cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ADMIN_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_RESET:
	case NVME_IOCTL_SUBSYS_RESET:
		cuse_nvme_reset(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_RESCAN:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_RESCAN\n");
		cuse_nvme_rescan(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	default:
		SPDK_ERRLOG("Unsupported IOCTL 0x%X.\n", cmd);
		fuse_reply_err(req, ENOTTY);
	}
}

static void
cuse_ns_ioctl(fuse_req_t req, int cmd, void *arg,
	      struct fuse_file_info *fi, unsigned flags,
	      const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (flags & FUSE_IOCTL_COMPAT) {
		fuse_reply_err(req, ENOSYS);
		return;
	}

	switch ((unsigned int)cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ADMIN_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_SUBMIT_IO:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_SUBMIT_IO\n");
		cuse_nvme_submit_io(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_IO_CMD:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_IO_CMD\n");
		cuse_nvme_passthru_cmd(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case NVME_IOCTL_ID:
		SPDK_DEBUGLOG(nvme_cuse, "NVME_IOCTL_ID\n");
		cuse_getid(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKPBSZGET:
		SPDK_DEBUGLOG(nvme_cuse, "BLKPBSZGET\n");
		cuse_blkpbszget(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKSSZGET:
		SPDK_DEBUGLOG(nvme_cuse, "BLKSSZGET\n");
		cuse_blkgetsectorsize(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKGETSIZE:
		SPDK_DEBUGLOG(nvme_cuse, "BLKGETSIZE\n");
		/* Returns the device size as a number of 512-byte blocks (returns pointer to long) */
		cuse_blkgetsize(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	case BLKGETSIZE64:
		SPDK_DEBUGLOG(nvme_cuse, "BLKGETSIZE64\n");
		/* Returns the device size in bytes (returns pointer to uint64_t) */
		cuse_blkgetsize64(req, cmd, arg, fi, flags, in_buf, in_bufsz, out_bufsz);
		break;

	default:
		SPDK_ERRLOG("Unsupported IOCTL 0x%X.\n", cmd);
		fuse_reply_err(req, ENOTTY);
	}
}

/*****************************************************************************
 * CUSE threads initialization.
 */

static void
cuse_open(fuse_req_t req, struct fuse_file_info *fi)
{
	fuse_reply_open(req, fi);
}

static const struct cuse_lowlevel_ops cuse_ctrlr_clop = {
	.open		= cuse_open,
	.ioctl		= cuse_ctrlr_ioctl,
};

static const struct cuse_lowlevel_ops cuse_ns_clop = {
	.open		= cuse_open,
	.ioctl		= cuse_ns_ioctl,
};

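/*
 * Set up a CUSE session equivalent to running "cuse -f" with
 * DEVNAME=<dev_name>; the kernel then creates /dev/<dev_name> (e.g.
 * /dev/spdk/nvme0 or /dev/spdk/nvme0n1). CUSE_UNRESTRICTED_IOCTL is required
 * for the retry-based ioctl handling used above.
 */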
static int
cuse_session_create(struct cuse_device *cuse_device)
{
	char *cuse_argv[] = { "cuse", "-f" };
	int multithreaded;
	int cuse_argc = SPDK_COUNTOF(cuse_argv);
	struct cuse_info ci;
	char devname_arg[128 + 8];
	const char *dev_info_argv[] = { devname_arg };

	snprintf(devname_arg, sizeof(devname_arg), "DEVNAME=%s", cuse_device->dev_name);

	memset(&ci, 0, sizeof(ci));
	ci.dev_info_argc = 1;
	ci.dev_info_argv = dev_info_argv;
	ci.flags = CUSE_UNRESTRICTED_IOCTL;

	if (cuse_device->nsid) {
		cuse_device->session = cuse_lowlevel_setup(cuse_argc, cuse_argv, &ci, &cuse_ns_clop,
				       &multithreaded, cuse_device);
	} else {
		cuse_device->session = cuse_lowlevel_setup(cuse_argc, cuse_argv, &ci, &cuse_ctrlr_clop,
				       &multithreaded, cuse_device);
	}

	if (!cuse_device->session) {
		SPDK_ERRLOG("Cannot create cuse session\n");
		return -1;
	}
	SPDK_NOTICELOG("fuse session for device %s created\n", cuse_device->dev_name);
	return 0;
}

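/*
 * Device thread: single-threaded fuse session loop. poll() with a timeout is
 * used instead of blocking in fuse_session_receive_buf() so that
 * fuse_session_exited() is re-checked periodically and the thread can wind
 * down after fuse_session_exit() is called from another thread.
 */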
static void *
cuse_thread(void *arg)
{
	struct cuse_device *cuse_device = arg;
	int rc;
	struct fuse_buf buf = { .mem = NULL };
	struct pollfd fds;
	int timeout_msecs = 500;

	spdk_unaffinitize_thread();

	/* Receive and process fuse requests */
	fds.fd = fuse_session_fd(cuse_device->session);
	fds.events = POLLIN;
	while (!fuse_session_exited(cuse_device->session)) {
		rc = poll(&fds, 1, timeout_msecs);
		if (rc <= 0) {
			continue;
		}
		rc = fuse_session_receive_buf(cuse_device->session, &buf);
		if (rc > 0) {
			fuse_session_process_buf(cuse_device->session, &buf);
		}
	}
	free(buf.mem);
	fuse_session_reset(cuse_device->session);
	pthread_exit(NULL);
}

static struct cuse_device *nvme_cuse_get_cuse_ns_device(struct spdk_nvme_ctrlr *ctrlr,
		uint32_t nsid);

/*****************************************************************************
 * CUSE devices management
 */

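/*
 * Create the /dev/spdk/nvmeXnY character device for one active namespace;
 * no-op if a device for this nsid already exists.
 */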
static int
cuse_nvme_ns_start(struct cuse_device *ctrlr_device, uint32_t nsid)
{
	struct cuse_device *ns_device;
	int rv;

	ns_device = nvme_cuse_get_cuse_ns_device(ctrlr_device->ctrlr, nsid);
	if (ns_device != NULL) {
		return 0;
	}

	ns_device = calloc(1, sizeof(struct cuse_device));
	if (ns_device == NULL) {
		return -ENOMEM;
	}

	ns_device->ctrlr = ctrlr_device->ctrlr;
	ns_device->ctrlr_device = ctrlr_device;
	ns_device->nsid = nsid;
	rv = snprintf(ns_device->dev_name, sizeof(ns_device->dev_name), "%sn%d",
		      ctrlr_device->dev_name, ns_device->nsid);
	/* snprintf() reports truncation via a return value >= the buffer size */
	if (rv < 0 || rv >= (int)sizeof(ns_device->dev_name)) {
		SPDK_ERRLOG("Device name too long.\n");
		free(ns_device);
		return -ENAMETOOLONG;
	}
	rv = cuse_session_create(ns_device);
	if (rv != 0) {
		free(ns_device);
		return rv;
	}
	rv = pthread_create(&ns_device->tid, NULL, cuse_thread, ns_device);
	if (rv != 0) {
		SPDK_ERRLOG("pthread_create failed\n");
		free(ns_device);
		return -rv;
	}
	TAILQ_INSERT_TAIL(&ctrlr_device->ns_devices, ns_device, tailq);

	return 0;
}

static void
cuse_nvme_ns_stop(struct cuse_device *ctrlr_device, struct cuse_device *ns_device)
{
	if (ns_device->session != NULL) {
		fuse_session_exit(ns_device->session);
	}
	pthread_join(ns_device->tid, NULL);
	TAILQ_REMOVE(&ctrlr_device->ns_devices, ns_device, tailq);
	if (ns_device->session != NULL) {
		cuse_lowlevel_teardown(ns_device->session);
	}
	free(ns_device);
}

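/*
 * Claim an index for the controller device using a lock file
 * (/var/tmp/spdk_nvme_cuse_lock_<index>) so that multiple SPDK processes do
 * not create clashing /dev/spdk/nvmeX nodes. The claiming process writes its
 * PID into the file and holds an fcntl() write lock on it for the lifetime
 * of the device.
 */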
static int
nvme_cuse_claim(struct cuse_device *ctrlr_device, uint32_t index)
{
	int dev_fd;
	int pid;
	void *dev_map;
	struct flock cusedev_lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,
	};

	snprintf(ctrlr_device->lock_name, sizeof(ctrlr_device->lock_name),
		 "/var/tmp/spdk_nvme_cuse_lock_%" PRIu32, index);

	dev_fd = open(ctrlr_device->lock_name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (dev_fd == -1) {
		SPDK_ERRLOG("could not open %s\n", ctrlr_device->lock_name);
		return -errno;
	}

	if (ftruncate(dev_fd, sizeof(int)) != 0) {
		SPDK_ERRLOG("could not truncate %s\n", ctrlr_device->lock_name);
		close(dev_fd);
		return -errno;
	}

	dev_map = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
		       MAP_SHARED, dev_fd, 0);
	if (dev_map == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap dev %s (%d)\n", ctrlr_device->lock_name, errno);
		close(dev_fd);
		return -errno;
	}

	if (fcntl(dev_fd, F_SETLK, &cusedev_lock) != 0) {
		pid = *(int *)dev_map;
		SPDK_ERRLOG("Cannot create lock on device %s, probably"
			    " process %d has claimed it\n", ctrlr_device->lock_name, pid);
		munmap(dev_map, sizeof(int));
		close(dev_fd);
		/* F_SETLK returns unspecified errnos, normalize them */
		return -EACCES;
	}

	*(int *)dev_map = (int)getpid();
	munmap(dev_map, sizeof(int));
	ctrlr_device->claim_fd = dev_fd;
	ctrlr_device->index = index;
	/* Keep dev_fd open to maintain the lock. */
	return 0;
}

static void
nvme_cuse_unclaim(struct cuse_device *ctrlr_device)
{
	close(ctrlr_device->claim_fd);
	ctrlr_device->claim_fd = -1;
	unlink(ctrlr_device->lock_name);
}

static void
cuse_nvme_ctrlr_stop(struct cuse_device *ctrlr_device)
{
	struct cuse_device *ns_device, *tmp;

	TAILQ_FOREACH_SAFE(ns_device, &ctrlr_device->ns_devices, tailq, tmp) {
		cuse_nvme_ns_stop(ctrlr_device, ns_device);
	}

	assert(TAILQ_EMPTY(&ctrlr_device->ns_devices));

	fuse_session_exit(ctrlr_device->session);
	pthread_join(ctrlr_device->tid, NULL);
	TAILQ_REMOVE(&g_ctrlr_ctx_head, ctrlr_device, tailq);
	spdk_bit_array_clear(g_ctrlr_started, ctrlr_device->index);
	if (spdk_bit_array_count_set(g_ctrlr_started) == 0) {
		spdk_bit_array_free(&g_ctrlr_started);
	}
	nvme_cuse_unclaim(ctrlr_device);
	if (ctrlr_device->session != NULL) {
		cuse_lowlevel_teardown(ctrlr_device->session);
	}
	free(ctrlr_device);
}

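/*
 * Reconcile the CUSE namespace devices with the controller's current active
 * namespace list: stop devices whose namespace went away, then create
 * devices for any newly active namespaces.
 */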
static int
cuse_nvme_ctrlr_update_namespaces(struct cuse_device *ctrlr_device)
{
	struct cuse_device *ns_device, *tmp;
	uint32_t nsid;

	/* Remove namespaces that have disappeared */
	TAILQ_FOREACH_SAFE(ns_device, &ctrlr_device->ns_devices, tailq, tmp) {
		if (!spdk_nvme_ctrlr_is_active_ns(ctrlr_device->ctrlr, ns_device->nsid)) {
			cuse_nvme_ns_stop(ctrlr_device, ns_device);
		}
	}

	/* Add new namespaces */
	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr_device->ctrlr);
	while (nsid != 0) {
		if (cuse_nvme_ns_start(ctrlr_device, nsid) < 0) {
			SPDK_ERRLOG("Cannot start CUSE namespace device\n");
			return -1;
		}

		nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr_device->ctrlr, nsid);
	}

	return 0;
}

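/*
 * Create the controller character device (/dev/spdk/nvmeX, where X is the
 * first free claimed index) and one device per active namespace.
 */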
static int
nvme_cuse_start(struct spdk_nvme_ctrlr *ctrlr)
{
	int rv = 0;
	struct cuse_device *ctrlr_device;

	SPDK_NOTICELOG("Creating cuse device for controller\n");

	if (g_ctrlr_started == NULL) {
		g_ctrlr_started = spdk_bit_array_create(128);
		if (g_ctrlr_started == NULL) {
			SPDK_ERRLOG("Cannot create bit array\n");
			return -ENOMEM;
		}
	}

	ctrlr_device = (struct cuse_device *)calloc(1, sizeof(struct cuse_device));
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot allocate memory for ctrlr_device\n");
		rv = -ENOMEM;
		goto free_device;
	}

	ctrlr_device->ctrlr = ctrlr;

	/* Check if device already exists, if not increment index until success */
	ctrlr_device->index = 0;
	while (1) {
		ctrlr_device->index = spdk_bit_array_find_first_clear(g_ctrlr_started, ctrlr_device->index);
		if (ctrlr_device->index == UINT32_MAX) {
			SPDK_ERRLOG("Too many registered controllers\n");
			rv = -ENOSPC;
			goto free_device;
		}

		if (nvme_cuse_claim(ctrlr_device, ctrlr_device->index) == 0) {
			break;
		}
		ctrlr_device->index++;
	}
	spdk_bit_array_set(g_ctrlr_started, ctrlr_device->index);
	snprintf(ctrlr_device->dev_name, sizeof(ctrlr_device->dev_name), "spdk/nvme%d",
		 ctrlr_device->index);

	rv = cuse_session_create(ctrlr_device);
	if (rv != 0) {
		goto clear_and_free;
	}

	rv = pthread_create(&ctrlr_device->tid, NULL, cuse_thread, ctrlr_device);
	if (rv != 0) {
		SPDK_ERRLOG("pthread_create failed\n");
		rv = -rv;
		goto clear_and_free;
	}

	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, ctrlr_device, tailq);

	TAILQ_INIT(&ctrlr_device->ns_devices);

	/* Start all active namespaces */
	if (cuse_nvme_ctrlr_update_namespaces(ctrlr_device) < 0) {
		SPDK_ERRLOG("Cannot start CUSE namespace devices\n");
		cuse_nvme_ctrlr_stop(ctrlr_device);
		/* cuse_nvme_ctrlr_stop() already freed ctrlr_device, cleared its
		 * index and released g_ctrlr_started if it was the last user, so
		 * jumping to clear_and_free here would double free. */
		return -1;
	}

	return 0;

clear_and_free:
	spdk_bit_array_clear(g_ctrlr_started, ctrlr_device->index);
free_device:
	free(ctrlr_device);
	if (spdk_bit_array_count_set(g_ctrlr_started) == 0) {
		spdk_bit_array_free(&g_ctrlr_started);
	}
	return rv;
}

static struct cuse_device *
nvme_cuse_get_cuse_ctrlr_device(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device = NULL;

	TAILQ_FOREACH(ctrlr_device, &g_ctrlr_ctx_head, tailq) {
		if (ctrlr_device->ctrlr == ctrlr) {
			break;
		}
	}

	return ctrlr_device;
}

static struct cuse_device *
nvme_cuse_get_cuse_ns_device(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct cuse_device *ctrlr_device = NULL;
	struct cuse_device *ns_device;

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		return NULL;
	}

	TAILQ_FOREACH(ns_device, &ctrlr_device->ns_devices, tailq) {
		if (ns_device->nsid == nsid) {
			return ns_device;
		}
	}

	return NULL;
}

static void
nvme_cuse_stop(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot find associated CUSE device\n");
		pthread_mutex_unlock(&g_cuse_mtx);
		return;
	}

	cuse_nvme_ctrlr_stop(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);
}

static void
nvme_cuse_update(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return;
	}

	cuse_nvme_ctrlr_update_namespaces(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);
}

static struct nvme_io_msg_producer cuse_nvme_io_msg_producer = {
	.name = "cuse",
	.stop = nvme_cuse_stop,
	.update = nvme_cuse_update,
};

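/*
 * Typical usage, as a minimal sketch (error handling elided; assumes a
 * controller handle obtained from spdk_nvme_probe()/spdk_nvme_connect(), and
 * that the application keeps polling the controller, e.g. via
 * spdk_nvme_ctrlr_process_admin_completions(), so queued io msgs get
 * processed):
 *
 *	if (spdk_nvme_cuse_register(ctrlr) == 0) {
 *		char name[128];
 *		size_t len = sizeof(name);
 *
 *		if (spdk_nvme_cuse_get_ctrlr_name(ctrlr, name, &len) == 0) {
 *			printf("CUSE device: /dev/%s\n", name);
 *		}
 *		...
 *		spdk_nvme_cuse_unregister(ctrlr);
 *	}
 */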
int
spdk_nvme_cuse_register(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;

	rc = nvme_io_msg_ctrlr_register(ctrlr, &cuse_nvme_io_msg_producer);
	if (rc) {
		return rc;
	}

	pthread_mutex_lock(&g_cuse_mtx);

	rc = nvme_cuse_start(ctrlr);
	if (rc) {
		nvme_io_msg_ctrlr_unregister(ctrlr, &cuse_nvme_io_msg_producer);
	}

	pthread_mutex_unlock(&g_cuse_mtx);

	return rc;
}

int
spdk_nvme_cuse_unregister(struct spdk_nvme_ctrlr *ctrlr)
{
	struct cuse_device *ctrlr_device;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		SPDK_ERRLOG("Cannot find associated CUSE device\n");
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	cuse_nvme_ctrlr_stop(ctrlr_device);

	pthread_mutex_unlock(&g_cuse_mtx);

	nvme_io_msg_ctrlr_unregister(ctrlr, &cuse_nvme_io_msg_producer);

	return 0;
}

void
spdk_nvme_cuse_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_cuse_update(ctrlr);
}

int
spdk_nvme_cuse_get_ctrlr_name(struct spdk_nvme_ctrlr *ctrlr, char *name, size_t *size)
{
	struct cuse_device *ctrlr_device;
	size_t req_len;

	pthread_mutex_lock(&g_cuse_mtx);

	ctrlr_device = nvme_cuse_get_cuse_ctrlr_device(ctrlr);
	if (!ctrlr_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	req_len = strnlen(ctrlr_device->dev_name, sizeof(ctrlr_device->dev_name));
	if (*size < req_len) {
		*size = req_len;
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENOSPC;
	}
	snprintf(name, req_len + 1, "%s", ctrlr_device->dev_name);

	pthread_mutex_unlock(&g_cuse_mtx);

	return 0;
}

int
spdk_nvme_cuse_get_ns_name(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, char *name, size_t *size)
{
	struct cuse_device *ns_device;
	size_t req_len;

	pthread_mutex_lock(&g_cuse_mtx);

	ns_device = nvme_cuse_get_cuse_ns_device(ctrlr, nsid);
	if (!ns_device) {
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENODEV;
	}

	req_len = strnlen(ns_device->dev_name, sizeof(ns_device->dev_name));
	if (*size < req_len) {
		*size = req_len;
		pthread_mutex_unlock(&g_cuse_mtx);
		return -ENOSPC;
	}
	snprintf(name, req_len + 1, "%s", ns_device->dev_name);

	pthread_mutex_unlock(&g_cuse_mtx);

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(nvme_cuse)