/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "nvme/nvme_cuse.c"
#include "common/lib/nvme/common_stubs.h"

SPDK_LOG_REGISTER_COMPONENT(nvme)

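/*
 * Stub out the NVMe driver helpers and libfuse entry points that nvme_cuse.c
 * calls but that these tests do not need to exercise.
 */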
DEFINE_STUB(spdk_nvme_ctrlr_cmd_admin_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, void *md_buf,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_reset, int, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_reset_subsystem, int, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ns_cmd_read_with_md, int, (struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		void *payload, void *metadata,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_ns_cmd_write_with_md, int, (struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		void *payload, void *metadata,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_ns_get_num_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB_V(spdk_unaffinitize_thread, (void));

DEFINE_STUB(nvme_io_msg_ctrlr_register, int,
	    (struct spdk_nvme_ctrlr *ctrlr,
	     struct nvme_io_msg_producer *io_msg_producer), 0);

DEFINE_STUB_V(nvme_io_msg_ctrlr_unregister,
	      (struct spdk_nvme_ctrlr *ctrlr,
	       struct nvme_io_msg_producer *io_msg_producer));

DEFINE_STUB(spdk_nvme_ctrlr_is_active_ns, bool,
	    (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid), true);

DEFINE_STUB(fuse_reply_err, int, (fuse_req_t req, int err), 0);
DEFINE_STUB(pthread_join, int, (pthread_t tid, void **val), 0);

DEFINE_STUB_V(nvme_ctrlr_update_namespaces, (struct spdk_nvme_ctrlr *ctrlr));

DEFINE_STUB_V(fuse_session_reset, (struct fuse_session *session));

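/* Minimal fuse_session stand-in so the CUSE session code can run without libfuse. */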
struct fuse_session {
	int exited;
	int fd;
};

int
fuse_session_fd(struct fuse_session *session)
{
	return session->fd;
}

void
fuse_session_exit(struct fuse_session *session)
{
	session->exited = 1;
}

int
fuse_session_exited(struct fuse_session *session)
{
	return session->exited;
}

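/* Fake CUSE session setup/teardown; an eventfd stands in for the real session fd. */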
struct fuse_session *
cuse_lowlevel_setup(int argc, char *argv[], const struct cuse_info *ci,
		    const struct cuse_lowlevel_ops *clop, int *multithreaded, void *userdata)
{
	struct fuse_session *fuse_session = calloc(1, sizeof(struct fuse_session));
	if (fuse_session == NULL) {
		return NULL;
	}
	fuse_session->fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fuse_session->fd < 0) {
		free(fuse_session);
		return NULL;
	}
	return fuse_session;
}

void
cuse_lowlevel_teardown(struct fuse_session *fuse_session)
{
	close(fuse_session->fd);
	free(fuse_session);
}

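/*
 * Namespace tree comparator so tests can populate ctrlr->ns with RB_INSERT
 * and the mocked spdk_nvme_ctrlr_get_ns() can look namespaces up by ID.
 */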
static int
nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
{
	return ns1->id - ns2->id;
}

RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);

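/* Captured by the nvme_io_msg_send() mock below so tests can inspect what was submitted. */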
struct cuse_io_ctx *g_ut_ctx;
struct spdk_nvme_ctrlr *g_ut_ctrlr;
uint32_t g_ut_nsid;

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cdata.nn;
}

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid > ctrlr->cdata.nn) {
		return 0;
	}

	return nsid + 1;
}

DEFINE_RETURN_MOCK(nvme_io_msg_send, int);
int
nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr,
		 uint32_t nsid, spdk_nvme_io_msg_fn fn, void *arg)
{
	g_ut_ctx = arg;
	g_ut_nsid = nsid;
	g_ut_ctrlr = ctrlr;

	HANDLE_RETURN_MOCK(nvme_io_msg_send);
	return 0;
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->sector_size;
}

static struct spdk_nvme_ns g_inactive_ns = {};

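/*
 * Return the namespace the test inserted into ctrlr->ns, or a static
 * placeholder for in-range namespaces the test did not register.
 */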
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvme_ns tmp;
	struct spdk_nvme_ns *ns;

	if (nsid < 1 || nsid > ctrlr->cdata.nn) {
		return NULL;
	}

	tmp.id = nsid;
	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);

	if (ns == NULL) {
		return &g_inactive_ns;
	}

	return ns;
}

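/* fuse_req_userdata() normally yields the per-device context; hand back the test's cuse_device. */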
struct cuse_device *g_cuse_device;
DEFINE_RETURN_MOCK(fuse_req_userdata, void *);
void *
fuse_req_userdata(fuse_req_t req)
{
	return g_cuse_device;
}

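/*
 * Verify that cuse_nvme_submit_io_read()/_write() build a cuse_io_ctx with the
 * expected LBA range and data buffer (and no metadata) before passing it to
 * nvme_io_msg_send().
 */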
static void
test_cuse_nvme_submit_io_read_write(void)
{
	struct cuse_device cuse_device = {};
	struct fuse_file_info fi = {};
	struct nvme_user_io *user_io = NULL;
	char arg[1024] = {};
	fuse_req_t req = (void *)0xDEEACDFF;
	unsigned flags = FUSE_IOCTL_DIR;
	uint32_t block_size = 4096;
	uint32_t md_size = 0;
	size_t in_bufsz = 4096;
	size_t out_bufsz = 4096;

	/* Allocate memory to avoid stack buffer overflow */
	user_io = calloc(3, 4096);
	SPDK_CU_ASSERT_FATAL(user_io != NULL);
	cuse_device.ctrlr = (void *)0xDEADBEEF;
	cuse_device.nsid = 1;
	user_io->slba = 1024;
	user_io->nblocks = 1;
	g_ut_ctx = NULL;

	/* Submit IO read */
	cuse_nvme_submit_io_read(&cuse_device, req, 0, arg, &fi, flags,
				 block_size, md_size, user_io, in_bufsz, out_bufsz);
	CU_ASSERT(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->req == req);
	CU_ASSERT(g_ut_ctx->lba == user_io->slba);
	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
	CU_ASSERT(g_ut_ctx->data_len ==
		  (int)((user_io->nblocks + 1) * block_size));
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len == 0);
	CU_ASSERT(g_ut_ctx->metadata == NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0);
	CU_ASSERT(g_ut_ctx->apptag == 0);
	cuse_io_ctx_free(g_ut_ctx);

	/* Submit IO write */
	g_ut_ctx = NULL;

	cuse_nvme_submit_io_write(&cuse_device, req, 0, arg, &fi, flags,
				  block_size, md_size, user_io, in_bufsz, out_bufsz);
	CU_ASSERT(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->req == req);
	CU_ASSERT(g_ut_ctx->lba == user_io->slba);
	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
	CU_ASSERT(g_ut_ctx->data_len ==
		  (int)((user_io->nblocks + 1) * block_size));
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len == 0);
	CU_ASSERT(g_ut_ctx->metadata == NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0);
	CU_ASSERT(g_ut_ctx->apptag == 0);
	cuse_io_ctx_free(g_ut_ctx);
	free(user_io);
}

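/* Same as above, but with a metadata size so the metadata buffer, appmask and apptag are carried through. */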
static void
test_cuse_nvme_submit_io_read_write_with_md(void)
{
	struct cuse_device cuse_device = {};
	struct fuse_file_info fi = {};
	struct nvme_user_io *user_io = NULL;
	char arg[1024] = {};
	fuse_req_t req = (void *)0xDEEACDFF;
	unsigned flags = FUSE_IOCTL_DIR;
	uint32_t block_size = 4096;
	uint32_t md_size = 8;
	size_t in_bufsz = 4096;
	size_t out_bufsz = 4096;

	/* Allocate memory to avoid stack buffer overflow */
	user_io = calloc(4, 4096);
	SPDK_CU_ASSERT_FATAL(user_io != NULL);
	cuse_device.ctrlr = (void *)0xDEADBEEF;
	cuse_device.nsid = 1;
	user_io->slba = 1024;
	user_io->nblocks = 1;
	user_io->appmask = 0xF00D;
	user_io->apptag = 0xC0DE;
	user_io->metadata = 0xDEADDEAD;
	g_ut_ctx = NULL;

	/* Submit IO read */
	cuse_nvme_submit_io_read(&cuse_device, req, 0, arg, &fi, flags,
				 block_size, md_size, user_io, in_bufsz, out_bufsz);
	CU_ASSERT(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->req == req);
	CU_ASSERT(g_ut_ctx->lba == user_io->slba);
	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
	CU_ASSERT(g_ut_ctx->data_len ==
		  (int)((user_io->nblocks + 1) * block_size));
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len ==
		  (int)((user_io->nblocks + 1) * md_size));
	CU_ASSERT(g_ut_ctx->metadata != NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0xF00D);
	CU_ASSERT(g_ut_ctx->apptag == 0xC0DE);
	cuse_io_ctx_free(g_ut_ctx);

	/* Submit IO write */
	g_ut_ctx = NULL;

	cuse_nvme_submit_io_write(&cuse_device, req, 0, arg, &fi, flags,
				  block_size, md_size, user_io, in_bufsz, out_bufsz);
	CU_ASSERT(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->req == req);
	CU_ASSERT(g_ut_ctx->lba == user_io->slba);
	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
	CU_ASSERT(g_ut_ctx->data_len ==
		  (int)((user_io->nblocks + 1) * block_size));
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len ==
		  (int)((user_io->nblocks + 1) * md_size));
	CU_ASSERT(g_ut_ctx->metadata != NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0xF00D);
	CU_ASSERT(g_ut_ctx->apptag == 0xC0DE);
	cuse_io_ctx_free(g_ut_ctx);
	free(user_io);
}

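/*
 * Verify that cuse_nvme_passthru_cmd_send() copies the ioctl's command fields
 * and data length into the cuse_io_ctx handed to nvme_io_msg_send().
 */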
static void
test_cuse_nvme_submit_passthru_cmd(void)
{
	struct nvme_passthru_cmd *passthru_cmd = NULL;
	fuse_req_t req = (void *)0xDEEACDFF;

	passthru_cmd = calloc(1, sizeof(struct nvme_passthru_cmd));
	g_cuse_device = calloc(1, sizeof(struct cuse_device));

	/* Use fatal or we'll segfault if we didn't get memory */
	SPDK_CU_ASSERT_FATAL(passthru_cmd != NULL);
	SPDK_CU_ASSERT_FATAL(g_cuse_device != NULL);
	g_cuse_device->ctrlr = (void *)0xDEADBEEF;

	g_ut_ctx = NULL;
	/* Passthrough command */
	passthru_cmd->opcode       = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	passthru_cmd->nsid         = 1;
	passthru_cmd->data_len     = 512;
	passthru_cmd->metadata_len = 0;
	passthru_cmd->cdw10        = 0xc0de1010;
	passthru_cmd->cdw11        = 0xc0de1111;
	passthru_cmd->cdw12        = 0xc0de1212;
	passthru_cmd->cdw13        = 0xc0de1313;
	passthru_cmd->cdw14        = 0xc0de1414;
	passthru_cmd->cdw15        = 0xc0de1515;

	/* Send IO Command IOCTL */
	cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, NULL, NVME_IOCTL_IO_CMD);
	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata == NULL);
	CU_ASSERT(g_ut_ctx->req               == req);
	CU_ASSERT(g_ut_ctx->data_len          == 512);
	CU_ASSERT(g_ut_ctx->metadata_len      == 0);
	CU_ASSERT(g_ut_ctx->nvme_cmd.opc      == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	CU_ASSERT(g_ut_ctx->nvme_cmd.nsid     == 1);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw10    == 0xc0de1010);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw11    == 0xc0de1111);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw12    == 0xc0de1212);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw13    == 0xc0de1313);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw14    == 0xc0de1414);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw15    == 0xc0de1515);

	cuse_io_ctx_free(g_ut_ctx);
	free(passthru_cmd);
	free(g_cuse_device);
}

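/* Same passthrough flow, now with a metadata buffer attached. */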
static void
test_cuse_nvme_submit_passthru_cmd_with_md(void)
{
	struct nvme_passthru_cmd *passthru_cmd = NULL;
	fuse_req_t req = (void *)0xDEEACDFF;

	passthru_cmd = calloc(1, sizeof(struct nvme_passthru_cmd));
	g_cuse_device = calloc(1, sizeof(struct cuse_device));

	/* Use fatal or we'll segfault if we didn't get memory */
	SPDK_CU_ASSERT_FATAL(passthru_cmd != NULL);
	SPDK_CU_ASSERT_FATAL(g_cuse_device != NULL);
	g_cuse_device->ctrlr = (void *)0xDEADBEEF;

	g_ut_ctx = NULL;
	/* Passthrough command */
	passthru_cmd->opcode       = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	passthru_cmd->nsid         = 1;
	passthru_cmd->data_len     = 512;
	passthru_cmd->metadata_len = 8;
	passthru_cmd->cdw10        = 0xc0de1010;
	passthru_cmd->cdw11        = 0xc0de1111;
	passthru_cmd->cdw12        = 0xc0de1212;
	passthru_cmd->cdw13        = 0xc0de1313;
	passthru_cmd->cdw14        = 0xc0de1414;
	passthru_cmd->cdw15        = 0xc0de1515;

	/* Send IO Command IOCTL */
	cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, NULL, NVME_IOCTL_IO_CMD);
	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata != NULL);
	CU_ASSERT(g_ut_ctx->req               == req);
	CU_ASSERT(g_ut_ctx->data_len          == 512);
	CU_ASSERT(g_ut_ctx->metadata_len      == 8);
	CU_ASSERT(g_ut_ctx->nvme_cmd.opc      == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	CU_ASSERT(g_ut_ctx->nvme_cmd.nsid     == 1);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw10    == 0xc0de1010);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw11    == 0xc0de1111);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw12    == 0xc0de1212);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw13    == 0xc0de1313);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw14    == 0xc0de1414);
	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw15    == 0xc0de1515);

	cuse_io_ctx_free(g_ut_ctx);
	free(passthru_cmd);
	free(g_cuse_device);
}

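/* Look up per-namespace cuse_devices hanging off a registered controller device. */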
static void
test_nvme_cuse_get_cuse_ns_device(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct cuse_device ctrlr_device = {};
	struct cuse_device ns_device = { .nsid = 1 };
	struct cuse_device *cuse_dev = NULL;

	ctrlr.cdata.nn = 3;
	ctrlr_device.ctrlr = &ctrlr;
	TAILQ_INIT(&ctrlr_device.ns_devices);
	TAILQ_INSERT_TAIL(&ctrlr_device.ns_devices, &ns_device, tailq);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_ctrlr_ctx_head));
	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, &ctrlr_device, tailq);

	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 1);
	CU_ASSERT(cuse_dev == &ns_device);

	/* nsid 2 was not started */
	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 2);
	CU_ASSERT(cuse_dev == NULL);

	/* nsid invalid */
	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 0);
	CU_ASSERT(cuse_dev == NULL);

	TAILQ_REMOVE(&g_ctrlr_ctx_head, &ctrlr_device, tailq);
}

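/*
 * Drive cuse_nvme_submit_io() itself: reads and writes are dispatched to the
 * read/write helpers, while unsupported opcodes are rejected.
 */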
static void
test_cuse_nvme_submit_io(void)
{
	struct cuse_device cuse_device = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct fuse_file_info fi = {};
	struct spdk_nvme_ns ns = {};
	struct nvme_user_io *user_io = NULL;
	char arg[1024] = {};
	fuse_req_t req = (void *)0xDEEACDFF;

	/* Allocate memory to avoid stack buffer overflow */
	user_io = calloc(3, 4096);
	SPDK_CU_ASSERT_FATAL(user_io != NULL);

	RB_INIT(&ctrlr.ns);
	ns.id = 1;
	RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns);

	cuse_device.ctrlr = &ctrlr;
	ctrlr.cdata.nn = 1;
	ns.sector_size = 4096;
	ns.id = 1;
	user_io->slba = 1024;
	user_io->nblocks = 1;
	cuse_device.nsid = 1;
	g_cuse_device = &cuse_device;

	/* Read */
	user_io->opcode = SPDK_NVME_OPC_READ;
	g_ut_ctx = NULL;

	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_nsid == 1);
	CU_ASSERT(g_ut_ctx->req == (void *)0xDEEACDFF);
	CU_ASSERT(g_ut_ctx->lba == 1024);
	CU_ASSERT(g_ut_ctx->lba_count == 2);
	CU_ASSERT(g_ut_ctx->data_len == 2 * 4096);
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len == 0);
	CU_ASSERT(g_ut_ctx->metadata == NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0);
	CU_ASSERT(g_ut_ctx->apptag == 0);

	cuse_io_ctx_free(g_ut_ctx);

	/* Write */
	user_io->opcode = SPDK_NVME_OPC_WRITE;
	g_ut_ctx = NULL;

	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
	CU_ASSERT(g_ut_nsid == 1);
	CU_ASSERT(g_ut_ctx->req == req);
	CU_ASSERT(g_ut_ctx->lba == 1024);
	CU_ASSERT(g_ut_ctx->lba_count == 2);
	CU_ASSERT(g_ut_ctx->data_len == 2 * 4096);
	CU_ASSERT(g_ut_ctx->data != NULL);
	CU_ASSERT(g_ut_ctx->metadata_len == 0);
	CU_ASSERT(g_ut_ctx->metadata == NULL);
	CU_ASSERT(g_ut_ctx->appmask == 0);
	CU_ASSERT(g_ut_ctx->apptag == 0);
	cuse_io_ctx_free(g_ut_ctx);

	/* Invalid */
	g_ut_ctx = NULL;
	user_io->opcode = SPDK_NVME_OPC_FLUSH;

	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
	SPDK_CU_ASSERT_FATAL(g_ut_ctx == NULL);

	free(user_io);
}

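/* A reset issued through a namespace node is rejected; through the controller node it is forwarded. */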
static void
test_cuse_nvme_reset(void)
{
	struct cuse_device cuse_device = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	fuse_req_t req = (void *)0xDEADBEEF;

	cuse_device.ctrlr = &ctrlr;
	g_cuse_device = &cuse_device;

	/* Invalid nsid */
	cuse_device.nsid = 1;
	g_ut_ctx = NULL;

	cuse_nvme_reset(req, 0, NULL, NULL, 0, NULL, 4096, 4096);
	CU_ASSERT(g_ut_ctx == NULL);

	/* Valid nsid, check IO message sent value */
	cuse_device.nsid = 0;

	cuse_nvme_reset(req, 0, NULL, NULL, 0, NULL, 4096, 4096);
	CU_ASSERT(g_ut_ctx == (void *)0xDEADBEEF);
	CU_ASSERT(g_ut_ctrlr == &ctrlr);
	CU_ASSERT(g_ut_nsid == 0);
}

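/* Register a controller, then make sure nvme_cuse_stop() tears its CUSE state back down. */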
static void
test_nvme_cuse_stop(void)
{
	int rc;
	struct spdk_nvme_ctrlr ctrlr = {};
	ctrlr.cdata.nn = 2;

	/* Allocate memory for nvme_cuse_stop() to free. */
	rc = spdk_nvme_cuse_register(&ctrlr);
	CU_ASSERT(rc == 0);

	nvme_cuse_stop(&ctrlr);
	CU_ASSERT(g_ctrlr_started == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_ctrlr_ctx_head));
	while (g_device_fdgrp != NULL) {
		sched_yield();
	}
}

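/* Exercise the name lookup API: missing controller/namespace, undersized buffer, and the happy path. */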
static void
test_spdk_nvme_cuse_get_ctrlr_name(void)
{
	int rc_ctrlr = 0;
	int rc_ns = 0;
	uint32_t nsid = 0;
	const uint32_t NSID1 = 12;
	const uint32_t NSID2 = 22;
	size_t name_size = 0;

	char name_ctrlr[128] = "unit_test_ctrlr_dev_name";
	char name_ns_1[128] = "unit_test_ns_dev_name_1";
	char name_ns_2[128] = "unit_test_ns_dev_name_2";

	char rt_name_ctrlr[128];
	char rt_name_ns[128];

	struct spdk_nvme_ctrlr ctrlr = {};
	struct cuse_device ctrlr_device = {};
	struct cuse_device ns_dev1 = {};
	struct cuse_device ns_dev2 = {};

	ctrlr_device.ctrlr = &ctrlr;
	memcpy(ctrlr_device.dev_name, name_ctrlr, sizeof(ctrlr_device.dev_name));

	TAILQ_INIT(&ctrlr_device.ns_devices);
	ns_dev1.nsid = NSID1;
	ns_dev2.nsid = NSID2;

	memcpy(ns_dev1.dev_name, name_ns_1, sizeof(ns_dev1.dev_name));
	memcpy(ns_dev2.dev_name, name_ns_2, sizeof(ns_dev2.dev_name));
	TAILQ_INIT(&g_ctrlr_ctx_head);
	TAILQ_INIT(&ctrlr_device.ns_devices);
	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, &ctrlr_device, tailq);
	TAILQ_INSERT_TAIL(&ctrlr_device.ns_devices, &ns_dev1, tailq);
	TAILQ_INSERT_TAIL(&ctrlr_device.ns_devices, &ns_dev2, tailq);

	/* Test case: NULL spdk_nvme_ctrlr, so no cuse_device can be found. Expect: -ENODEV */
	rc_ctrlr = spdk_nvme_cuse_get_ctrlr_name(NULL, rt_name_ctrlr, &name_size);
	CU_ASSERT(rc_ctrlr == -ENODEV);
	rc_ns = spdk_nvme_cuse_get_ns_name(NULL, nsid, rt_name_ctrlr, &name_size);
	CU_ASSERT(rc_ns == -ENODEV);

	/* Test case: nsid with no matching cuse_device. Expect: -ENODEV */
	rc_ns = spdk_nvme_cuse_get_ns_name(&ctrlr, nsid, rt_name_ns, &name_size);
	CU_ASSERT(rc_ns == -ENODEV);

	/* Test case: name buffer size smaller than the stored dev_name. Expect: -ENOSPC */
	name_size = 0;
	rc_ctrlr = spdk_nvme_cuse_get_ctrlr_name(&ctrlr, rt_name_ctrlr, &name_size);
	CU_ASSERT(rc_ctrlr == -ENOSPC);
	name_size = 0;
	rc_ns = spdk_nvme_cuse_get_ns_name(&ctrlr, NSID1, rt_name_ns, &name_size);
	CU_ASSERT(rc_ns == -ENOSPC);

	/* Test case: all parameters valid. Expect: success and matching names */
	name_size = 128;
	rc_ctrlr = spdk_nvme_cuse_get_ctrlr_name(&ctrlr, rt_name_ctrlr, &name_size);
	CU_ASSERT(rc_ctrlr == 0);
	rc_ns = spdk_nvme_cuse_get_ns_name(&ctrlr, NSID1, rt_name_ns, &name_size);
	CU_ASSERT(rc_ns == 0);
	CU_ASSERT(strncmp(rt_name_ctrlr, name_ctrlr, sizeof(name_ctrlr)) == 0);
	CU_ASSERT(strncmp(rt_name_ns, name_ns_1, sizeof(name_ns_1)) == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_cuse", NULL, NULL);
	CU_ADD_TEST(suite, test_cuse_nvme_submit_io_read_write);
	CU_ADD_TEST(suite, test_cuse_nvme_submit_io_read_write_with_md);
	CU_ADD_TEST(suite, test_cuse_nvme_submit_passthru_cmd);
	CU_ADD_TEST(suite, test_cuse_nvme_submit_passthru_cmd_with_md);
	CU_ADD_TEST(suite, test_nvme_cuse_get_cuse_ns_device);
	CU_ADD_TEST(suite, test_cuse_nvme_submit_io);
	CU_ADD_TEST(suite, test_cuse_nvme_reset);
	CU_ADD_TEST(suite, test_nvme_cuse_stop);
	CU_ADD_TEST(suite, test_spdk_nvme_cuse_get_ctrlr_name);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}