xref: /spdk/test/unit/lib/nvme/nvme_cuse.c/nvme_cuse_ut.c (revision cdb0726b95631d46eaf4f2e39ddb6533f150fd27)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "nvme/nvme_cuse.c"
9 #include "common/lib/nvme/common_stubs.h"
10 
SPDK_LOG_REGISTER_COMPONENT(nvme)

/*
 * Stubs for the external symbols that nvme_cuse.c (included above as a
 * translation unit) depends on.  Each DEFINE_STUB provides a no-op body
 * returning the listed default value; DEFINE_STUB_V is the void variant.
 */

DEFINE_STUB(spdk_nvme_ctrlr_alloc_cmb_io_buffer, void *,
	    (struct spdk_nvme_ctrlr *ctrlr, size_t size), NULL);

/* Raw admin/IO command submission paths always report success (0). */
DEFINE_STUB(spdk_nvme_ctrlr_cmd_admin_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, void *md_buf,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_reset, int, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_reset_subsystem, int, (struct spdk_nvme_ctrlr *ctrlr), 0);

/* Namespace read/write with metadata: accepted and immediately "complete". */
DEFINE_STUB(spdk_nvme_ns_cmd_read_with_md, int, (struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		void *payload, void *metadata,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_ns_cmd_write_with_md, int, (struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		void *payload, void *metadata,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_ns_get_num_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB_V(spdk_unaffinitize_thread, (void));

/* IO message producer (un)registration is treated as always successful. */
DEFINE_STUB(nvme_io_msg_ctrlr_register, int,
	    (struct spdk_nvme_ctrlr *ctrlr,
	     struct nvme_io_msg_producer *io_msg_producer), 0);

DEFINE_STUB_V(nvme_io_msg_ctrlr_unregister,
	      (struct spdk_nvme_ctrlr *ctrlr,
	       struct nvme_io_msg_producer *io_msg_producer));

/* Every namespace ID is reported active unless a test overrides this. */
DEFINE_STUB(spdk_nvme_ctrlr_is_active_ns, bool,
	    (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid), true);

/* libfuse and pthread entry points touched during CUSE session teardown. */
DEFINE_STUB(fuse_reply_err, int, (fuse_req_t req, int err), 0);
DEFINE_STUB_V(fuse_session_exit, (struct fuse_session *se));
DEFINE_STUB(pthread_join, int, (pthread_t tid, void **val), 0);

DEFINE_STUB_V(nvme_ctrlr_update_namespaces, (struct spdk_nvme_ctrlr *ctrlr));
63 
64 static int
65 nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
66 {
67 	return ns1->id - ns2->id;
68 }
69 
70 RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
71 
72 struct cuse_io_ctx *g_ut_ctx;
73 struct spdk_nvme_ctrlr *g_ut_ctrlr;
74 uint32_t g_ut_nsid;
75 
76 uint32_t
77 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
78 {
79 	return ctrlr->cdata.nn;
80 }
81 
/*
 * Stub: namespace 1 is always reported as the first active namespace.
 * NOTE(review): this returns 1 even when ctrlr->cdata.nn == 0 — the tests
 * in this file never exercise a controller with zero namespaces.
 */
uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}
87 
88 uint32_t
89 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
90 {
91 	if (nsid > ctrlr->cdata.nn) {
92 		return 0;
93 	}
94 
95 	return nsid + 1;
96 }
97 
98 DEFINE_RETURN_MOCK(nvme_io_msg_send, int);
99 int
100 nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr,
101 		 uint32_t nsid, spdk_nvme_io_msg_fn fn, void *arg)
102 {
103 	g_ut_ctx = arg;
104 	g_ut_nsid = nsid;
105 	g_ut_ctrlr = ctrlr;
106 
107 	HANDLE_RETURN_MOCK(nvme_io_msg_send);
108 	return 0;
109 }
110 
111 uint32_t
112 spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
113 {
114 	return ns->sector_size;
115 }
116 
117 static struct spdk_nvme_ns g_inactive_ns = {};
118 
119 struct spdk_nvme_ns *
120 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
121 {
122 	struct spdk_nvme_ns tmp;
123 	struct spdk_nvme_ns *ns;
124 
125 	if (nsid < 1 || nsid > ctrlr->cdata.nn) {
126 		return NULL;
127 	}
128 
129 	tmp.id = nsid;
130 	ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
131 
132 	if (ns == NULL) {
133 		return &g_inactive_ns;
134 	}
135 
136 	return ns;
137 }
138 
139 struct cuse_device *g_cuse_device;
140 DEFINE_RETURN_MOCK(fuse_req_userdata, void *);
141 void *
142 fuse_req_userdata(fuse_req_t req)
143 {
144 	return g_cuse_device;
145 }
146 
147 static void
148 test_cuse_nvme_submit_io_read_write(void)
149 {
150 	struct cuse_device cuse_device = {};
151 	struct fuse_file_info fi = {};
152 	struct nvme_user_io *user_io = NULL;
153 	char arg[1024] = {};
154 	fuse_req_t req = (void *)0xDEEACDFF;
155 	unsigned flags = FUSE_IOCTL_DIR;
156 	uint32_t block_size = 4096;
157 	uint32_t md_size = 0;
158 	size_t in_bufsz = 4096;
159 	size_t out_bufsz = 4096;
160 
161 	/* Allocate memory to avoid stack buffer overflow */
162 	user_io = calloc(3, 4096);
163 	SPDK_CU_ASSERT_FATAL(user_io != NULL);
164 	cuse_device.ctrlr = (void *)0xDEADBEEF;
165 	cuse_device.nsid = 1;
166 	user_io->slba = 1024;
167 	user_io->nblocks = 1;
168 	g_ut_ctx = NULL;
169 
170 	/* Submit IO read */
171 	cuse_nvme_submit_io_read(&cuse_device, req, 0, arg, &fi, flags,
172 				 block_size, md_size, user_io, in_bufsz, out_bufsz);
173 	CU_ASSERT(g_ut_ctx != NULL);
174 	CU_ASSERT(g_ut_ctx->req == req);
175 	CU_ASSERT(g_ut_ctx->lba = user_io->slba);
176 	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
177 	CU_ASSERT(g_ut_ctx->data_len ==
178 		  (int)((user_io->nblocks + 1) * block_size));
179 	CU_ASSERT(g_ut_ctx->data != NULL);
180 	CU_ASSERT(g_ut_ctx->metadata_len == 0);
181 	CU_ASSERT(g_ut_ctx->metadata == NULL);
182 	CU_ASSERT(g_ut_ctx->appmask == 0);
183 	CU_ASSERT(g_ut_ctx->apptag == 0);
184 	cuse_io_ctx_free(g_ut_ctx);
185 
186 	/* Submit IO write */
187 	g_ut_ctx = NULL;
188 
189 	cuse_nvme_submit_io_write(&cuse_device, req, 0, arg, &fi, flags,
190 				  block_size, md_size, user_io, in_bufsz, out_bufsz);
191 	CU_ASSERT(g_ut_ctx != NULL);
192 	CU_ASSERT(g_ut_ctx->req == req);
193 	CU_ASSERT(g_ut_ctx->lba = user_io->slba);
194 	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
195 	CU_ASSERT(g_ut_ctx->data_len ==
196 		  (int)((user_io->nblocks + 1) * block_size));
197 	CU_ASSERT(g_ut_ctx->data != NULL);
198 	CU_ASSERT(g_ut_ctx->metadata_len == 0);
199 	CU_ASSERT(g_ut_ctx->metadata == NULL);
200 	CU_ASSERT(g_ut_ctx->appmask == 0);
201 	CU_ASSERT(g_ut_ctx->apptag == 0);
202 	cuse_io_ctx_free(g_ut_ctx);
203 	free(user_io);
204 }
205 
206 static void
207 test_cuse_nvme_submit_io_read_write_with_md(void)
208 {
209 	struct cuse_device cuse_device = {};
210 	struct fuse_file_info fi = {};
211 	struct nvme_user_io *user_io = NULL;
212 	char arg[1024] = {};
213 	fuse_req_t req = (void *)0xDEEACDFF;
214 	unsigned flags = FUSE_IOCTL_DIR;
215 	uint32_t block_size = 4096;
216 	uint32_t md_size = 8;
217 	size_t in_bufsz = 4096;
218 	size_t out_bufsz = 4096;
219 
220 	/* Allocate memory to avoid stack buffer overflow */
221 	user_io = calloc(4, 4096);
222 	SPDK_CU_ASSERT_FATAL(user_io != NULL);
223 	cuse_device.ctrlr = (void *)0xDEADBEEF;
224 	cuse_device.nsid = 1;
225 	user_io->slba = 1024;
226 	user_io->nblocks = 1;
227 	user_io->appmask = 0xF00D;
228 	user_io->apptag = 0xC0DE;
229 	user_io->metadata = 0xDEADDEAD;
230 	g_ut_ctx = NULL;
231 
232 	/* Submit IO read */
233 	cuse_nvme_submit_io_read(&cuse_device, req, 0, arg, &fi, flags,
234 				 block_size, md_size, user_io, in_bufsz, out_bufsz);
235 	CU_ASSERT(g_ut_ctx != NULL);
236 	CU_ASSERT(g_ut_ctx->req == req);
237 	CU_ASSERT(g_ut_ctx->lba = user_io->slba);
238 	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
239 	CU_ASSERT(g_ut_ctx->data_len ==
240 		  (int)((user_io->nblocks + 1) * block_size));
241 	CU_ASSERT(g_ut_ctx->data != NULL);
242 	CU_ASSERT(g_ut_ctx->metadata_len ==
243 		  (int)((user_io->nblocks + 1) * md_size));
244 	CU_ASSERT(g_ut_ctx->metadata != NULL);
245 	CU_ASSERT(g_ut_ctx->appmask == 0xF00D);
246 	CU_ASSERT(g_ut_ctx->apptag == 0xC0DE);
247 	cuse_io_ctx_free(g_ut_ctx);
248 
249 	/* Submit IO write */
250 	g_ut_ctx = NULL;
251 
252 	cuse_nvme_submit_io_write(&cuse_device, req, 0, arg, &fi, flags,
253 				  block_size, md_size, user_io, in_bufsz, out_bufsz);
254 	CU_ASSERT(g_ut_ctx != NULL);
255 	CU_ASSERT(g_ut_ctx->req == req);
256 	CU_ASSERT(g_ut_ctx->lba = user_io->slba);
257 	CU_ASSERT(g_ut_ctx->lba_count == (uint32_t)(user_io->nblocks + 1));
258 	CU_ASSERT(g_ut_ctx->data_len ==
259 		  (int)((user_io->nblocks + 1) * block_size));
260 	CU_ASSERT(g_ut_ctx->data != NULL);
261 	CU_ASSERT(g_ut_ctx->metadata_len ==
262 		  (int)((user_io->nblocks + 1) * md_size));
263 	CU_ASSERT(g_ut_ctx->metadata != NULL);
264 	CU_ASSERT(g_ut_ctx->appmask == 0xF00D);
265 	CU_ASSERT(g_ut_ctx->apptag == 0xC0DE);
266 	cuse_io_ctx_free(g_ut_ctx);
267 	free(user_io);
268 }
269 
270 static void
271 test_cuse_nvme_submit_passthru_cmd(void)
272 {
273 	struct nvme_passthru_cmd *passthru_cmd = NULL;
274 	fuse_req_t req = (void *)0xDEEACDFF;
275 
276 	passthru_cmd = calloc(1, sizeof(struct nvme_passthru_cmd));
277 	g_cuse_device = calloc(1, sizeof(struct cuse_device));
278 
279 	/* Use fatal or we'll segfault if we didn't get memory */
280 	SPDK_CU_ASSERT_FATAL(passthru_cmd != NULL);
281 	SPDK_CU_ASSERT_FATAL(g_cuse_device != NULL);
282 	g_cuse_device->ctrlr = (void *)0xDEADBEEF;
283 
284 	g_ut_ctx = NULL;
285 	/* Passthrough command */
286 	passthru_cmd->opcode       = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
287 	passthru_cmd->nsid         = 1;
288 	passthru_cmd->data_len     = 512;
289 	passthru_cmd->metadata_len = 0;
290 	passthru_cmd->cdw10        = 0xc0de1010;
291 	passthru_cmd->cdw11        = 0xc0de1111;
292 	passthru_cmd->cdw12        = 0xc0de1212;
293 	passthru_cmd->cdw13        = 0xc0de1313;
294 	passthru_cmd->cdw14        = 0xc0de1414;
295 	passthru_cmd->cdw15        = 0xc0de1515;
296 
297 	/* Send IO Command IOCTL */
298 	cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, NULL, NVME_IOCTL_IO_CMD);
299 	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
300 	CU_ASSERT(g_ut_ctx->data != NULL);
301 	CU_ASSERT(g_ut_ctx->metadata == NULL);
302 	CU_ASSERT(g_ut_ctx->req               == req);
303 	CU_ASSERT(g_ut_ctx->data_len          == 512);
304 	CU_ASSERT(g_ut_ctx->metadata_len      == 0);
305 	CU_ASSERT(g_ut_ctx->nvme_cmd.opc      == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
306 	CU_ASSERT(g_ut_ctx->nvme_cmd.nsid     == 1);
307 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw10    == 0xc0de1010);
308 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw11    == 0xc0de1111);
309 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw12    == 0xc0de1212);
310 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw13    == 0xc0de1313);
311 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw14    == 0xc0de1414);
312 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw15    == 0xc0de1515);
313 
314 	cuse_io_ctx_free(g_ut_ctx);
315 	free(passthru_cmd);
316 	free(g_cuse_device);
317 }
318 
319 static void
320 test_cuse_nvme_submit_passthru_cmd_with_md(void)
321 {
322 	struct nvme_passthru_cmd *passthru_cmd = NULL;
323 	fuse_req_t req = (void *)0xDEEACDFF;
324 
325 	passthru_cmd = calloc(1, sizeof(struct nvme_passthru_cmd));
326 	g_cuse_device = calloc(1, sizeof(struct cuse_device));
327 
328 	/* Use fatal or we'll segfault if we didn't get memory */
329 	SPDK_CU_ASSERT_FATAL(passthru_cmd != NULL);
330 	SPDK_CU_ASSERT_FATAL(g_cuse_device != NULL);
331 	g_cuse_device->ctrlr = (void *)0xDEADBEEF;
332 
333 	g_ut_ctx = NULL;
334 	/* Passthrough command */
335 	passthru_cmd->opcode       = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
336 	passthru_cmd->nsid         = 1;
337 	passthru_cmd->data_len     = 512;
338 	passthru_cmd->metadata_len = 8;
339 	passthru_cmd->cdw10        = 0xc0de1010;
340 	passthru_cmd->cdw11        = 0xc0de1111;
341 	passthru_cmd->cdw12        = 0xc0de1212;
342 	passthru_cmd->cdw13        = 0xc0de1313;
343 	passthru_cmd->cdw14        = 0xc0de1414;
344 	passthru_cmd->cdw15        = 0xc0de1515;
345 
346 	/* Send IO Command IOCTL */
347 	cuse_nvme_passthru_cmd_send(req, passthru_cmd, NULL, NULL, NVME_IOCTL_IO_CMD);
348 	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
349 	CU_ASSERT(g_ut_ctx->data != NULL);
350 	CU_ASSERT(g_ut_ctx->metadata != NULL);
351 	CU_ASSERT(g_ut_ctx->req               == req);
352 	CU_ASSERT(g_ut_ctx->data_len          == 512);
353 	CU_ASSERT(g_ut_ctx->metadata_len      == 8);
354 	CU_ASSERT(g_ut_ctx->nvme_cmd.opc      == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
355 	CU_ASSERT(g_ut_ctx->nvme_cmd.nsid     == 1);
356 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw10    == 0xc0de1010);
357 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw11    == 0xc0de1111);
358 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw12    == 0xc0de1212);
359 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw13    == 0xc0de1313);
360 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw14    == 0xc0de1414);
361 	CU_ASSERT(g_ut_ctx->nvme_cmd.cdw15    == 0xc0de1515);
362 
363 	cuse_io_ctx_free(g_ut_ctx);
364 	free(passthru_cmd);
365 	free(g_cuse_device);
366 }
367 
368 static void
369 test_nvme_cuse_get_cuse_ns_device(void)
370 {
371 	struct spdk_nvme_ctrlr ctrlr = {};
372 	struct cuse_device ctrlr_device = {};
373 	struct cuse_device ns_device = { .nsid = 1 };
374 	struct cuse_device *cuse_dev = NULL;
375 
376 	ctrlr.cdata.nn = 3;
377 	ctrlr_device.ctrlr = &ctrlr;
378 	TAILQ_INIT(&ctrlr_device.ns_devices);
379 	TAILQ_INSERT_TAIL(&ctrlr_device.ns_devices, &ns_device, tailq);
380 
381 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_ctrlr_ctx_head));
382 	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, &ctrlr_device, tailq);
383 
384 	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 1);
385 	CU_ASSERT(cuse_dev == &ns_device);
386 
387 	/* nsid 2 was not started */
388 	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 2);
389 	CU_ASSERT(cuse_dev == NULL);
390 
391 	/* nsid invalid */
392 	cuse_dev = nvme_cuse_get_cuse_ns_device(&ctrlr, 0);
393 	CU_ASSERT(cuse_dev == NULL);
394 
395 	TAILQ_REMOVE(&g_ctrlr_ctx_head, &ctrlr_device, tailq);
396 }
397 
398 static void
399 test_cuse_nvme_submit_io(void)
400 {
401 	struct cuse_device cuse_device = {};
402 	struct spdk_nvme_ctrlr ctrlr = {};
403 	struct fuse_file_info fi = {};
404 	struct spdk_nvme_ns ns = {};
405 	struct nvme_user_io *user_io = NULL;
406 	char arg[1024] = {};
407 	fuse_req_t req = (void *)0xDEEACDFF;
408 
409 	/* Allocate memory to avoid stack buffer overflow */
410 	user_io = calloc(3, 4096);
411 	SPDK_CU_ASSERT_FATAL(user_io != NULL);
412 
413 	RB_INIT(&ctrlr.ns);
414 	ns.id = 1;
415 	RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns);
416 
417 	cuse_device.ctrlr = &ctrlr;
418 	ctrlr.cdata.nn = 1;
419 	ns.sector_size = 4096;
420 	ns.id = 1;
421 	user_io->slba = 1024;
422 	user_io->nblocks = 1;
423 	cuse_device.nsid = 1;
424 	g_cuse_device = &cuse_device;
425 
426 	/* Read */
427 	user_io->opcode = SPDK_NVME_OPC_READ;
428 	g_ut_ctx = NULL;
429 
430 	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
431 	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
432 	CU_ASSERT(g_ut_nsid == 1);
433 	CU_ASSERT(g_ut_ctx->req == (void *)0xDEEACDFF);
434 	CU_ASSERT(g_ut_ctx->lba = 1024);
435 	CU_ASSERT(g_ut_ctx->lba_count == 2);
436 	CU_ASSERT(g_ut_ctx->data_len == 2 * 4096);
437 	CU_ASSERT(g_ut_ctx->data != NULL);
438 	CU_ASSERT(g_ut_ctx->metadata_len == 0);
439 	CU_ASSERT(g_ut_ctx->metadata == NULL);
440 	CU_ASSERT(g_ut_ctx->appmask == 0);
441 	CU_ASSERT(g_ut_ctx->apptag == 0);
442 
443 	cuse_io_ctx_free(g_ut_ctx);
444 
445 	/* Write */
446 	user_io->opcode = SPDK_NVME_OPC_WRITE;
447 	g_ut_ctx = NULL;
448 
449 	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
450 	SPDK_CU_ASSERT_FATAL(g_ut_ctx != NULL);
451 	CU_ASSERT(g_ut_nsid == 1);
452 	CU_ASSERT(g_ut_ctx->req == req);
453 	CU_ASSERT(g_ut_ctx->lba = 1024);
454 	CU_ASSERT(g_ut_ctx->lba_count == 2);
455 	CU_ASSERT(g_ut_ctx->data_len == 2 * 4096);
456 	CU_ASSERT(g_ut_ctx->data != NULL);
457 	CU_ASSERT(g_ut_ctx->metadata_len == 0);
458 	CU_ASSERT(g_ut_ctx->metadata == NULL);
459 	CU_ASSERT(g_ut_ctx->appmask == 0);
460 	CU_ASSERT(g_ut_ctx->apptag == 0);
461 	cuse_io_ctx_free(g_ut_ctx);
462 
463 	/* Invalid */
464 	g_ut_ctx = NULL;
465 	user_io->opcode = SPDK_NVME_OPC_FLUSH;
466 
467 	cuse_nvme_submit_io(req, 0, arg, &fi, FUSE_IOCTL_DIR, user_io, 4096, 4096);
468 	SPDK_CU_ASSERT_FATAL(g_ut_ctx == NULL);
469 
470 	free(user_io);
471 }
472 
473 static void
474 test_cuse_nvme_reset(void)
475 {
476 	struct cuse_device cuse_device = {};
477 	struct spdk_nvme_ctrlr ctrlr = {};
478 	fuse_req_t req = (void *)0xDEADBEEF;
479 
480 	cuse_device.ctrlr = &ctrlr;
481 	g_cuse_device = &cuse_device;
482 
483 	/* Invalid nsid  */
484 	cuse_device.nsid = 1;
485 	g_ut_ctx = NULL;
486 
487 	cuse_nvme_reset(req, 0, NULL, NULL, 0, NULL, 4096, 4096);
488 	CU_ASSERT(g_ut_ctx == NULL);
489 
490 	/* Valid nsid, check IO message sent value */
491 	cuse_device.nsid = 0;
492 
493 	cuse_nvme_reset(req, 0, NULL, NULL, 0, NULL, 4096, 4096);
494 	CU_ASSERT(g_ut_ctx == (void *)0xDEADBEEF);
495 	CU_ASSERT(g_ut_ctrlr == &ctrlr);
496 	CU_ASSERT(g_ut_nsid == 0);
497 }
498 
499 static void
500 test_nvme_cuse_stop(void)
501 {
502 	struct spdk_nvme_ctrlr ctrlr = {};
503 	struct cuse_device *ctrlr_device = NULL;
504 	struct cuse_device *ns_dev1, *ns_dev2;
505 
506 	/* Allocate memory for nvme_cuse_stop() to free. */
507 	ctrlr_device = calloc(1, sizeof(struct cuse_device));
508 	SPDK_CU_ASSERT_FATAL(ctrlr_device != NULL);
509 
510 	TAILQ_INIT(&ctrlr_device->ns_devices);
511 	ns_dev1 = calloc(1, sizeof(struct cuse_device));
512 	SPDK_CU_ASSERT_FATAL(ns_dev1 != NULL);
513 	ns_dev2 = calloc(1, sizeof(struct cuse_device));
514 	SPDK_CU_ASSERT_FATAL(ns_dev2 != NULL);
515 
516 	g_ctrlr_started = spdk_bit_array_create(128);
517 	SPDK_CU_ASSERT_FATAL(g_ctrlr_started != NULL);
518 
519 	TAILQ_INSERT_TAIL(&ctrlr_device->ns_devices, ns_dev1, tailq);
520 	TAILQ_INSERT_TAIL(&ctrlr_device->ns_devices, ns_dev2, tailq);
521 	ctrlr.cdata.nn = 2;
522 	ctrlr_device->ctrlr = &ctrlr;
523 	pthread_mutex_init(&g_cuse_mtx, NULL);
524 	TAILQ_INSERT_TAIL(&g_ctrlr_ctx_head, ctrlr_device, tailq);
525 
526 	nvme_cuse_stop(&ctrlr);
527 	CU_ASSERT(g_ctrlr_started == NULL);
528 	CU_ASSERT(TAILQ_EMPTY(&g_ctrlr_ctx_head));
529 }
530 
531 int
532 main(int argc, char **argv)
533 {
534 	CU_pSuite	suite = NULL;
535 	unsigned int	num_failures;
536 
537 	CU_set_error_action(CUEA_ABORT);
538 	CU_initialize_registry();
539 
540 	suite = CU_add_suite("nvme_cuse", NULL, NULL);
541 	CU_ADD_TEST(suite, test_cuse_nvme_submit_io_read_write);
542 	CU_ADD_TEST(suite, test_cuse_nvme_submit_io_read_write_with_md);
543 	CU_ADD_TEST(suite, test_cuse_nvme_submit_passthru_cmd);
544 	CU_ADD_TEST(suite, test_cuse_nvme_submit_passthru_cmd_with_md);
545 	CU_ADD_TEST(suite, test_nvme_cuse_get_cuse_ns_device);
546 	CU_ADD_TEST(suite, test_cuse_nvme_submit_io);
547 	CU_ADD_TEST(suite, test_cuse_nvme_reset);
548 	CU_ADD_TEST(suite, test_nvme_cuse_stop);
549 
550 	CU_basic_set_mode(CU_BRM_VERBOSE);
551 	CU_basic_run_tests();
552 	num_failures = CU_get_number_of_failures();
553 	CU_cleanup_registry();
554 	return num_failures;
555 }
556