xref: /spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c (revision 167ad6fb3ade0a58da32b6d09824aae4d3e2c79f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 
11 #include "spdk_internal/mock.h"
12 #include "thread/thread_internal.h"
13 
14 #include "nvmf/ctrlr_bdev.c"
15 
16 #include "spdk/bdev_module.h"
17 
/*
 * Stubbed dependencies: ctrlr_bdev.c is compiled directly into this unit test
 * via #include above, so every external symbol it references must be provided
 * here.  DEFINE_STUB creates a mock with a default return value that tests can
 * override with MOCK_SET and restore with MOCK_CLEAR.
 */
SPDK_LOG_REGISTER_COMPONENT(nvmf)

/* Default -1 so an unexpected request completion is visible in a test. */
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);
56 
/* Stub accessor: report the optimal I/O boundary set by the test on the bdev. */
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}
62 
/* Stub accessor: report the per-block metadata size set by the test. */
uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}
68 
69 bool
70 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
71 {
72 	return (bdev->md_len != 0) && bdev->md_interleave;
73 }
74 
75 /* We have to use the typedef in the function declaration to appease astyle. */
76 typedef enum spdk_dif_type spdk_dif_type_t;
77 
78 spdk_dif_type_t
79 spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
80 {
81 	if (bdev->md_len != 0) {
82 		return bdev->dif_type;
83 	} else {
84 		return SPDK_DIF_DISABLE;
85 	}
86 }
87 
88 bool
89 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
90 {
91 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
92 		return bdev->dif_is_head_of_md;
93 	} else {
94 		return false;
95 	}
96 }
97 
98 uint32_t
99 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
100 {
101 	if (spdk_bdev_is_md_interleaved(bdev)) {
102 		return bdev->blocklen - bdev->md_len;
103 	} else {
104 		return bdev->blocklen;
105 	}
106 }
107 
/* Stub accessor: report the atomic compare & write unit set by the test. */
uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}
113 
/* Stub accessor: report the block size set by the test. */
uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}
119 
/* Stub accessor: report the block count set by the test. */
uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}
125 
/*
 * Remaining stubbed bdev/nvmf dependencies.  Submission stubs default to 0
 * (success); individual tests inject errors with MOCK_SET(..., -1/-ENOTSUP/
 * -ENOMEM) and restore the default with MOCK_CLEAR.
 */
DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);
226 
/* Not expected to be reached by these tests; abort() makes any call obvious.
 * The return statement only silences compiler warnings. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}
233 
/* Not expected to be reached by these tests; abort() makes any call obvious. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}
240 
/* Not expected to be reached by these tests; abort() makes any call obvious. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}
247 
/* Test double for spdk_dif_ctx_init(): records only the fields the tests
 * inspect (PI format, block size, metadata size, initial reference tag) and
 * always reports success.  All other parameters are intentionally ignored. */
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	ctx->dif_pi_format = opts->dif_pi_format;
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}
261 
/* Simulated NVMe completion status returned by the
 * spdk_bdev_io_get_nvme_status() stub below; tests mutate these globals to
 * exercise error-status paths. */
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

/* Restore the simulated NVMe status to success defaults between test cases. */
static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}
273 
/* Stub: report the test-controlled NVMe status for a completed bdev I/O.
 * The bdev_io argument is ignored; status comes from the globals above. */
void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}
282 
283 static void
284 test_get_rw_params(void)
285 {
286 	struct spdk_nvme_cmd cmd = {0};
287 	uint64_t lba;
288 	uint64_t count;
289 
290 	lba = 0;
291 	count = 0;
292 	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
293 	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
294 	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
295 	CU_ASSERT(lba == 0x1234567890ABCDEF);
296 	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
297 }
298 
/* Verify nvmf_bdev_ctrlr_lba_in_range(vol_size, start_lba, num_blocks):
 * the requested range must lie entirely inside the volume, including the
 * cases where start_lba + num_blocks would wrap around UINT64_MAX. */
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}
318 
319 static void
320 test_get_dif_ctx(void)
321 {
322 	struct spdk_bdev bdev = {};
323 	struct spdk_nvme_cmd cmd = {};
324 	struct spdk_dif_ctx dif_ctx = {};
325 	bool ret;
326 
327 	bdev.md_len = 0;
328 
329 	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
330 	CU_ASSERT(ret == false);
331 
332 	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
333 	bdev.blocklen = 520;
334 	bdev.md_len = 8;
335 
336 	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
337 	CU_ASSERT(ret == true);
338 	CU_ASSERT(dif_ctx.block_size = 520);
339 	CU_ASSERT(dif_ctx.md_size == 8);
340 	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
341 }
342 
/* Exercise nvmf_bdev_ctrlr_compare_and_write_cmd() with a fused
 * COMPARE (first) + WRITE (second) pair on a 10-block, 512-byte bdev:
 * 1. matching range and length -> submitted asynchronously,
 * 2. mismatched compare/write ranges -> INVALID_FIELD on the write,
 * 3. range beyond the bdev -> LBA_OUT_OF_RANGE,
 * 4. payload length != range length -> DATA_SGL_LENGTH_INVALID. */
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
472 
473 static void
474 test_nvmf_bdev_ctrlr_identify_ns(void)
475 {
476 	struct spdk_nvmf_ns ns = {};
477 	struct spdk_nvme_ns_data nsdata = {};
478 	struct spdk_bdev bdev = {};
479 	uint8_t ns_g_id[16] = "abcdefgh";
480 	uint8_t eui64[8] = "12345678";
481 
482 	ns.bdev = &bdev;
483 	ns.ptpl_file = (void *)0xDEADBEEF;
484 	memcpy(ns.opts.nguid, ns_g_id, 16);
485 	memcpy(ns.opts.eui64, eui64, 8);
486 
487 	bdev.blockcnt = 10;
488 	bdev.acwu = 1;
489 	bdev.md_len = 512;
490 	bdev.dif_type = SPDK_DIF_TYPE1;
491 	bdev.blocklen = 4096;
492 	bdev.md_interleave = 0;
493 	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
494 	bdev.dif_is_head_of_md = true;
495 
496 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
497 	CU_ASSERT(nsdata.nsze == 10);
498 	CU_ASSERT(nsdata.ncap == 10);
499 	CU_ASSERT(nsdata.nuse == 10);
500 	CU_ASSERT(nsdata.nlbaf == 0);
501 	CU_ASSERT(nsdata.flbas.format == 0);
502 	CU_ASSERT(nsdata.flbas.msb_format == 0);
503 	CU_ASSERT(nsdata.nacwu == 0);
504 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
505 	CU_ASSERT(nsdata.lbaf[0].ms == 512);
506 	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
507 	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
508 	CU_ASSERT(nsdata.nmic.can_share == 1);
509 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
510 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
511 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
512 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
513 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
514 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
515 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
516 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
517 	CU_ASSERT(nsdata.flbas.extended == 1);
518 	CU_ASSERT(nsdata.mc.extended == 1);
519 	CU_ASSERT(nsdata.mc.pointer == 0);
520 	CU_ASSERT(nsdata.dps.md_start == true);
521 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
522 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
523 
524 	memset(&nsdata, 0, sizeof(nsdata));
525 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
526 	CU_ASSERT(nsdata.nsze == 10);
527 	CU_ASSERT(nsdata.ncap == 10);
528 	CU_ASSERT(nsdata.nuse == 10);
529 	CU_ASSERT(nsdata.nlbaf == 0);
530 	CU_ASSERT(nsdata.flbas.format == 0);
531 	CU_ASSERT(nsdata.flbas.msb_format == 0);
532 	CU_ASSERT(nsdata.nacwu == 0);
533 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
534 	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
535 	CU_ASSERT(nsdata.nmic.can_share == 1);
536 	CU_ASSERT(nsdata.lbaf[0].ms == 0);
537 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
538 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
539 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
540 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
541 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
542 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
543 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
544 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
545 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
546 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
547 }
548 
/* Exercise nvmf_bdev_ctrlr_zcopy_start() on a 10-block, 512-byte bdev:
 * 1. valid range and length -> submitted asynchronously,
 * 2. range beyond the bdev -> LBA_OUT_OF_RANGE,
 * 3. payload length != range length -> DATA_SGL_LENGTH_INVALID. */
static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
634 
/* Exercise the compare, flush, write-zeroes and copy command handlers,
 * including error paths injected through MOCK_SET on the bdev submission
 * stubs.  Each scenario depends on the mock state set immediately before it,
 * so the ordering of these cases is significant. */
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev not support flush */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/*  Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;	/* one source range, 0's based */
	range.slba = 512;
	range.nlb = 511;
	req.length = 32;
	spdk_iov_one(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	/* NOTE(review): status is still ASYNCHRONOUS here even though the mock
	 * reports the type as unsupported — the handler appears to defer that
	 * check; confirm against nvmf/ctrlr_bdev.c. */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}
796 
797 static void
798 test_nvmf_bdev_ctrlr_read_write_cmd(void)
799 {
800 	struct spdk_bdev bdev = {};
801 	struct spdk_nvmf_request req = {};
802 	union nvmf_c2h_msg rsp = {};
803 	union nvmf_h2c_msg cmd = {};
804 	int rc;
805 
806 	req.cmd = &cmd;
807 	req.rsp = &rsp;
808 
809 	/* Read two blocks, block size 4096 */
810 	cmd.nvme_cmd.cdw12 = 1;
811 	bdev.blockcnt = 100;
812 	bdev.blocklen = 4096;
813 	req.length = 8192;
814 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
815 
816 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
817 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
818 
819 	/* Write two blocks, block size 4096 */
820 	cmd.nvme_cmd.cdw12 = 1;
821 	bdev.blockcnt = 100;
822 	bdev.blocklen = 4096;
823 	req.length = 8192;
824 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
825 
826 	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
827 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
828 }
829 
/* Exercise the NVMe passthrough paths (I/O and admin): successful async
 * submission and completion, device-error completion driven by the
 * g_bdev_nvme_status_* globals, -ENOTSUP -> INVALID_OPCODE with DNR set,
 * and -ENOMEM -> queued on the poll group's io_wait list. */
static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	spdk_iov_one(req.iov, &req.iovcnt, NULL, 0);

	cmd.nsid = 1;
	cmd.opc = 0xFF;		/* vendor-specific opcode -> passthrough */

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_io_passthru, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}
933 
/* Register all ctrlr_bdev unit tests with CUnit and run them; the exit code
 * is the number of failed assertions. */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}
958