/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)
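
/*
 * The file under test, nvmf/ctrlr_bdev.c, is #included above and compiled
 * directly into this test binary, so every external symbol it references
 * must be satisfied here, either by a hand-written fake or by a DEFINE_STUB
 * from spdk_internal/mock.h.  As a rough sketch (simplified from the real
 * macro), a stub such as
 *
 *   DEFINE_STUB(spdk_bdev_get_name, const char *,
 *               (const struct spdk_bdev *bdev), "test");
 *
 * expands to a global holding the default return value plus a function
 * that returns it, which is what lets individual tests override the
 * behavior with MOCK_SET() and restore it with MOCK_CLEAR():
 *
 *   const char *ut_spdk_bdev_get_name = "test";
 *   const char *
 *   spdk_bdev_get_name(const struct spdk_bdev *bdev)
 *   {
 *           return MOCK_GET(spdk_bdev_get_name);
 *   }
 */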

DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);

uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint16_t guard_seed)
{
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

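/*
 * Completion status reported by the spdk_bdev_io_get_nvme_status() fake
 * below.  Tests overwrite these globals to make a completed bdev I/O look
 * like an NVMe failure, then call reset_bdev_nvme_status() to restore the
 * default (generic/success) status.
 */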
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}

void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}

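/*
 * NVMe read/write commands carry the starting LBA as a little-endian
 * 64-bit value spanning CDW10 (low dword) and CDW11 (high dword), and the
 * number of logical blocks as a zero-based 16-bit value in CDW12 bits
 * 15:00; the remaining CDW12 bits hold I/O flags such as FUA.  The helper
 * is expected to decode both fields and convert NLB to a one-based count.
 */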
static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

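/*
 * nvmf_bdev_ctrlr_lba_in_range(bdev_blocks, start_lba, num_blocks) must be
 * written so that start_lba + num_blocks cannot silently wrap at 2^64; a
 * naive "start + count <= size" check would accept the overflow cases
 * below.  One overflow-safe formulation consistent with these assertions
 * (not necessarily the exact implementation) is:
 *
 *   num_blocks <= bdev_blocks && start_lba <= bdev_blocks - num_blocks
 */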
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

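/*
 * A DIF context is only meaningful when the bdev carries per-block
 * metadata (md_len != 0), so the first call below must fail.  The fake
 * spdk_dif_ctx_init() above records just the fields asserted here; in
 * particular, the initial reference tag is expected to be seeded from the
 * low 32 bits of the command's starting LBA (CDW10).
 */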
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

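/*
 * A fused compare-and-write arrives as two requests: a COMPARE with
 * FUSE_FIRST and a WRITE with FUSE_SECOND.  The handler is expected to
 * validate that both halves address the same LBA range, that the range
 * fits on the bdev, and that the write payload length matches NLB times
 * the block size, before submitting the pair atomically via
 * spdk_bdev_comparev_and_writev_blocks() (stubbed above).
 */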
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

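/*
 * Identify Namespace data is derived from the bdev's properties: nsze,
 * ncap, and nuse from the block count, lbaf[0].lbads as log2 of the block
 * size, lbaf[0].ms from the metadata size, and noiob from the optimal I/O
 * boundary.  The second invocation below passes true for the final
 * argument (DIF insert/strip mode), in which case the metadata is handled
 * by the target and lbaf[0].ms is reported to the host as 0.
 */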
static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 1;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp(&nsdata.eui64, eui64, 8));

	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp(&nsdata.eui64, eui64, 8));
}

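/*
 * Zero-copy start performs the same LBA-range and payload-length
 * validation as an ordinary write before handing the request to
 * spdk_bdev_zcopy_start() (stubbed above), so the three cases below
 * mirror the success, out-of-range, and SGL-length-mismatch paths.
 */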
static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

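/*
 * Exercises the compare, flush, write-zeroes, and copy handlers, using
 * MOCK_SET() to flip the stubbed bdev entry points between success (0)
 * and failure (-1) so that both the asynchronous submit path and each
 * error completion path are covered.
 */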
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev does not support flush */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	range.slba = 512;
	range.nlb = 511;
	req.data = &range;
	req.length = 32;
	spdk_iov_one(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}

static void
test_nvmf_bdev_ctrlr_read_write_cmd(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Read two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Write two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
}

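/*
 * For passthru commands the completion status comes from the bdev layer
 * via spdk_bdev_io_get_nvme_status() (faked above).  Submission errors
 * are mapped as follows: -ENOTSUP becomes Invalid Opcode with DNR set,
 * while -ENOMEM queues the request for retry, which the test observes
 * through group.stat.pending_bdev_io.
 */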
static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	spdk_iov_one(req.iov, &req.iovcnt, NULL, 0);

	cmd.nsid = 1;
	cmd.opc = 0xFF;

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_io_passthru, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}