/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

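/*
 * The file under test is #include'd directly (rather than linked) so that
 * the tests can call its internal static helpers and so that its external
 * dependencies resolve against the stubs defined below.
 */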
#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);

uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_pi_format spdk_dif_pi_format_t;

spdk_dif_pi_format_t
spdk_bdev_get_dif_pi_format(const struct spdk_bdev *bdev)
{
	return bdev->dif_pi_format;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_iov_passthru_md, int, (
		    struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		    const struct spdk_nvme_cmd *cmd, struct iovec *iov, int iovcnt,
		    size_t nbytes, void *md_buf, size_t md_len,
		    spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);

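/*
 * These namespace iterators should never be reached by the code paths under
 * test, so they abort() instead of returning a canned value to flag any
 * unexpected call immediately.
 */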
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	ctx->dif_pi_format = opts->dif_pi_format;
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

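/*
 * Completion status returned by the spdk_bdev_io_get_nvme_status() stub
 * below. Tests overwrite these globals to simulate failed bdev I/O and call
 * reset_bdev_nvme_status() to restore the defaults.
 */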
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}

void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

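/*
 * For NVMe read/write commands, the starting LBA (SLBA) spans CDW10-CDW11
 * and the number of logical blocks (NLB) sits in CDW12 bits 15:00 as a 0's
 * based value, so cdw12 = 0x9875 decodes to 0x9875 + 1 = 0x9876 blocks.
 */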
static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

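/*
 * The extended params helper copies CDW12/CDW13 into spdk_bdev_ext_io_opts.
 * The expected raw values simply mirror what was written into the command:
 * 0x9875 plus the data placement directive flag in CDW12, and the directive
 * specific value 0x2 in CDW13 bits 31:16.
 */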
static void
test_get_rw_ext_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	struct spdk_bdev_ext_io_opts opts = {0};

	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE);
	to_le32(&cmd.cdw13, 0x2 << 16);
	nvmf_bdev_ctrlr_get_rw_ext_params(&cmd, &opts);
	CU_ASSERT(opts.nvme_cdw12.raw == 0x209875);
	CU_ASSERT(opts.nvme_cdw13.raw == 0x20000);
}

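/*
 * nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, lba, nlb) must accept a
 * range only when lba + nlb <= bdev_num_blocks, and must not be fooled by
 * uint64_t wraparound when lba + nlb overflows.
 */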
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

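/*
 * A DIF context can only be built when the bdev carries metadata (md_len !=
 * 0). The initial reference tag is seeded from the low 32 bits of the SLBA,
 * hence the expected 0x90ABCDEF below.
 */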
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

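/*
 * Fused COMPARE + WRITE: one success path followed by three failure paths
 * (mismatched fused LBA ranges, LBA out of range, and an SGL length that
 * does not match the transfer size).
 */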
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

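/*
 * Identify Namespace is filled in twice: once as-is, and once with the third
 * argument (DIF insert/strip on the target) set to true, in which case the
 * metadata size must be reported back to the host as 0.
 */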
static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 1;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp(&nsdata.eui64, eui64, 8));

	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp(&nsdata.eui64, eui64, 8));
}

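/*
 * Zcopy start performs the same LBA-range and SGL-length validation as a
 * regular write before handing the request to spdk_bdev_zcopy_start().
 */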
static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

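/*
 * Covers the remaining bdev command handlers (COMPARE, FLUSH, WRITE ZEROES
 * and COPY), driving their error paths through MOCK_SET() on the bdev
 * submission stubs.
 */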
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev does not support flush */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	struct spdk_nvmf_subsystem subsystem = { };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem };
	qpair.ctrlr = &ctrlr;

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	cmd.nvme_cmd.cdw12 = 3;
	subsystem.max_write_zeroes_size_kib = 1;
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* SLBA out of range */
	subsystem.max_write_zeroes_size_kib = 0;
	cmd.nvme_cmd.cdw12 = 2;
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	range.slba = 512;
	range.nlb = 511;
	req.length = 32;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}

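/*
 * Read/write happy path: NLB is 0's based, so cdw12 = 1 means two blocks and
 * req.length must be 2 * 4096 bytes for the request to be submitted.
 */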
static void
test_nvmf_bdev_ctrlr_read_write_cmd(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Read two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Write two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
}

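/*
 * Passthru I/O and admin commands: completions are simulated through the
 * g_bdev_nvme_status_* globals, -ENOTSUP must surface as Invalid Opcode with
 * DNR set, and -ENOMEM must queue the request and bump
 * group.stat.pending_bdev_io.
 */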
static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, NULL, 0);

	cmd.nsid = 1;
	cmd.opc = 0xFF;

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_get_rw_ext_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}