xref: /spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c (revision 66289a6dbe28217365daa40fd92dcf327871c2e8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 
11 #include "spdk_internal/mock.h"
12 #include "thread/thread_internal.h"
13 
14 #include "nvmf/ctrlr_bdev.c"
15 
16 #include "spdk/bdev_module.h"
17 
18 SPDK_LOG_REGISTER_COMPONENT(nvmf)
19 
/* Stub out the bdev and nvmf dependencies of ctrlr_bdev.c so the translation
 * unit above can be compiled and exercised in isolation.  Each DEFINE_STUB
 * supplies a mockable no-op implementation returning the fixed value given as
 * the last argument; DEFINE_STUB_V supplies a void no-op.
 */
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

/* Data-path stubs: return 0 (submission accepted) unless a test overrides
 * them with MOCK_SET to simulate submission failure.
 */
DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);
56 
/* Minimal local definition of the (normally opaque) bdev descriptor: the
 * accessor stubs below only ever need the backing bdev pointer.
 */
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;	/* bdev this descriptor was "opened" on */
};
60 
61 uint32_t
62 spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
63 {
64 	return bdev->optimal_io_boundary;
65 }
66 
67 uint32_t
68 spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
69 {
70 	return bdev->md_len;
71 }
72 
73 bool
74 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
75 {
76 	return (bdev->md_len != 0) && bdev->md_interleave;
77 }
78 
79 /* We have to use the typedef in the function declaration to appease astyle. */
80 typedef enum spdk_dif_type spdk_dif_type_t;
81 
82 spdk_dif_type_t
83 spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
84 {
85 	if (bdev->md_len != 0) {
86 		return bdev->dif_type;
87 	} else {
88 		return SPDK_DIF_DISABLE;
89 	}
90 }
91 
92 bool
93 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
94 {
95 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
96 		return bdev->dif_is_head_of_md;
97 	} else {
98 		return false;
99 	}
100 }
101 
102 uint32_t
103 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
104 {
105 	if (spdk_bdev_is_md_interleaved(bdev)) {
106 		return bdev->blocklen - bdev->md_len;
107 	} else {
108 		return bdev->blocklen;
109 	}
110 }
111 
112 uint16_t
113 spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
114 {
115 	return bdev->acwu;
116 }
117 
118 uint32_t
119 spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
120 {
121 	return bdev->blocklen;
122 }
123 
124 uint64_t
125 spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
126 {
127 	return bdev->blockcnt;
128 }
129 
130 /* We have to use the typedef in the function declaration to appease astyle. */
131 typedef enum spdk_dif_pi_format spdk_dif_pi_format_t;
132 
133 spdk_dif_pi_format_t
134 spdk_bdev_get_dif_pi_format(const struct spdk_bdev *bdev)
135 {
136 	return bdev->dif_pi_format;
137 }
138 
139 uint32_t
140 spdk_bdev_desc_get_md_size(struct spdk_bdev_desc *desc)
141 {
142 	return spdk_bdev_get_md_size(desc->bdev);
143 }
144 
145 bool
146 spdk_bdev_desc_is_md_interleaved(struct spdk_bdev_desc *desc)
147 {
148 	return spdk_bdev_is_md_interleaved(desc->bdev);
149 }
150 
151 /* We have to use the typedef in the function declaration to appease astyle. */
152 typedef enum spdk_dif_type spdk_dif_type_t;
153 
154 spdk_dif_type_t
155 spdk_bdev_desc_get_dif_type(struct spdk_bdev_desc *desc)
156 {
157 	return spdk_bdev_get_dif_type(desc->bdev);
158 }
159 
160 bool
161 spdk_bdev_desc_is_dif_head_of_md(struct spdk_bdev_desc *desc)
162 {
163 	return spdk_bdev_is_dif_head_of_md(desc->bdev);
164 }
165 
166 uint32_t
167 spdk_bdev_desc_get_block_size(struct spdk_bdev_desc *desc)
168 {
169 	return spdk_bdev_get_block_size(desc->bdev);
170 }
171 
172 spdk_dif_pi_format_t
173 spdk_bdev_desc_get_dif_pi_format(struct spdk_bdev_desc *desc)
174 {
175 	return spdk_bdev_get_dif_pi_format(desc->bdev);
176 }
177 
/* Remaining bdev I/O and nvmf stubs.  All submission-style stubs return 0
 * ("accepted") by default; tests use MOCK_SET to force -1 ("submission
 * failed") when exercising error paths.
 */
DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_desc_is_dif_check_enabled, bool,
	    (struct spdk_bdev_desc *desc, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_iov_passthru_md, int, (
		    struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		    const struct spdk_nvme_cmd *cmd, struct iovec *iov, int iovcnt,
		    size_t nbytes, void *md_buf, size_t md_len,
		    spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);
/* Namespace-iteration helpers are not expected to be reached by any test in
 * this file; abort() loudly if ctrlr_bdev.c ever calls them so the gap in
 * stub coverage is obvious.
 */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}
300 
301 int
302 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
303 		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
304 		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
305 		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
306 {
307 	ctx->dif_pi_format = opts->dif_pi_format;
308 	ctx->block_size = block_size;
309 	ctx->md_size = md_size;
310 	ctx->init_ref_tag = init_ref_tag;
311 
312 	return 0;
313 }
314 
/* NVMe completion status injected through spdk_bdev_io_get_nvme_status()
 * below; tests overwrite these globals to simulate bdev I/O completion
 * results.
 */
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

/* Restore the injected NVMe status to its defaults (generic / success). */
static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}
326 
327 void
328 spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
329 			     int *sc)
330 {
331 	*cdw0 = g_bdev_nvme_status_cdw0;
332 	*sct = g_bdev_nvme_status_sct;
333 	*sc = g_bdev_nvme_status_sc;
334 }
335 
336 bool
337 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
338 {
339 	return ns->ptpl_file != NULL;
340 }
341 
342 static void
343 test_get_rw_params(void)
344 {
345 	struct spdk_nvme_cmd cmd = {0};
346 	uint64_t lba;
347 	uint64_t count;
348 
349 	lba = 0;
350 	count = 0;
351 	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
352 	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
353 	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
354 	CU_ASSERT(lba == 0x1234567890ABCDEF);
355 	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
356 }
357 
/* Verify that nvmf_bdev_ctrlr_get_rw_ext_params() copies CDW12/CDW13 into
 * the extended I/O opts and converts the PRCHK flags into an exclude mask
 * (here only PRCHK_GUARD is requested, so the mask excludes the other
 * PRCHK bits).
 */
static void
test_get_rw_ext_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	struct spdk_bdev_ext_io_opts opts = {0};

	/* NLB plus directive/PRCHK flag bits in CDW12; DSPEC in CDW13[31:16]. */
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE |
		SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
	to_le32(&cmd.cdw13, 0x2 << 16);
	nvmf_bdev_ctrlr_get_rw_ext_params(&cmd, &opts);
	CU_ASSERT(opts.nvme_cdw12.raw == 0x10209875);
	CU_ASSERT(opts.nvme_cdw13.raw == 0x20000);
	/* XORing the exclude mask with the full PRCHK mask must leave exactly
	 * the flag that was requested.
	 */
	CU_ASSERT((opts.dif_check_flags_exclude_mask ^ SPDK_NVME_IO_FLAGS_PRCHK_MASK)
		  == SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
}
373 
/* Exercise nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, io_start_lba,
 * io_num_blocks), including edge cases where start + count would wrap a
 * uint64_t.
 */
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}
393 
/* Verify nvmf_bdev_ctrlr_get_dif_ctx(): returns false when the bdev has no
 * metadata (DIF disabled) and, when metadata is present, populates the DIF
 * context via the spdk_dif_ctx_init() stub with the bdev geometry and the
 * low 32 bits of the SLBA as the initial reference tag.
 */
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc desc = { .bdev = &bdev, };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	/* No metadata -> DIF disabled -> no context to build. */
	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&desc, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	/* 512 + 8 byte blocks with metadata; SLBA in CDW10/CDW11. */
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&desc, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	/* Initial ref tag is the low 32 bits of the start LBA. */
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}
418 
/* Exercise nvmf_bdev_ctrlr_compare_and_write_cmd() with a fused
 * COMPARE+WRITE pair against a 10-block, 512-byte-block bdev:
 * 1. matching LBA ranges -> submitted asynchronously,
 * 2. mismatched fused ranges -> INVALID_FIELD on the write,
 * 3. range beyond the bdev -> LBA_OUT_OF_RANGE,
 * 4. transfer length not matching NLB -> DATA_SGL_LENGTH_INVALID.
 */
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc desc = { .bdev = &bdev, };
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	/* Small bdev so case 3 can run past the end easily. */
	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	/* First half of the fused pair: COMPARE. */
	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	/* Second half of the fused pair: WRITE. */
	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, &desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, &desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, &desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, &desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
548 
549 static void
550 test_nvmf_bdev_ctrlr_identify_ns(void)
551 {
552 	struct spdk_nvmf_ns ns = {};
553 	struct spdk_nvme_ns_data nsdata = {};
554 	struct spdk_bdev bdev = {};
555 	struct spdk_bdev_desc desc = {};
556 	uint8_t ns_g_id[16] = "abcdefgh";
557 	uint8_t eui64[8] = "12345678";
558 
559 	desc.bdev = &bdev;
560 	ns.desc = &desc;
561 	ns.bdev = &bdev;
562 	ns.ptpl_file = (void *)0xDEADBEEF;
563 	memcpy(ns.opts.nguid, ns_g_id, 16);
564 	memcpy(ns.opts.eui64, eui64, 8);
565 
566 	bdev.blockcnt = 10;
567 	bdev.acwu = 1;
568 	bdev.md_len = 512;
569 	bdev.dif_type = SPDK_DIF_TYPE1;
570 	bdev.blocklen = 4096;
571 	bdev.md_interleave = 0;
572 	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
573 	bdev.dif_is_head_of_md = true;
574 
575 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
576 	CU_ASSERT(nsdata.nsze == 10);
577 	CU_ASSERT(nsdata.ncap == 10);
578 	CU_ASSERT(nsdata.nuse == 10);
579 	CU_ASSERT(nsdata.nlbaf == 0);
580 	CU_ASSERT(nsdata.flbas.format == 0);
581 	CU_ASSERT(nsdata.flbas.msb_format == 0);
582 	CU_ASSERT(nsdata.nacwu == 0);
583 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
584 	CU_ASSERT(nsdata.lbaf[0].ms == 512);
585 	CU_ASSERT(nsdata.dpc.pit1 == 1);
586 	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_TYPE1);
587 	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
588 	CU_ASSERT(nsdata.nmic.can_share == 1);
589 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
590 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
591 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
592 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
593 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
594 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
595 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
596 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
597 	CU_ASSERT(nsdata.flbas.extended == 1);
598 	CU_ASSERT(nsdata.mc.extended == 1);
599 	CU_ASSERT(nsdata.mc.pointer == 0);
600 	CU_ASSERT(nsdata.dps.md_start == true);
601 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
602 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
603 
604 	memset(&nsdata, 0, sizeof(nsdata));
605 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
606 	CU_ASSERT(nsdata.nsze == 10);
607 	CU_ASSERT(nsdata.ncap == 10);
608 	CU_ASSERT(nsdata.nuse == 10);
609 	CU_ASSERT(nsdata.nlbaf == 0);
610 	CU_ASSERT(nsdata.flbas.format == 0);
611 	CU_ASSERT(nsdata.flbas.msb_format == 0);
612 	CU_ASSERT(nsdata.nacwu == 0);
613 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
614 	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
615 	CU_ASSERT(nsdata.nmic.can_share == 1);
616 	CU_ASSERT(nsdata.lbaf[0].ms == 0);
617 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
618 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
619 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
620 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
621 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
622 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
623 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
624 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
625 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
626 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
627 }
628 
/* Exercise nvmf_bdev_ctrlr_zcopy_start() against a 10-block bdev:
 * 1. valid range/length -> submitted asynchronously with success status,
 * 2. range beyond the bdev -> LBA_OUT_OF_RANGE,
 * 3. transfer length not matching NLB -> DATA_SGL_LENGTH_INVALID.
 */
static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc desc = { .bdev = &bdev, };
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	/* Small bdev so case 2 can run past the end easily. */
	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, &desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, &desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, &desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
714 
/* Exercise the miscellaneous bdev ctrlr command handlers — COMPARE, FLUSH,
 * WRITE ZEROES and COPY — covering success (async submission), range/length
 * validation failures, bdev submission errors forced via MOCK_SET, and
 * feature-support checks via spdk_bdev_io_type_supported.
 */
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc desc = { .bdev = &bdev, };
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	/* 3-block I/O (NLB 2, 0's based) on a 3-block bdev, exact length. */
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev not support flush: treated as a successful no-op. */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/*  Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	struct spdk_nvmf_subsystem subsystem = { };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem };
	qpair.ctrlr = &ctrlr;

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Request exceeds the subsystem's write-zeroes size limit. */
	cmd.nvme_cmd.cdw12 = 3;
	subsystem.max_write_zeroes_size_kib = 1;
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* SLBA out of range */
	subsystem.max_write_zeroes_size_kib = 0;
	cmd.nvme_cmd.cdw12 = 2;
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous: one source-range descriptor (nr 0,
	 * 0's based) carried in the request iovec.
	 */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	range.slba = 512;
	range.nlb = 511;
	req.length = 32;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, &desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}
888 
889 static void
890 test_nvmf_bdev_ctrlr_read_write_cmd(void)
891 {
892 	struct spdk_bdev bdev = {};
893 	struct spdk_bdev_desc desc = { .bdev = &bdev, };
894 	struct spdk_nvmf_request req = {};
895 	union nvmf_c2h_msg rsp = {};
896 	union nvmf_h2c_msg cmd = {};
897 	int rc;
898 
899 	req.cmd = &cmd;
900 	req.rsp = &rsp;
901 
902 	/* Read two blocks, block size 4096 */
903 	cmd.nvme_cmd.cdw12 = 1;
904 	bdev.blockcnt = 100;
905 	bdev.blocklen = 4096;
906 	req.length = 8192;
907 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
908 
909 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, &desc, NULL, &req);
910 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
911 
912 	/* Write two blocks, block size 4096 */
913 	cmd.nvme_cmd.cdw12 = 1;
914 	bdev.blockcnt = 100;
915 	bdev.blocklen = 4096;
916 	req.length = 8192;
917 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
918 
919 	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, &desc, NULL, &req);
920 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
921 }
922 
923 static void
924 test_nvmf_bdev_ctrlr_nvme_passthru(void)
925 {
926 	int rc;
927 	struct spdk_bdev bdev = {};
928 	struct spdk_bdev_desc *desc = NULL;
929 	struct spdk_io_channel ch = {};
930 	struct spdk_nvmf_qpair qpair = {};
931 	struct spdk_nvmf_poll_group group = {};
932 
933 	struct spdk_nvmf_request req = {};
934 	union nvmf_c2h_msg rsp = {};
935 	struct spdk_nvme_cmd cmd = {};
936 	struct spdk_bdev_io bdev_io;
937 
938 	bdev.blocklen = 512;
939 	bdev.blockcnt = 10;
940 
941 	qpair.group = &group;
942 
943 	req.qpair = &qpair;
944 	req.cmd = (union nvmf_h2c_msg *)&cmd;
945 	req.rsp = &rsp;
946 	SPDK_IOV_ONE(req.iov, &req.iovcnt, NULL, 0);
947 
948 	cmd.nsid = 1;
949 	cmd.opc = 0xFF;
950 
951 	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
952 	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
953 
954 	/* NVME_IO success */
955 	memset(&rsp, 0, sizeof(rsp));
956 	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
957 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
958 	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
959 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
960 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
961 
962 	/* NVME_IO fail */
963 	memset(&rsp, 0, sizeof(rsp));
964 	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
965 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
966 	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
967 	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
968 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
969 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
970 	reset_bdev_nvme_status();
971 
972 	/* NVME_IO not supported */
973 	memset(&rsp, 0, sizeof(rsp));
974 	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOTSUP);
975 	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
976 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
977 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
978 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
979 	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);
980 
981 	/* NVME_IO no channel - queue IO */
982 	memset(&rsp, 0, sizeof(rsp));
983 	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOMEM);
984 	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
985 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
986 	CU_ASSERT(group.stat.pending_bdev_io == 1);
987 
988 	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, 0);
989 
990 	/* NVME_ADMIN success */
991 	memset(&rsp, 0, sizeof(rsp));
992 	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
993 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
994 	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
995 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
996 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
997 
998 	/* NVME_ADMIN fail */
999 	memset(&rsp, 0, sizeof(rsp));
1000 	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
1001 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1002 	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1003 	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
1004 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1005 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
1006 	reset_bdev_nvme_status();
1007 
1008 	/* NVME_ADMIN not supported */
1009 	memset(&rsp, 0, sizeof(rsp));
1010 	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
1011 	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
1012 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1013 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1014 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1015 	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);
1016 
1017 	/* NVME_ADMIN no channel - queue IO */
1018 	memset(&rsp, 0, sizeof(rsp));
1019 	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
1020 	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
1021 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1022 	CU_ASSERT(group.stat.pending_bdev_io == 2);
1023 
1024 	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
1025 }
1026 
1027 int
1028 main(int argc, char **argv)
1029 {
1030 	CU_pSuite	suite = NULL;
1031 	unsigned int	num_failures;
1032 
1033 	CU_initialize_registry();
1034 
1035 	suite = CU_add_suite("nvmf", NULL, NULL);
1036 
1037 	CU_ADD_TEST(suite, test_get_rw_params);
1038 	CU_ADD_TEST(suite, test_get_rw_ext_params);
1039 	CU_ADD_TEST(suite, test_lba_in_range);
1040 	CU_ADD_TEST(suite, test_get_dif_ctx);
1041 	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
1042 	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
1043 	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
1044 	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
1045 	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
1046 	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);
1047 
1048 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1049 	CU_cleanup_registry();
1050 	return num_failures;
1051 }
1052