/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

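/*
 * DEFINE_STUB()/DEFINE_STUB_V() (from spdk_internal/mock.h) generate canned
 * implementations that return the given default value; individual tests
 * override a stub's return value at runtime with MOCK_SET() and restore the
 * default with MOCK_CLEAR().
 */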
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);

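/*
 * Hand-written mocks: these return fields straight from the caller-supplied
 * struct spdk_bdev, so each test controls the mocked behavior simply by
 * filling in the bdev it passes to the code under test.
 */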
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_iov_passthru_md, int, (
		    struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		    const struct spdk_nvme_cmd *cmd, struct iovec *iov, int iovcnt,
		    size_t nbytes, void *md_buf, size_t md_len,
		    spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

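/*
 * Minimal spdk_dif_ctx_init() mock: it only records the handful of
 * parameters that test_get_dif_ctx() asserts on.
 */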
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	ctx->dif_pi_format = opts->dif_pi_format;
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

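/*
 * Completion status fed to the spdk_bdev_io_get_nvme_status() mock below;
 * tests set these globals to simulate a bdev I/O completion status and call
 * reset_bdev_nvme_status() to restore the success defaults.
 */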
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}

void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

static void
test_get_rw_ext_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	struct spdk_bdev_ext_io_opts opts = {0};

	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE);
	to_le32(&cmd.cdw13, 0x2 << 16);
	nvmf_bdev_ctrlr_get_rw_ext_params(&cmd, &opts);
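	/* Expected: 0x209875 is NLB 0x9875 OR'd with the directive flag
	 * (0x200000, the DTYPE field in CDW12 bits 23:20), and 0x20000 is
	 * DSPEC 0x2 placed in CDW13 bits 31:16.
	 */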
	CU_ASSERT(opts.nvme_cdw12.raw == 0x209875);
	CU_ASSERT(opts.nvme_cdw13.raw == 0x20000);
}

static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}
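
/*
 * One overflow-safe formulation of the range check exercised above (a
 * sketch, not necessarily the exact expression used in ctrlr_bdev.c):
 *
 *   lba < num_blocks && count <= num_blocks - lba
 *
 * The naive "lba + count <= num_blocks" wraps around for the UINT64_MAX
 * cases and would wrongly accept them.
 */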

static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
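	/* The initial reference tag is seeded from the low 32 bits of the
	 * starting LBA (0x90ABCDEF below).
	 */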
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

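/*
 * Fused compare-and-write: the COMPARE (FUSE_FIRST) and WRITE (FUSE_SECOND)
 * halves must describe the same SLBA/NLB range. A range mismatch, an LBA
 * range beyond the namespace, or a transport length that does not match NLB
 * fails the pair before any bdev I/O is submitted.
 */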
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 1;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));

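	/* With dif_insert_or_strip enabled the target handles the protection
	 * information itself, so the namespace is reported to the host
	 * without metadata (lbaf[0].ms == 0).
	 */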
	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
}

static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev does not support flush */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	struct spdk_nvmf_subsystem subsystem = { };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem };
	qpair.ctrlr = &ctrlr;

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	cmd.nvme_cmd.cdw12 = 3;
	subsystem.max_write_zeroes_size_kib = 1;
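	/* NLB = cdw12 + 1 = 4 blocks * 512 B = 2 KiB, which exceeds the
	 * 1 KiB write zeroes limit set above.
	 */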
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* SLBA out of range */
	subsystem.max_write_zeroes_size_kib = 0;
	cmd.nvme_cmd.cdw12 = 2;
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	range.slba = 512;
	range.nlb = 511;
	req.length = 32;
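	/* A single copy source range descriptor (format 0) is 32 bytes,
	 * i.e. sizeof(struct spdk_nvme_scc_source_range).
	 */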
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}

static void
test_nvmf_bdev_ctrlr_read_write_cmd(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Read two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Write two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
}

static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, NULL, 0);

	cmd.nsid = 1;
	cmd.opc = 0xFF;

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO -ENOMEM: no bdev_io available, so the request is queued */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_iov_passthru_md, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN -ENOMEM: no bdev_io available, so the request is queued */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_get_rw_ext_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}