xref: /spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "spdk_internal/mock.h"
39 #include "thread/thread_internal.h"
40 
41 #include "nvmf/ctrlr_bdev.c"
42 
43 #include "spdk/bdev_module.h"
44 
SPDK_LOG_REGISTER_COMPONENT(nvmf)

/*
 * Stubs for dependencies of nvmf/ctrlr_bdev.c that these unit tests do not
 * need to exercise for real. Each DEFINE_STUB records its arguments and
 * returns the fixed value given as the last macro argument; a test can
 * override the return value with MOCK_SET() and restore it with MOCK_CLEAR().
 */
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
72 
/* Test doubles that read straight from the locally-constructed spdk_bdev,
 * so each test controls the values via its struct fields. */
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}
84 
85 bool
86 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
87 {
88 	return (bdev->md_len != 0) && bdev->md_interleave;
89 }
90 
91 enum spdk_dif_type spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
92 {
93 	if (bdev->md_len != 0) {
94 		return bdev->dif_type;
95 	} else {
96 		return SPDK_DIF_DISABLE;
97 	}
98 }
99 
100 bool
101 spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
102 {
103 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
104 		return bdev->dif_is_head_of_md;
105 	} else {
106 		return false;
107 	}
108 }
109 
110 uint32_t
111 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
112 {
113 	if (spdk_bdev_is_md_interleaved(bdev)) {
114 		return bdev->blocklen - bdev->md_len;
115 	} else {
116 		return bdev->blocklen;
117 	}
118 }
119 
/* More pass-through accessors over the test-owned spdk_bdev fields. */
uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}
137 
/* Remaining bdev/nvmf I/O entry points stubbed out; all succeed (return 0 /
 * false / NULL) unless a test overrides them with MOCK_SET(). */
DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);
/* The namespace iterators must never be reached by the code paths under
 * test; abort() makes any unexpected call fail loudly instead of silently
 * returning NULL. The trailing return satisfies the compiler. */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
	      (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));
254 
255 int
256 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
257 		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
258 		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
259 		  uint32_t data_offset, uint16_t guard_seed)
260 {
261 	ctx->block_size = block_size;
262 	ctx->md_size = md_size;
263 	ctx->init_ref_tag = init_ref_tag;
264 
265 	return 0;
266 }
267 
268 static void
269 test_get_rw_params(void)
270 {
271 	struct spdk_nvme_cmd cmd = {0};
272 	uint64_t lba;
273 	uint64_t count;
274 
275 	lba = 0;
276 	count = 0;
277 	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
278 	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
279 	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
280 	CU_ASSERT(lba == 0x1234567890ABCDEF);
281 	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
282 }
283 
284 static void
285 test_lba_in_range(void)
286 {
287 	/* Trivial cases (no overflow) */
288 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
289 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
290 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
291 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
292 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
293 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
294 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
295 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);
296 
297 	/* Overflow edge cases */
298 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
299 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
300 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
301 	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
302 }
303 
304 static void
305 test_get_dif_ctx(void)
306 {
307 	struct spdk_bdev bdev = {};
308 	struct spdk_nvme_cmd cmd = {};
309 	struct spdk_dif_ctx dif_ctx = {};
310 	bool ret;
311 
312 	bdev.md_len = 0;
313 
314 	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
315 	CU_ASSERT(ret == false);
316 
317 	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
318 	bdev.blocklen = 520;
319 	bdev.md_len = 8;
320 
321 	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
322 	CU_ASSERT(ret == true);
323 	CU_ASSERT(dif_ctx.block_size = 520);
324 	CU_ASSERT(dif_ctx.md_size == 8);
325 	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
326 }
327 
/*
 * Exercise nvmf_bdev_ctrlr_compare_and_write_cmd() (fused COMPARE+WRITE)
 * through the success path and each validation-failure path. A minimal
 * subsystem/controller/qpair topology is built on the stack so the request
 * routing in the code under test can resolve namespace 1 to the test bdev.
 */
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	/* 10-block bdev, 512-byte blocks, attached as namespace 1. */
	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	/* First half of the fused pair: COMPARE. */
	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	/* Second half of the fused pair: WRITE. */
	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	/* Valid fused pair is submitted to the (stubbed) bdev layer. */
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	/* The mismatch is reported on the WRITE half only. */
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	/* One byte short of the required transfer size. */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
457 
458 static void
459 test_nvmf_bdev_ctrlr_identify_ns(void)
460 {
461 	struct spdk_nvmf_ns ns = {};
462 	struct spdk_nvme_ns_data nsdata = {};
463 	struct spdk_bdev bdev = {};
464 	uint8_t ns_g_id[16] = "abcdefgh";
465 	uint8_t eui64[8] = "12345678";
466 
467 	ns.bdev = &bdev;
468 	ns.ptpl_file = (void *)0xDEADBEEF;
469 	memcpy(ns.opts.nguid, ns_g_id, 16);
470 	memcpy(ns.opts.eui64, eui64, 8);
471 
472 	bdev.blockcnt = 10;
473 	bdev.acwu = 0;
474 	bdev.md_len = 512;
475 	bdev.dif_type = SPDK_DIF_TYPE1;
476 	bdev.blocklen = 4096;
477 	bdev.md_interleave = 0;
478 	bdev.optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
479 	bdev.dif_is_head_of_md = true;
480 
481 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
482 	CU_ASSERT(nsdata.nsze == 10);
483 	CU_ASSERT(nsdata.ncap == 10);
484 	CU_ASSERT(nsdata.nuse == 10);
485 	CU_ASSERT(nsdata.nlbaf == 0);
486 	CU_ASSERT(nsdata.flbas.format == 0);
487 	CU_ASSERT(nsdata.nacwu == 0);
488 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
489 	CU_ASSERT(nsdata.lbaf[0].ms == 512);
490 	CU_ASSERT(nsdata.dpc.pit1 == 1);
491 	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_TYPE1);
492 	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
493 	CU_ASSERT(nsdata.nmic.can_share == 1);
494 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
495 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
496 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
497 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
498 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
499 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
500 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
501 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
502 	CU_ASSERT(nsdata.flbas.extended == 1);
503 	CU_ASSERT(nsdata.mc.extended == 1);
504 	CU_ASSERT(nsdata.mc.pointer == 0);
505 	CU_ASSERT(nsdata.dps.md_start == true);
506 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
507 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
508 
509 	memset(&nsdata, 0, sizeof(nsdata));
510 	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
511 	CU_ASSERT(nsdata.nsze == 10);
512 	CU_ASSERT(nsdata.ncap == 10);
513 	CU_ASSERT(nsdata.nuse == 10);
514 	CU_ASSERT(nsdata.nlbaf == 0);
515 	CU_ASSERT(nsdata.flbas.format == 0);
516 	CU_ASSERT(nsdata.nacwu == 0);
517 	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
518 	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
519 	CU_ASSERT(nsdata.nmic.can_share == 1);
520 	CU_ASSERT(nsdata.lbaf[0].ms == 0);
521 	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
522 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
523 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
524 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
525 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
526 	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
527 	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
528 	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
529 	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
530 	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
531 }
532 
/*
 * Exercise nvmf_bdev_ctrlr_start_zcopy() with the same minimal
 * subsystem/controller/qpair topology as the fused-command test: one
 * 10-block bdev attached as namespace 1.
 */
static void
test_nvmf_bdev_ctrlr_start_zcopy(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

	CU_ASSERT(rc == 0);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

	CU_ASSERT(rc < 0);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	/* Request buffer one byte smaller than the transfer requires. */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_start_zcopy(&bdev, desc, &ch, &write_req);

	CU_ASSERT(rc < 0);
}
612 
/*
 * Exercise the COMPARE, FLUSH and WRITE ZEROES command handlers, driving
 * the stubbed bdev layer through MOCK_SET()/MOCK_CLEAR() to hit success,
 * out-of-range, invalid-SGL, unsupported and device-error paths.
 * NOTE(review): the order of the cases matters — several rely on mock
 * state set (and not cleared) by earlier cases.
 */
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	/* 3-block bdev; the command below covers blocks 0-2 exactly. */
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev not support flush: completes immediately with SUCCESS (no-op) */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/*  Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write block error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
}
715 
716 static void
717 test_nvmf_bdev_ctrlr_read_write_cmd(void)
718 {
719 	struct spdk_bdev bdev = {};
720 	struct spdk_nvmf_request req = {};
721 	union nvmf_c2h_msg rsp = {};
722 	union nvmf_h2c_msg cmd = {};
723 	int rc;
724 
725 	req.cmd = &cmd;
726 	req.rsp = &rsp;
727 
728 	/* Read two blocks, block size 4096 */
729 	cmd.nvme_cmd.cdw12 = 1;
730 	bdev.blockcnt = 100;
731 	bdev.blocklen = 4096;
732 	req.length = 8192;
733 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
734 
735 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
736 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
737 
738 	/* Execute zero copy  */
739 	req.zcopy_phase = NVMF_ZCOPY_PHASE_EXECUTE;
740 
741 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
742 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
743 
744 	/* Read cmd request length invalid */
745 	req.length = 4096;
746 
747 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
748 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
749 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
750 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
751 
752 	/* Write two blocks, block size 4096 */
753 	cmd.nvme_cmd.cdw12 = 1;
754 	bdev.blockcnt = 100;
755 	bdev.blocklen = 4096;
756 	req.length = 8192;
757 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
758 
759 	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
760 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
761 
762 	/* Execute zero copy  */
763 	req.zcopy_phase = NVMF_ZCOPY_PHASE_EXECUTE;
764 
765 	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
766 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
767 
768 	/* Write cmd request length invalid */
769 	req.length = 4096;
770 
771 	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
772 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
773 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
774 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
775 }
776 
/* Register and run all test cases; returns the CUnit failure count so the
 * build system can detect a failing run via the process exit status. */
int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort on framework-level errors rather than continuing silently. */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_start_zcopy);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
802