/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

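/*
 * Stub out the bdev and nvmf APIs that ctrlr_bdev.c calls, so the file under
 * test (included above as a .c file) can be compiled and exercised in isolation.
 */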
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

enum spdk_dif_type
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
	      (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint16_t guard_seed)
{
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

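/*
 * nvmf_bdev_ctrlr_get_rw_params() should pull the 64-bit starting LBA out of
 * CDW10/CDW11 and convert the 0's based NLB field in CDW12 (bits 15:00) into
 * a block count, ignoring the flag bits in the upper half of CDW12.
 */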
static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

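/*
 * nvmf_bdev_ctrlr_lba_in_range() must accept only ranges that fit entirely
 * within the namespace, including combinations where start + length would
 * overflow a 64-bit LBA.
 */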
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

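/*
 * nvmf_bdev_ctrlr_get_dif_ctx() should fail for a bdev without metadata and,
 * for a DIF-enabled bdev, initialize the context from the bdev geometry and
 * the lower 32 bits of the command's starting LBA.
 */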
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

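/*
 * nvmf_bdev_ctrlr_compare_and_write_cmd() handles a fused COMPARE + WRITE pair:
 * a matching pair is submitted asynchronously, while mismatched LBA ranges,
 * ranges beyond the namespace, and an incorrect transfer length fail the WRITE
 * half immediately with the corresponding status code.
 */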
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

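/*
 * nvmf_bdev_ctrlr_identify_ns() should translate bdev properties (block count,
 * block size, metadata, DIF settings, optimal I/O boundary) and namespace
 * options (NGUID, EUI64, reservation persistence file) into the Identify
 * Namespace data structure. The second call flips the final flag, in which
 * case metadata is not reported to the host (lbaf[0].ms == 0).
 */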
static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 0;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dpc.pit1 == 1);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_TYPE1);
	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));

	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);

	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}