/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

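/*
 * Most bdev and nvmf dependencies are mocked out with DEFINE_STUB(name,
 * return type, argument list, default return value) from
 * spdk_internal/mock.h.  Individual test cases override a stub's return
 * value with MOCK_SET() and restore the default with MOCK_CLEAR().
 */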
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));

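/*
 * The accessors below are written out by hand rather than stubbed so that
 * they return fields straight from the struct spdk_bdev under test; each
 * test case shapes the reported geometry simply by filling in the bdev.
 */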
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

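/*
 * The namespace iterators should never be reached by the code under test;
 * abort() turns any unexpected call into an immediate failure.
 */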
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

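/*
 * Minimal spdk_dif_ctx_init() stand-in: it records only the fields that
 * test_get_dif_ctx() asserts on and ignores the rest.
 */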
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint16_t guard_seed)
{
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

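/*
 * Globals backing the spdk_bdev_io_get_nvme_status() mock below.  Tests
 * set these to simulate the NVMe status that a bdev I/O completed with.
 */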
static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}

void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}

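/*
 * nvmf_bdev_ctrlr_get_rw_params() decodes the starting LBA from CDW10/11
 * and the block count from CDW12 bits 15:00.  NLB is a 0's based value,
 * so e.g. NLB == 0x9875 means 0x9876 blocks.
 */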
static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

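/*
 * nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, io_start_lba,
 * io_num_blocks) must reject ranges that extend past the end of the bdev,
 * including ranges whose end would wrap around UINT64_MAX.
 */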
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

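/*
 * A DIF context should only be generated for bdevs with metadata
 * (md_len != 0); the initial reference tag comes from the low 32 bits of
 * the SLBA.
 */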
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

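/*
 * Exercises the fused COMPARE + WRITE path: both halves must agree on
 * SLBA and NLB, the range must fit within the bdev, and the write
 * request's transport length must cover the data being written.
 */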
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

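/*
 * Identify Namespace should report the bdev geometry and the reservation
 * capabilities, with the persist bit set because a ptpl_file is
 * configured.  The second pass (third argument true) expects the metadata
 * size to be reported as 0, i.e. hidden from the host.
 */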
static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 1;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dpc.pit1 == 1);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_TYPE1);
	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp((uint8_t *)&nsdata.eui64, eui64, 8));

	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!memcmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!memcmp((uint8_t *)&nsdata.eui64, eui64, 8));
}

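/*
 * Zcopy start must run the same LBA-range and transport-length checks as
 * a regular write before handing the request to spdk_bdev_zcopy_start();
 * validation failures complete synchronously.
 */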
static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

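/*
 * Covers the non-fused I/O handlers (compare, flush, write zeroes) across
 * the success, out-of-range, bad-length, and bdev-error paths.
 */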
static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* bdev does not support flush; the command completes successfully as a no-op */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* Write zeroes blocks status asynchronous */
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write zeroes error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
}

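/*
 * Read and write validate that the transport length covers
 * NLB * block size; here 2 blocks * 4096 bytes == 8192, so both commands
 * are submitted asynchronously.
 */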
static void
test_nvmf_bdev_ctrlr_read_write_cmd(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Read two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Write two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
}

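/*
 * For passthru commands, -ENOTSUP from the bdev layer maps to Invalid
 * Opcode with DNR set, while -ENOMEM queues the request for retry and
 * bumps group.stat.pending_bdev_io.
 */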
static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	cmd.nsid = 1;
	cmd.opc = 0xFF;

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_io_passthru, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}

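/*
 * Standard CUnit runner; in the SPDK tree this binary is normally invoked
 * through test/unit/unittest.sh.
 */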
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}