xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision d39c4443d43c9f9ab10fa35965af4af45b55b593)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 #include "spdk_cunit.h"
37 #include "spdk/thread.h"
38 #include "spdk/bdev_module.h"
39 #include "spdk/bdev_module.h"
40 
41 #include "common/lib/ut_multithread.c"
42 
43 #include "bdev/nvme/bdev_nvme.c"
44 #include "bdev/nvme/common.c"
45 
46 #include "unit/lib/json_mock.c"
47 
48 static void *g_accel_p = (void *)0xdeadbeaf;
49 
/* Stubs for nvme transport/ctrlr library calls that the bdev_nvme code under
 * test invokes but whose real behavior is irrelevant to these tests.  Each
 * DEFINE_STUB emits a mock returning the fixed value given as last argument;
 * DEFINE_STUB_V emits a void mock.
 */
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

/* Accel engine io_device callbacks; the channel itself comes from g_accel_p. */
DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

/* Declares the MOCK_SET/MOCK_CLEAR machinery used by the hand-written mock
 * of spdk_nvme_ctrlr_get_memory_domain() below.
 */
DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, struct spdk_memory_domain *);
78 
/* Hand-written mock: returns the value injected with MOCK_SET (via
 * HANDLE_RETURN_MOCK), or NULL when no mock value is set.
 */
struct spdk_memory_domain *spdk_nvme_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return NULL;
}
85 
/* Return an I/O channel for the fake accel io_device registered at g_accel_p. */
struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}
91 
/* Mock of the default io qpair opts getter: simply zero the caller's struct. */
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}
99 
/* Remaining library stubs: ctrlr property queries, namespace property
 * queries, CUSE, ZNS commands, bdev module registration, opal and accel
 * hooks.  All return fixed "empty" values (0/false/NULL) or do nothing.
 */
DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Namespace property queries - the mock struct spdk_nvme_ns below does not
 * model these attributes, so they all report zero/false.
 */
DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

/* ZNS (zoned namespace) queries and zone management commands. */
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* bdev module framework, opal and accel hooks. */
DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
213 
214 
/* A single in-flight command queued on a mock qpair.  Completed (and freed)
 * by spdk_nvme_qpair_process_completions() below.
 */
struct ut_nvme_req {
	uint16_t			opc;		/* NVMe opcode */
	spdk_nvme_cmd_cb		cb_fn;		/* completion callback */
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;		/* completion to deliver */
	TAILQ_ENTRY(ut_nvme_req)	tailq;		/* on qpair->outstanding_reqs */
};

/* Minimal namespace model: identity, active flag, uuid and ANA state. */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;		/* 1-based nsid */
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

/* Minimal qpair model: connection state and a FIFO of outstanding requests. */
struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq; /* on poll_group->qpairs */
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;		/* on ctrlr->active_io_qpairs */
};

/* Minimal controller model: namespaces, admin queue, identify data and
 * failure-injection flags (is_failed, fail_reset).
 */
struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;		/* array of num_ns entries */
	struct spdk_nvme_ns_data	*nsdata;	/* parallel array to ns */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;	/* injected transport failure */
	bool				fail_reset;	/* make spdk_nvme_ctrlr_reset() fail */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;		/* on g_ut_init/attached_ctrlrs */
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};

/* Controllers created by ut_attach_ctrlr() wait here until "probed". */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers that completed attach via spdk_nvme_probe_poll_async(). */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;	/* return value of spdk_bdev_register() */
279 
280 static void
281 ut_init_trid(struct spdk_nvme_transport_id *trid)
282 {
283 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
284 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
285 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
286 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
287 }
288 
289 static void
290 ut_init_trid2(struct spdk_nvme_transport_id *trid)
291 {
292 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
293 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
294 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
295 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
296 }
297 
298 static void
299 ut_init_trid3(struct spdk_nvme_transport_id *trid)
300 {
301 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
302 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
303 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
304 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
305 }
306 
/* Three-way compare two ints: negative if a < b, zero if equal, positive if
 * a > b.  Implemented without subtraction because `a - b` overflows (UB) for
 * widely separated operands such as INT_MIN vs INT_MAX; callers only examine
 * the sign, so the change is behavior-compatible.
 */
static int
cmp_int(int a, int b)
{
	return (a > b) - (a < b);
}
312 
313 int
314 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
315 			       const struct spdk_nvme_transport_id *trid2)
316 {
317 	int cmp;
318 
319 	/* We assume trtype is TCP for now. */
320 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
321 
322 	cmp = cmp_int(trid1->trtype, trid2->trtype);
323 	if (cmp) {
324 		return cmp;
325 	}
326 
327 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
328 	if (cmp) {
329 		return cmp;
330 	}
331 
332 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
333 	if (cmp) {
334 		return cmp;
335 	}
336 
337 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
338 	if (cmp) {
339 		return cmp;
340 	}
341 
342 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
343 	if (cmp) {
344 		return cmp;
345 	}
346 
347 	return 0;
348 }
349 
350 static struct spdk_nvme_ctrlr *
351 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
352 		bool ana_reporting)
353 {
354 	struct spdk_nvme_ctrlr *ctrlr;
355 	uint32_t i;
356 
357 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
358 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
359 			/* There is a ctrlr whose trid matches. */
360 			return NULL;
361 		}
362 	}
363 
364 	ctrlr = calloc(1, sizeof(*ctrlr));
365 	if (ctrlr == NULL) {
366 		return NULL;
367 	}
368 
369 	ctrlr->attached = true;
370 	ctrlr->adminq.ctrlr = ctrlr;
371 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
372 
373 	if (num_ns != 0) {
374 		ctrlr->num_ns = num_ns;
375 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
376 		if (ctrlr->ns == NULL) {
377 			free(ctrlr);
378 			return NULL;
379 		}
380 
381 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
382 		if (ctrlr->nsdata == NULL) {
383 			free(ctrlr->ns);
384 			free(ctrlr);
385 			return NULL;
386 		}
387 
388 		for (i = 0; i < num_ns; i++) {
389 			ctrlr->ns[i].id = i + 1;
390 			ctrlr->ns[i].ctrlr = ctrlr;
391 			ctrlr->ns[i].is_active = true;
392 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
393 			ctrlr->nsdata[i].nsze = 1024;
394 		}
395 
396 		ctrlr->cdata.nn = num_ns;
397 		ctrlr->cdata.nanagrpid = num_ns;
398 	}
399 
400 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
401 	ctrlr->trid = *trid;
402 	TAILQ_INIT(&ctrlr->active_io_qpairs);
403 
404 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
405 
406 	return ctrlr;
407 }
408 
/* Tear down a mock controller created by ut_attach_ctrlr().  The controller
 * must have no active I/O qpairs and must be on g_ut_attached_ctrlrs.
 */
static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}
419 
420 static int
421 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
422 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
423 {
424 	struct ut_nvme_req *req;
425 
426 	req = calloc(1, sizeof(*req));
427 	if (req == NULL) {
428 		return -ENOMEM;
429 	}
430 
431 	req->opc = opc;
432 	req->cb_fn = cb_fn;
433 	req->cb_arg = cb_arg;
434 
435 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
436 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
437 
438 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
439 	qpair->num_outstanding_reqs++;
440 
441 	return 0;
442 }
443 
/* Allocate a bdev_io of the given type targeting nbdev, with room for the
 * driver context (struct nvme_bdev_io) in the trailing driver_ctx area.
 * Caller owns the returned memory and frees it with free().
 */
static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

/* Point the bdev_io at a single fake 4KiB buffer.  The address is a sentinel
 * (0xFEEDBEEF) - the mock command path never dereferences it.
 */
static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}
468 
469 static void
470 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
471 {
472 	if (ctrlr->is_failed) {
473 		free(ctrlr);
474 		return;
475 	}
476 
477 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
478 
479 	if (probe_ctx->attach_cb) {
480 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
481 	}
482 }
483 
/* Mock async probe poller: attach every pending controller whose trid matches
 * the probe context, then free the context.  Always reports completion (0).
 */
int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}
501 
502 struct spdk_nvme_probe_ctx *
503 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
504 			const struct spdk_nvme_ctrlr_opts *opts,
505 			spdk_nvme_attach_cb attach_cb)
506 {
507 	struct spdk_nvme_probe_ctx *probe_ctx;
508 
509 	if (trid == NULL) {
510 		return NULL;
511 	}
512 
513 	probe_ctx = calloc(1, sizeof(*probe_ctx));
514 	if (probe_ctx == NULL) {
515 		return NULL;
516 	}
517 
518 	probe_ctx->trid = *trid;
519 	probe_ctx->cb_ctx = (void *)opts;
520 	probe_ctx->attach_cb = attach_cb;
521 
522 	return probe_ctx;
523 }
524 
/* Mock detach: destroy the controller if it is still attached. */
int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

/* Return the mock identify-controller data. */
const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

/* Return the namespace for a 1-based nsid, or NULL if out of range. */
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

/* Report whether the namespace at a 1-based nsid exists and is active. */
bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}
566 
/* Mock controller status register: always reads as all-zero (ready state
 * bits clear, no fault).
 */
union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

/* Mock version register: always reads as zero. */
union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

/* Allocate a mock I/O qpair (disconnected; user_opts ignored) and track it on
 * the controller's active_io_qpairs list.  Freed by
 * spdk_nvme_ctrlr_free_io_qpair().
 */
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}
605 
/* Mock connect: flip the connected flag; -EISCONN if already connected. */
int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

/* Mock reconnect: succeeds unless the owning controller has an injected
 * failure (is_failed), in which case -ENXIO is returned.
 */
int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

/* Mock free: detach from its poll group (if any), drop it from the
 * controller's active list and release it.  All requests must have completed.
 */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	/* Mark disconnected first; spdk_nvme_poll_group_remove() asserts it. */
	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}
659 
/* Mock synchronous reset: fails with -EIO when the test set fail_reset,
 * otherwise clears the injected failure state.
 */
int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		return -EIO;
	}

	ctrlr->is_failed = false;

	return 0;
}

/* Mock async-reset poller: the reset "completes" on the first poll by
 * delegating to the synchronous mock above.  The context is consumed here.
 */
int
spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;

	free(ctrlr_reset_ctx);
	return spdk_nvme_ctrlr_reset(ctrlr);
}

/* Mock async-reset start: just hand back a context; the caller must poll it
 * with spdk_nvme_ctrlr_reset_poll_async(), which also frees it.
 */
int
spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
{
	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;

	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
	if (!ctrlr_reset_ctx) {
		return -ENOMEM;
	}

	ctrlr_reset_ctx->ctrlr = ctrlr;
	*reset_ctx = ctrlr_reset_ctx;

	return 0;
}

/* Inject a transport-level failure into the mock controller. */
void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}
703 
/* Size of one ANA group descriptor carrying exactly one NSID (the trailing
 * uint32_t is the single nsid[] entry after the fixed descriptor header).
 */
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
/* Serialize a fake ANA log page into buf: a header followed by one
 * single-namespace group descriptor per namespace, where group id == nsid and
 * the state comes from the mock ns.  Asserts that buf (length bytes) is large
 * enough at every step.
 */
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	/* One ANA group per namespace (mirrors cdata.nanagrpid in ut_attach_ctrlr). */
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	/* Build each descriptor in a local, aligned scratch buffer, then copy
	 * it out; buf itself may not be suitably aligned for direct stores.
	 */
	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}
743 
/* Mock get-log-page: synchronously fills the payload for the ANA log page
 * (other pages are left untouched), then queues a GET_LOG_PAGE completion on
 * the admin queue.
 */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		/* The mock only supports reading the whole page at once. */
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

/* Mock raw admin command: just queue a completion with the command's opcode. */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}
767 
/* Mock abort: find the outstanding request whose cb_arg matches cmd_cb_arg on
 * the given qpair (admin queue when qpair is NULL), rewrite its completion to
 * ABORTED_BY_REQUEST, and queue a successful ABORT completion on the admin
 * queue.  Returns -ENOENT when no matching request is outstanding.  Note the
 * aborted request is not completed here - both it and the abort complete on
 * the next poll of their respective queues.
 */
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	/* Allocate up front so the target request is not modified if we fail. */
	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	/* TAILQ_FOREACH leaves req NULL when the loop finishes without a match. */
	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	/* The victim request will complete as aborted. */
	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	/* cdw0 == 0 signals "command successfully aborted". */
	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}
811 
/* Poll the admin queue; completes every queued mock admin request. */
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

/* Look up the identify-namespace data parallel to this ns (id is 1-based). */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

/* Namespace capacity in sectors, taken straight from the mock nsdata. */
uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}
853 
/* The ns_cmd_* mocks below all funnel into ut_submit_nvme_request() with the
 * matching opcode; buffers, LBAs and SGL callbacks are ignored.  The command
 * completes (successfully, unless a test edited the queued cpl) on the next
 * poll of the qpair.
 */
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

/* Tests check this flag to verify the ext I/O path was taken. */
static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

/* Tests check this flag to verify the ext I/O path was taken. */
static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}
947 
/* Mock poll group: records the caller's ctx and accel function table and
 * starts with an empty qpair list.
 */
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

/* Destroy a poll group; -EBUSY if any qpair is still a member. */
int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}
978 
/* Drain the qpair: complete (and free) every queued mock request, invoking
 * its callback with the cpl recorded at submission time.  max_completions is
 * ignored - the mock always completes everything.  Returns the number of
 * completions delivered.
 */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}

/* Poll every connected qpair in the group, then report each disconnected
 * qpair via disconnected_qpair_cb.  Returns the first error seen, or the
 * total completion count.
 */
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	/* The tests only ever poll with "unlimited" completions per qpair. */
	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_connected) {
			local_completions = spdk_nvme_qpair_process_completions(qpair,
					    completions_per_qpair);
			if (local_completions < 0 && error_reason == 0) {
				error_reason = local_completions;
			} else {
				num_completions += local_completions;
				assert(num_completions >= 0);
			}
		}
	}

	/* Second pass so callbacks see the group fully polled first. */
	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (!qpair->is_connected) {
			disconnected_qpair_cb(qpair, group->ctx);
		}
	}

	return error_reason ? error_reason : num_completions;
}
1034 
1035 int
1036 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1037 			 struct spdk_nvme_qpair *qpair)
1038 {
1039 	CU_ASSERT(!qpair->is_connected);
1040 
1041 	qpair->poll_group = group;
1042 	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);
1043 
1044 	return 0;
1045 }
1046 
1047 int
1048 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1049 			    struct spdk_nvme_qpair *qpair)
1050 {
1051 	CU_ASSERT(!qpair->is_connected);
1052 
1053 	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);
1054 
1055 	return 0;
1056 }
1057 
1058 int
1059 spdk_bdev_register(struct spdk_bdev *bdev)
1060 {
1061 	return g_ut_register_bdev_status;
1062 }
1063 
1064 void
1065 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1066 {
1067 	int rc;
1068 
1069 	rc = bdev->fn_table->destruct(bdev->ctxt);
1070 	if (rc <= 0 && cb_fn != NULL) {
1071 		cb_fn(cb_arg, rc);
1072 	}
1073 }
1074 
1075 int
1076 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
1077 {
1078 	bdev->blockcnt = size;
1079 
1080 	return 0;
1081 }
1082 
1083 struct spdk_io_channel *
1084 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
1085 {
1086 	return (struct spdk_io_channel *)bdev_io->internal.ch;
1087 }
1088 
1089 void
1090 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
1091 {
1092 	bdev_io->internal.status = status;
1093 	bdev_io->internal.in_submit_request = false;
1094 }
1095 
1096 void
1097 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1098 {
1099 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1100 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1101 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1102 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1103 	} else {
1104 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1105 	}
1106 
1107 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1108 	bdev_io->internal.error.nvme.sct = sct;
1109 	bdev_io->internal.error.nvme.sc = sc;
1110 
1111 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1112 }
1113 
1114 void
1115 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1116 {
1117 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1118 
1119 	ut_bdev_io_set_buf(bdev_io);
1120 
1121 	cb(ch, bdev_io, true);
1122 }
1123 
/* Create a nvme_ctrlr and verify that deletion only takes effect after the
 * pending asynchronous messages have been polled.
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion is asynchronous; the ctrlr stays registered until the
	 * threads are polled.
	 */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1147 
/* Step through bdev_nvme_reset() one poller iteration at a time and verify
 * that the per-channel qpairs are torn down and recreated thread by thread,
 * and that the ctrlr/trid failed flags are cleared once the reset completes.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* One ctrlr channel per thread. */
	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* qpairs are destroyed channel by channel: thread 0's first ... */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* ... then thread 1's. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* The next iteration resets the ctrlr itself, clearing is_failed. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* qpairs are then recreated, again starting from thread 0 ... */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	/* ... followed by thread 1. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* The final iteration completes the reset and clears the trid flag. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1255 
/* Verify that destructing a ctrlr while a reset is in flight is deferred
 * until the reset finishes, and that further resets are rejected meanwhile.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, two I/O channels are still open, so destruct does not
	 * complete yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	/* With both channels released, destruct can finally finish. */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1330 
/* Exercise bdev_nvme_failover() with a single trid (behaves like a reset)
 * and with two trids (switches the connected trid to the alternate path).
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	/* With only one trid, failover behaves like a plain reset; the trid
	 * failed flag is cleared on completion.
	 */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	/* The alternate trid becomes the connected trid. */
	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1469 
/* Completion callback for bdev_nvme_create(); checks the attach result
 * against the expectations staged in the g_ut_attach_* globals.
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
1476 
/* Verify that a reset submitted while another reset is in progress is queued
 * on the submitting channel and completes with the same outcome as the
 * in-flight reset (success and failure scenarios).
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	/* Pre-set FAILED so a successful reset is observable as SUCCESS. */
	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	/* The second reset is parked on its channel's pending_resets queue. */
	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	/* The queued reset completes together with the first, as SUCCESS. */
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	/* Make the in-flight reset fail before it is processed. */
	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}
1602 
/* Cover the four attach outcomes: failed ctrlr (nothing created), ctrlr
 * with no namespaces, ctrlr with one namespace and one bdev, and bdev
 * registration failure (ctrlr created but no bdev).
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* Force spdk_bdev_register() (stubbed above) to fail. */
	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Restore the global for subsequent tests. */
	g_ut_register_bdev_status = 0;
}
1730 
/* Verify that a disconnected I/O qpair is reconnected by polling, and that
 * reconnection does not happen while the ctrlr itself is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);
	CU_ASSERT(ctrlr_ch->group != NULL);
	CU_ASSERT(ctrlr_ch->group->group != NULL);
	CU_ASSERT(ctrlr_ch->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	ctrlr_ch->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too. */
	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1787 
/* Verify AER (Asynchronous Event Request) handling: namespace attach,
 * detach and resize on a NS-attribute-changed notice, and ANA state
 * propagation on an ANA-change notice.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);

	bdev = nvme_ctrlr->namespaces[3]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change ANA state of active namespaces. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	/* ANA log page reads complete asynchronously; poll to pick them up. */
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1885 
/* Submit a single I/O of the given type and verify that it leaves exactly
 * one request outstanding on the qpair, then completes with SUCCESS after
 * polling.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* The I/O is queued on the qpair until completions are polled. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1910 
/* Submit an I/O type that the bdev module completes inline (no NVMe request
 * is queued) and verify it finishes immediately with SUCCESS.
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Completed synchronously: no polling needed, nothing outstanding. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1930 
/* Submit a fused compare-and-write and verify that it produces two
 * outstanding requests (compare then write) which complete together.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	/* NOTE(review): tagging cdw0 with the opcode presumably lets the
	 * completion handler tell the compare half from the write half —
	 * confirm against the compare-and-write completion path.
	 */
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1964 
/* Submit an admin passthrough command and verify that the admin request is
 * processed on thread 1 while the bdev_io completion is delivered back on
 * thread 0, as shown by the two separate poll_thread_times() steps.
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Thread 1 processes the admin queue completion ... */
	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* ... then thread 0 delivers the bdev_io completion. */
	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}
1988 
/* Drive the full bdev_nvme_submit_request() surface for one namespace:
 * read/write/compare/unmap, flush (nop), fused compare-and-write, the
 * ext-opts read/write variants, and an admin passthrough command.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct spdk_bdev_ext_io_opts ext_io_opts = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	/* First a read without iovs (buffer acquired via spdk_bdev_io_get_buf). */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
	bdev_io->internal.ext_opts = &ext_io_opts;
	g_ut_readv_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	CU_ASSERT(g_ut_readv_ext_called == true);
	g_ut_readv_ext_called = false;

	g_ut_writev_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(g_ut_writev_ext_called == true);
	g_ut_writev_ext_called = false;
	bdev_io->internal.ext_opts = NULL;

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2080 
/* Verify registering multiple transport IDs (paths) to a single nvme_ctrlr by
 * calling bdev_nvme_create() repeatedly with the same name, and removing them
 * with bdev_nvme_delete(): an unused trid is simply dropped, removing the
 * active trid triggers failover to an alternative path, and removing the last
 * trid (or passing trid == NULL) destroys the nvme_ctrlr itself.
 */
static void
test_add_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_ctrlr_trid *ctrid;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	/* No bdevs are needed; this test only exercises trid management. */
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	/* Creating "nvme0" again with trid2 registers trid2 as an alternative
	 * path; trid1 stays the connected trid.
	 */
	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	ctrlr3 = ut_attach_ctrlr(&trid3, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	rc = bdev_nvme_create(&trid3, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid3) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	/* Removing the active path starts a reset to switch over to trid3. */
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Re-create the ctrlr with trid1 and trid2 to test deletion without
	 * specifying a trid.
	 */
	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2235 
/* Verify abort handling for both I/O and admin requests, including aborts
 * submitted from a different thread than the request being aborted.
 */
static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1;
	struct nvme_ctrlr_channel *ctrlr_ch1;
	int rc;

	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
	 * are submitted on thread 1. Both should succeed.
	 */

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(write_io);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);

	/* ch1 lives on thread 0, ch2 on thread 1. */
	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	/* Aborting the already completed request should fail. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Same check for an admin request that already completed; here the
	 * abort is submitted from the other channel/thread.
	 */
	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	/* The write is still outstanding on the I/O qpair when the abort is
	 * submitted.
	 */
	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Tear down: each channel is released on the thread that created it. */
	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2412 
/* Verify that bdev_nvme_get_io_qpair() returns the I/O qpair stored in the
 * nvme_ctrlr_channel context of the given I/O channel.
 */
static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	/* Register the nvme_ctrlr directly from a stack-allocated spdk ctrlr;
	 * the full attach path is not needed for this test.
	 */
	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	/* Getting an I/O channel for the nvme_ctrlr allocates its qpair. */
	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == ctrlr_ch->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2452 
2453 /* Test a scenario that the bdev subsystem starts shutdown when there still exists
2454  * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
2455  * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
2456  * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
2457  */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr with two namespaces so that two nvme bdevs are
	 * created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 2, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr->namespaces[0];
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr->namespaces[1];
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Destruct the bdevs first, as spdk_bdev_unregister() would do during
	 * subsystem shutdown (see the comment above this function).
	 */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	/* The namespaces must no longer reference the destructed bdevs. */
	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Then destruct the nvme_ctrlr itself; this must still complete
	 * cleanly even though its bdevs are already gone.
	 */
	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2517 
2518 static void
2519 test_compare_ns(void)
2520 {
2521 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2522 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2523 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2524 
2525 	/* No IDs are defined. */
2526 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2527 
2528 	/* Only EUI64 are defined and not matched. */
2529 	nsdata1.eui64 = 0xABCDEF0123456789;
2530 	nsdata2.eui64 = 0xBBCDEF0123456789;
2531 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2532 
2533 	/* Only EUI64 are defined and matched. */
2534 	nsdata2.eui64 = 0xABCDEF0123456789;
2535 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2536 
2537 	/* Only NGUID are defined and not matched. */
2538 	nsdata1.eui64 = 0x0;
2539 	nsdata2.eui64 = 0x0;
2540 	nsdata1.nguid[0] = 0x12;
2541 	nsdata2.nguid[0] = 0x10;
2542 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2543 
2544 	/* Only NGUID are defined and matched. */
2545 	nsdata2.nguid[0] = 0x12;
2546 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2547 
2548 	/* Only UUID are defined and not matched. */
2549 	nsdata1.nguid[0] = 0x0;
2550 	nsdata2.nguid[0] = 0x0;
2551 	ns1.uuid.u.raw[0] = 0xAA;
2552 	ns2.uuid.u.raw[0] = 0xAB;
2553 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2554 
2555 	/* Only UUID are defined and matched. */
2556 	ns1.uuid.u.raw[0] = 0xAB;
2557 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2558 
2559 	/* All EUI64, NGUID, and UUID are defined and matched. */
2560 	nsdata1.eui64 = 0x123456789ABCDEF;
2561 	nsdata2.eui64 = 0x123456789ABCDEF;
2562 	nsdata1.nguid[15] = 0x34;
2563 	nsdata2.nguid[15] = 0x34;
2564 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2565 }
2566 
/* Verify that for a ctrlr with ANA support, the per-namespace ANA states are
 * picked up at attach time and stored on the corresponding nvme_ns, and that
 * a bdev is created for every namespace regardless of its ANA state.
 */
static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach an ANA-supporting ctrlr (last arg true) with 5 namespaces. */
	ctrlr = ut_attach_ctrlr(&trid, 5, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* One namespace per possible ANA state. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	/* Extra delay/poll round; presumably needed for the ANA log page read
	 * to complete - NOTE(review): confirm against the ut stubs.
	 */
	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 5);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	/* A bdev is created for every namespace, whatever its ANA state. */
	CU_ASSERT(nvme_ctrlr->namespaces[0]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2632 
/* Register g_accel_p as an I/O device so tests can create accel engine
 * channels via the accel_engine_create_cb/destroy_cb stubs.
 */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}
2639 
/* Unregister the accel engine I/O device registered by init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
2645 
2646 static void
2647 test_get_memory_domains(void)
2648 {
2649 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2650 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2651 	struct nvme_bdev nbdev = { .nvme_ns = &ns };
2652 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *) 0xf00df00d;
2653 	struct spdk_memory_domain *domains[2] = {};
2654 	int rc = 0;
2655 
2656 	/* nvme controller doesn't have a memory domain */
2657 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, NULL);
2658 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2659 	CU_ASSERT(rc == 0)
2660 
2661 	/* nvme controller has a memory domain but array size is insufficient */
2662 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2663 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2664 	CU_ASSERT(rc == 1);
2665 
2666 	/* nvme controller has a memory domain but domains array is NULL */
2667 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2668 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2669 	CU_ASSERT(rc == 1);
2670 
2671 	/* nvme controller has a memory domain */
2672 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2673 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
2674 	CU_ASSERT(rc == 1);
2675 	CU_ASSERT(domains[0] == domain);
2676 
2677 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2678 }
2679 
int
main(int argc, const char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort on framework errors so a broken registry doesn't run tests. */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* The tests use up to three ut threads (see the set_thread() calls);
	 * the module is initialized from thread 0.
	 */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	/* Tear down in reverse order, again from thread 0. */
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	/* Exit code is the number of failed CUnit assertions. */
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}
2727