xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 6e5d6032a09ca918509e7c6f28d6d2e20b8dc832)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 #include "spdk_cunit.h"
37 #include "spdk/thread.h"
38 #include "spdk/bdev_module.h"
39 #include "spdk/bdev_module.h"
40 
41 #include "common/lib/ut_multithread.c"
42 
43 #include "bdev/nvme/bdev_nvme.c"
44 #include "bdev/nvme/common.c"
45 
46 #include "unit/lib/json_mock.c"
47 
48 static void *g_accel_p = (void *)0xdeadbeaf;
49 
50 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
51 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
52 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
53 	     spdk_nvme_remove_cb remove_cb), NULL);
54 
55 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
56 		enum spdk_nvme_transport_type trtype));
57 
58 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
59 	    NULL);
60 
61 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
62 
63 DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
64 		size_t opts_size));
65 
66 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
67 		struct spdk_nvme_transport_id *trid), 0);
68 
69 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
70 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
71 
72 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
73 
74 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
75 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
76 
/* Mock for spdk_nvme_ctrlr_get_memory_domain: returns the value installed
 * via MOCK_SET when one is pending, otherwise NULL (no memory-domain
 * support).
 */
DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, struct spdk_memory_domain *);

struct spdk_memory_domain *spdk_nvme_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return NULL;
}
85 
/* Stub: route accel-engine channel requests to the fake accel io_device
 * registered under g_accel_p.
 */
struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}
91 
/* Stub: zero the caller's qpair opts so any field read afterwards has a
 * defined value.
 */
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}
99 
100 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
101 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
102 
103 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
104 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
105 
106 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
107 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
108 
109 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
110 		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
111 
112 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
113 
114 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
115 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
116 
117 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
118 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
119 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
120 
121 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
122 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
123 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
124 
125 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
126 
127 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
128 
129 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
130 
131 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
132 
133 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
134 
135 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
136 
137 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
138 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
139 
140 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
141 
142 DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
143 	    (const struct spdk_nvme_ns *ns), 0);
144 
145 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
146 		char *name, size_t *size), 0);
147 
148 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
149 	    (struct spdk_nvme_ns *ns), 0);
150 
151 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
152 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
153 
154 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
155 	    (struct spdk_nvme_ns *ns), 0);
156 
157 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
158 	    (struct spdk_nvme_ns *ns), 0);
159 
160 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
161 	    (struct spdk_nvme_ns *ns), 0);
162 
163 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
164 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
165 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
166 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
167 
168 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
169 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
170 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
171 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
172 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
173 
174 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
175 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
176 	     void *payload, uint32_t payload_size, uint64_t slba,
177 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
178 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
179 
180 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
181 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
182 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
183 
184 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
185 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
186 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
187 
188 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
189 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
190 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
191 
192 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
193 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
194 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
195 
196 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
197 
198 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
199 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
200 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
201 
202 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));
203 
204 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
205 
206 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
207 
208 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
209 
210 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
211 		struct iovec *iov,
212 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
213 
214 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));
215 
/* A fake outstanding NVMe request.  Queued on a qpair by
 * ut_submit_nvme_request() and completed (callback invoked, then freed)
 * by spdk_nvme_qpair_process_completions().
 */
struct ut_nvme_req {
	uint16_t			opc;	/* NVMe opcode */
	spdk_nvme_cmd_cb		cb_fn;	/* completion callback */
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;	/* completion status to deliver */
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

/* Minimal fake namespace backing the spdk_nvme_ns_* stubs. */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;	/* 1-based NSID */
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

/* Minimal fake qpair; owns the list of outstanding fake requests. */
struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	/* Membership in a poll group's qpairs list. */
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	/* Membership in ctrlr->active_io_qpairs. */
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

/* Minimal fake controller created by ut_attach_ctrlr(). */
struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;		/* array of num_ns namespaces */
	struct spdk_nvme_ns_data	*nsdata;	/* parallel identify-ns data */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;	/* force spdk_nvme_ctrlr_reset() to fail */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

/* Minimal fake poll group. */
struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

/* Fake probe context returned by spdk_nvme_connect_async(). */
struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

/* Fake reset context handed out by spdk_nvme_ctrlr_reset_async(). */
struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};

/* Controllers created but not yet attached via probing. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers whose attach completed (see nvme_ctrlr_poll_internal). */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
/* NOTE(review): set/read by test cases outside this chunk — confirm usage there. */
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
/* Return code of the spdk_bdev_register() stub. */
static int g_ut_register_bdev_status;
280 
281 static void
282 ut_init_trid(struct spdk_nvme_transport_id *trid)
283 {
284 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
285 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
286 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
287 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
288 }
289 
290 static void
291 ut_init_trid2(struct spdk_nvme_transport_id *trid)
292 {
293 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
294 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
295 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
296 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
297 }
298 
299 static void
300 ut_init_trid3(struct spdk_nvme_transport_id *trid)
301 {
302 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
303 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
304 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
305 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
306 }
307 
/* Three-way comparison of two ints: <0, 0, or >0 (qsort-style).
 *
 * The previous `a - b` invokes undefined behavior on signed overflow for
 * extreme inputs (e.g. INT_MAX vs. INT_MIN); the sign-of-difference idiom
 * below never overflows.  Callers in this file only test the sign.
 */
static int
cmp_int(int a, int b)
{
	return (a > b) - (a < b);
}
313 
314 int
315 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
316 			       const struct spdk_nvme_transport_id *trid2)
317 {
318 	int cmp;
319 
320 	/* We assume trtype is TCP for now. */
321 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
322 
323 	cmp = cmp_int(trid1->trtype, trid2->trtype);
324 	if (cmp) {
325 		return cmp;
326 	}
327 
328 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
329 	if (cmp) {
330 		return cmp;
331 	}
332 
333 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
334 	if (cmp) {
335 		return cmp;
336 	}
337 
338 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
339 	if (cmp) {
340 		return cmp;
341 	}
342 
343 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
344 	if (cmp) {
345 		return cmp;
346 	}
347 
348 	return 0;
349 }
350 
/* Create a fake controller with @num_ns active namespaces and queue it on
 * g_ut_init_ctrlrs so a subsequent probe/connect can attach it.
 *
 * Returns the new controller, or NULL on allocation failure or if a
 * controller with the same trid is already pending.  The caller releases
 * it through spdk_nvme_detach()/ut_detach_ctrlr() after attach.
 */
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		/* Every namespace starts active, ANA-optimized, and 1024
		 * blocks long; NSIDs (and ANA group IDs) are 1-based. */
		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}
409 
410 static void
411 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
412 {
413 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
414 
415 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
416 	free(ctrlr->nsdata);
417 	free(ctrlr->ns);
418 	free(ctrlr);
419 }
420 
421 static int
422 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
423 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
424 {
425 	struct ut_nvme_req *req;
426 
427 	req = calloc(1, sizeof(*req));
428 	if (req == NULL) {
429 		return -ENOMEM;
430 	}
431 
432 	req->opc = opc;
433 	req->cb_fn = cb_fn;
434 	req->cb_arg = cb_arg;
435 
436 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
437 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
438 
439 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
440 	qpair->num_outstanding_reqs++;
441 
442 	return 0;
443 }
444 
445 static struct spdk_bdev_io *
446 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
447 		 struct spdk_io_channel *ch)
448 {
449 	struct spdk_bdev_io *bdev_io;
450 
451 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
452 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
453 	bdev_io->type = type;
454 	bdev_io->bdev = &nbdev->disk;
455 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
456 
457 	return bdev_io;
458 }
459 
460 static void
461 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
462 {
463 	bdev_io->u.bdev.iovs = &bdev_io->iov;
464 	bdev_io->u.bdev.iovcnt = 1;
465 
466 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
467 	bdev_io->iov.iov_len = 4096;
468 }
469 
470 static void
471 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
472 {
473 	if (ctrlr->is_failed) {
474 		free(ctrlr);
475 		return;
476 	}
477 
478 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
479 
480 	if (probe_ctx->attach_cb) {
481 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
482 	}
483 }
484 
485 int
486 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
487 {
488 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
489 
490 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
491 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
492 			continue;
493 		}
494 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
495 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
496 	}
497 
498 	free(probe_ctx);
499 
500 	return 0;
501 }
502 
503 struct spdk_nvme_probe_ctx *
504 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
505 			const struct spdk_nvme_ctrlr_opts *opts,
506 			spdk_nvme_attach_cb attach_cb)
507 {
508 	struct spdk_nvme_probe_ctx *probe_ctx;
509 
510 	if (trid == NULL) {
511 		return NULL;
512 	}
513 
514 	probe_ctx = calloc(1, sizeof(*probe_ctx));
515 	if (probe_ctx == NULL) {
516 		return NULL;
517 	}
518 
519 	probe_ctx->trid = *trid;
520 	probe_ctx->cb_ctx = (void *)opts;
521 	probe_ctx->attach_cb = attach_cb;
522 
523 	return probe_ctx;
524 }
525 
526 int
527 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
528 {
529 	if (ctrlr->attached) {
530 		ut_detach_ctrlr(ctrlr);
531 	}
532 
533 	return 0;
534 }
535 
/* Return the fake identify-controller data. */
const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

/* Return the namespace count the fake controller was created with. */
uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}
547 
548 struct spdk_nvme_ns *
549 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
550 {
551 	if (nsid < 1 || nsid > ctrlr->num_ns) {
552 		return NULL;
553 	}
554 
555 	return &ctrlr->ns[nsid - 1];
556 }
557 
558 bool
559 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
560 {
561 	if (nsid < 1 || nsid > ctrlr->num_ns) {
562 		return false;
563 	}
564 
565 	return ctrlr->ns[nsid - 1].is_active;
566 }
567 
/* Stub: the CSTS register always reads as all zeroes. */
union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

/* Stub: the VS (version) register always reads as zero. */
union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}
587 
588 struct spdk_nvme_qpair *
589 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
590 			       const struct spdk_nvme_io_qpair_opts *user_opts,
591 			       size_t opts_size)
592 {
593 	struct spdk_nvme_qpair *qpair;
594 
595 	qpair = calloc(1, sizeof(*qpair));
596 	if (qpair == NULL) {
597 		return NULL;
598 	}
599 
600 	qpair->ctrlr = ctrlr;
601 	TAILQ_INIT(&qpair->outstanding_reqs);
602 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
603 
604 	return qpair;
605 }
606 
607 int
608 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
609 				 struct spdk_nvme_qpair *qpair)
610 {
611 	if (qpair->is_connected) {
612 		return -EISCONN;
613 	}
614 
615 	qpair->is_connected = true;
616 
617 	return 0;
618 }
619 
620 int
621 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
622 {
623 	struct spdk_nvme_ctrlr *ctrlr;
624 
625 	ctrlr = qpair->ctrlr;
626 
627 	if (ctrlr->is_failed) {
628 		return -ENXIO;
629 	}
630 	qpair->is_connected = true;
631 
632 	return 0;
633 }
634 
/* Stub: mark the qpair disconnected; poll-group processing will then pass
 * it to the disconnected-qpair callback.
 */
void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}
640 
641 int
642 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
643 {
644 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
645 
646 	qpair->is_connected = false;
647 
648 	if (qpair->poll_group != NULL) {
649 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
650 	}
651 
652 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
653 
654 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
655 
656 	free(qpair);
657 
658 	return 0;
659 }
660 
661 int
662 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
663 {
664 	if (ctrlr->fail_reset) {
665 		return -EIO;
666 	}
667 
668 	ctrlr->is_failed = false;
669 
670 	return 0;
671 }
672 
673 int
674 spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
675 {
676 	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;
677 
678 	free(ctrlr_reset_ctx);
679 	return spdk_nvme_ctrlr_reset(ctrlr);
680 }
681 
682 int
683 spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
684 			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
685 {
686 	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;
687 
688 	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
689 	if (!ctrlr_reset_ctx) {
690 		return -ENOMEM;
691 	}
692 
693 	ctrlr_reset_ctx->ctrlr = ctrlr;
694 	*reset_ctx = ctrlr_reset_ctx;
695 
696 	return 0;
697 }
698 
/* Stub: mark the fake controller failed; a successful reset clears it. */
void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}
704 
/* Serialized size of one ANA group descriptor carrying exactly one NSID. */
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
/* Serialize a fake ANA log page into @buf: a header followed by one
 * descriptor per namespace, each namespace alone in its own ANA group
 * (group ID == NSID) with its current ana_state.  Asserts @length is
 * large enough before every copy.
 */
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	/* Reuse one stack buffer as scratch for each descriptor, then copy
	 * it out to the (possibly unaligned) payload position. */
	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}
744 
745 int
746 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
747 				 uint8_t log_page, uint32_t nsid,
748 				 void *payload, uint32_t payload_size,
749 				 uint64_t offset,
750 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
751 {
752 	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
753 		SPDK_CU_ASSERT_FATAL(offset == 0);
754 		ut_create_ana_log_page(ctrlr, payload, payload_size);
755 	}
756 
757 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
758 				      cb_fn, cb_arg);
759 }
760 
761 int
762 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
763 			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
764 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
765 {
766 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
767 }
768 
769 int
770 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
771 			      void *cmd_cb_arg,
772 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
773 {
774 	struct ut_nvme_req *req = NULL, *abort_req;
775 
776 	if (qpair == NULL) {
777 		qpair = &ctrlr->adminq;
778 	}
779 
780 	abort_req = calloc(1, sizeof(*abort_req));
781 	if (abort_req == NULL) {
782 		return -ENOMEM;
783 	}
784 
785 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
786 		if (req->cb_arg == cmd_cb_arg) {
787 			break;
788 		}
789 	}
790 
791 	if (req == NULL) {
792 		free(abort_req);
793 		return -ENOENT;
794 	}
795 
796 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
797 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
798 
799 	abort_req->opc = SPDK_NVME_OPC_ABORT;
800 	abort_req->cb_fn = cb_fn;
801 	abort_req->cb_arg = cb_arg;
802 
803 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
804 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
805 	abort_req->cpl.cdw0 = 0;
806 
807 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
808 	ctrlr->adminq.num_outstanding_reqs++;
809 
810 	return 0;
811 }
812 
/* Drain the fake admin qpair, completing every queued admin request. */
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
818 
/* Return the 1-based namespace ID. */
uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

/* Return the controller owning this namespace. */
struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

/* Look up the fake identify-namespace data for @ns; the nsdata array is
 * parallel to the ns array (both indexed by NSID - 1).
 */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

/* Return the fake identify-namespace data. */
const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

/* Namespace capacity in sectors (nsze of the fake ns data). */
uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

/* Return the UUID stored in the fake namespace. */
const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}
854 
/* Stub read: queue a fake READ request; buffers/LBA args are ignored. */
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

/* Stub write: queue a fake WRITE request; buffers/LBA args are ignored. */
int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

/* Stub vectored read: queue a fake READ request; SGL callbacks unused. */
int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

/* Stub vectored write: queue a fake WRITE request; SGL callbacks unused. */
int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}
894 
/* Set when the ext readv path is taken, so tests can verify it was used. */
static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

/* Set when the ext writev path is taken, so tests can verify it was used. */
static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}
920 
/* Stub vectored compare: queue a fake COMPARE request; payload ignored. */
int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

/* Stub DSM (unmap/deallocate): queue a fake request; ranges ignored. */
int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

/* Stub write-zeroes: queue a fake request; LBA range ignored. */
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}
948 
949 struct spdk_nvme_poll_group *
950 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
951 {
952 	struct spdk_nvme_poll_group *group;
953 
954 	group = calloc(1, sizeof(*group));
955 	if (group == NULL) {
956 		return NULL;
957 	}
958 
959 	group->ctx = ctx;
960 	if (table != NULL) {
961 		group->accel_fn_table = *table;
962 	}
963 	TAILQ_INIT(&group->qpairs);
964 
965 	return group;
966 }
967 
968 int
969 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
970 {
971 	if (!TAILQ_EMPTY(&group->qpairs)) {
972 		return -EBUSY;
973 	}
974 
975 	free(group);
976 
977 	return 0;
978 }
979 
/* Complete every outstanding fake request on @qpair in FIFO order,
 * invoking each stored callback with its stored completion.  The stub
 * ignores @max_completions and always drains the list.  Returns the
 * number of requests completed.
 */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		/* Dequeue before invoking the callback so a callback that
		 * submits new requests does not corrupt the iteration. */
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}
999 
1000 int64_t
1001 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1002 		uint32_t completions_per_qpair,
1003 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1004 {
1005 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1006 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1007 
1008 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1009 
1010 	if (disconnected_qpair_cb == NULL) {
1011 		return -EINVAL;
1012 	}
1013 
1014 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1015 		if (qpair->is_connected) {
1016 			local_completions = spdk_nvme_qpair_process_completions(qpair,
1017 					    completions_per_qpair);
1018 			if (local_completions < 0 && error_reason == 0) {
1019 				error_reason = local_completions;
1020 			} else {
1021 				num_completions += local_completions;
1022 				assert(num_completions >= 0);
1023 			}
1024 		}
1025 	}
1026 
1027 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1028 		if (!qpair->is_connected) {
1029 			disconnected_qpair_cb(qpair, group->ctx);
1030 		}
1031 	}
1032 
1033 	return error_reason ? error_reason : num_completions;
1034 }
1035 
1036 int
1037 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1038 			 struct spdk_nvme_qpair *qpair)
1039 {
1040 	CU_ASSERT(!qpair->is_connected);
1041 
1042 	qpair->poll_group = group;
1043 	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);
1044 
1045 	return 0;
1046 }
1047 
int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	/* Mock: a qpair must be disconnected before removal from its poll group. */
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}
1058 
int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	/* Mock: the test controls registration success/failure through this global. */
	return g_ut_register_bdev_status;
}
1064 
1065 void
1066 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1067 {
1068 	int rc;
1069 
1070 	rc = bdev->fn_table->destruct(bdev->ctxt);
1071 	if (rc <= 0 && cb_fn != NULL) {
1072 		cb_fn(cb_arg, rc);
1073 	}
1074 }
1075 
int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	/* Mock: just record the new block count; always succeeds. */
	bdev->blockcnt = size;

	return 0;
}
1083 
struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	/* Mock: the UT stores the io_channel directly in internal.ch. */
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}
1089 
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	/* Mock: record the final status and mark the I/O as no longer in flight. */
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}
1096 
1097 void
1098 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1099 {
1100 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1101 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1102 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1103 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1104 	} else {
1105 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1106 	}
1107 
1108 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1109 	bdev_io->internal.error.nvme.sct = sct;
1110 	bdev_io->internal.error.nvme.sc = sc;
1111 
1112 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1113 }
1114 
1115 void
1116 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1117 {
1118 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1119 
1120 	ut_bdev_io_set_buf(bdev_io);
1121 
1122 	cb(ch, bdev_io, true);
1123 }
1124 
/* Verify that a nvme_ctrlr can be created and deleted, and that deletion
 * is asynchronous: the ctrlr stays registered until the threads are polled.
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion is deferred; the ctrlr is still registered right after
	 * bdev_nvme_delete() returns.
	 */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1148 
/* Verify bdev_nvme_reset() error paths (ctrlr being destructed, reset already
 * in progress) and then walk a successful reset step by step across two
 * threads, checking qpair teardown/re-creation order at each poll.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* One I/O channel (and hence one ctrlr qpair) per thread. */
	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Qpairs are destroyed channel by channel: thread 0 first... */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* ...then thread 1; the ctrlr itself is still failed at this point. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Next step resets the ctrlr, clearing its failed state. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Qpairs are re-created in the same per-thread order. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* Final step completes the reset and clears the trid failed flag. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1256 
/* Verify that deleting a ctrlr while a reset is in progress defers the
 * destruction until the reset completes, and that new resets are rejected
 * once destruction is pending.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not still destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, two I/O channels are still held, so destruction is not
	 * completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	/* Releasing the last channel lets the deferred destruction finish. */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1331 
/* Verify bdev_nvme_failover() with a single trid (behaves like a reset) and
 * with two trids (switches the connected trid), including the error/no-op
 * paths when the ctrlr is being destructed or a reset/failover is pending.
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	/* The secondary trid becomes the head of the list and the connected trid. */
	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1470 
/* Attach completion callback: check the result against the expectations the
 * test case set in the g_ut_attach_* globals beforehand.
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
1477 
/* Verify that a reset submitted while another reset is in progress is queued
 * on the channel's pending_resets list, and that the pending reset inherits
 * the outcome (success or failure) of the reset being processed.
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	/* Pre-set FAILED so a later SUCCESS proves the completion path ran. */
	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}
1603 
/* Verify ctrlr attach for four scenarios: a failed ctrlr (no nvme_ctrlr
 * created), a ctrlr with no namespaces, a ctrlr with one namespace and one
 * nvme_bdev, and a ctrlr whose bdev registration fails.
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Restore the global so later tests see successful registration again. */
	g_ut_register_bdev_status = 0;
}
1731 
/* Verify that a disconnected qpair is reconnected by the poll group poller,
 * and that reconnection does not happen while the ctrlr itself is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);
	CU_ASSERT(ctrlr_ch->group != NULL);
	CU_ASSERT(ctrlr_ch->group->group != NULL);
	CU_ASSERT(ctrlr_ch->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	ctrlr_ch->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too. */
	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1788 
/* Verify the AER handler: a namespace-attribute-changed event populates,
 * depopulates, and resizes namespaces/bdevs, and an ANA-change event updates
 * the per-namespace ANA states.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	/* Extra delay + poll; presumably lets async ANA log page handling
	 * finish during attach -- TODO confirm against bdev_nvme.c.
	 */
	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);

	bdev = nvme_ctrlr->namespaces[3]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change ANA state of active namespaces. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1886 
/* Helper: submit a single I/O of the given type on the channel and verify it
 * stays outstanding on the qpair until the threads are polled, then completes
 * successfully.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* The request must be queued, not completed inline. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1911 
/* Helper: submit an I/O type that is completed inline (no request is queued
 * on the qpair), e.g. flush, and verify it succeeds immediately.
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Completed synchronously: nothing outstanding after submission. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1931 
/* Helper: submit a fused compare-and-write and verify two requests (compare
 * first, then write) go outstanding and both complete successfully.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	/* Tag the canned completion so the callback can identify the compare half. */
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1965 
/* Helper: submit an admin passthru command and verify it is processed on the
 * ctrlr's admin qpair, with the bdev_io completed after polling thread 0.
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Admin completions are drained on thread 1 (the ctrlr thread in this
	 * test) after the delay -- the bdev_io itself is not yet completed.
	 */
	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Completion of the bdev_io is delivered back on thread 0. */
	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}
1989 
/* Exercise bdev_nvme_submit_request() for every supported I/O type: regular
 * reads/writes/compare/unmap, flush (nop), fused compare-and-write, the ext
 * readv/writev API (when ext_opts is set), and an admin passthru command.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct spdk_bdev_ext_io_opts ext_io_opts = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	/* First read goes through the get_buf path (no iovs attached yet). */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
	bdev_io->internal.ext_opts = &ext_io_opts;
	g_ut_readv_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	CU_ASSERT(g_ut_readv_ext_called == true);
	g_ut_readv_ext_called = false;

	g_ut_writev_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(g_ut_writev_ext_called == true);
	g_ut_writev_ext_called = false;
	bdev_io->internal.ext_opts = NULL;

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2081 
/* Verify multipath trid management on a single nvme_ctrlr:
 * - attaching additional trids to the same ctrlr name registers them as
 *   alternative paths,
 * - bdev_nvme_delete() with an unknown trid returns -ENXIO,
 * - deleting an unused trid simply unregisters it,
 * - deleting the active trid triggers a reset and fails over to an
 *   alternative path,
 * - deleting the last trid, or passing a NULL trid, destroys the
 *   nvme_ctrlr itself.
 */
static void
test_add_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_ctrlr_trid *ctrid;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	/* Attach trid2 under the same name; it becomes an alternative path
	 * while trid1 stays connected.
	 */
	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	ctrlr3 = ut_attach_ctrlr(&trid3, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	rc = bdev_nvme_create(&trid3, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid3) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	/* The failover is performed via a ctrlr reset. */
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Re-create the ctrlr with two paths to test NULL-trid deletion. */
	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2236 
/* Verify SPDK_BDEV_IO_TYPE_ABORT handling: aborting an already-completed I/O
 * or admin request fails, while aborting an outstanding write or admin
 * request succeeds and completes the target with SPDK_BDEV_IO_STATUS_ABORTED.
 * Cross-thread abort delivery is covered by submitting aborts from both
 * threads.
 */
static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1;
	struct nvme_ctrlr_channel *ctrlr_ch1;
	int rc;

	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
	 * are submitted on thread 1. Both should succeed.
	 */

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(write_io);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	/* Aborting the already completed request should fail. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Same check for an admin request that has already completed, with the
	 * abort submitted from the other thread's channel.
	 */
	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	/* NOTE(review): duplicates the adminq assertion three lines above;
	 * harmless but redundant.
	 */
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2413 
/* Verify that bdev_nvme_get_io_qpair() returns the qpair held by the
 * nvme_ctrlr_channel backing the given I/O channel. Uses a stack-allocated
 * stub spdk_nvme_ctrlr rather than ut_attach_ctrlr().
 */
static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	/* Required so qpair creation on the stub ctrlr can link into this list. */
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == ctrlr_ch->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2453 
2454 /* Test a scenario that the bdev subsystem starts shutdown when there still exists
2455  * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
2456  * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
2457  * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
2458  */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Two namespaces so two bdevs are created and destructed. */
	ctrlr = ut_attach_ctrlr(&trid, 2, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr->namespaces[0];
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr->namespaces[1];
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Simulate spdk_bdev_unregister() by calling the destruct callback
	 * directly, as described in the comment above this function.
	 */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	/* Destructing a bdev must detach it from its namespace. */
	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Then tear down the ctrlr itself. */
	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2518 
/* Verify bdev_nvme_compare_ns(): two namespaces match when every namespace
 * ID type that is defined (EUI64, NGUID, UUID) matches; ID types that are
 * zero/undefined on both sides are ignored. Each ID type is tested in
 * isolation with both a mismatch and a match, then all together.
 */
static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64 are defined and not matched. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64 are defined and matched. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUID are defined and not matched. */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUID are defined and matched. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUID are defined and not matched. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid.u.raw[0] = 0xAA;
	ns2.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUID are defined and matched. */
	ns1.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All EUI64, NGUID, and UUID are defined and matched. */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
}
2567 
/* Verify that when an ANA-capable controller (ana_reporting = true in
 * ut_attach_ctrlr) is attached, each namespace's ana_state set on the stub
 * controller is propagated to the corresponding nvme_ns, covering all five
 * ANA states, and that a bdev is created for every namespace.
 */
static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 5, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* One namespace per ANA state. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	/* Second delay/poll — presumably lets the ANA log page read complete;
	 * confirm against the admin command timing used elsewhere in this file.
	 */
	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 5);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2633 
/* Register the accel-engine stub as an io_device so the tests can create
 * channels against it. Paired with fini_accel().
 */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}
2640 
/* Unregister the accel-engine stub io_device registered by init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
2646 
2647 static void
2648 test_get_memory_domains(void)
2649 {
2650 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2651 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2652 	struct nvme_bdev nbdev = { .nvme_ns = &ns };
2653 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *) 0xf00df00d;
2654 	struct spdk_memory_domain *domains[2] = {};
2655 	int rc = 0;
2656 
2657 	/* nvme controller doesn't have a memory domain */
2658 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, NULL);
2659 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2660 	CU_ASSERT(rc == 0)
2661 
2662 	/* nvme controller has a memory domain but array size is insufficient */
2663 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2664 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2665 	CU_ASSERT(rc == 1);
2666 
2667 	/* nvme controller has a memory domain but domains array is NULL */
2668 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2669 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2670 	CU_ASSERT(rc == 1);
2671 
2672 	/* nvme controller has a memory domain */
2673 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2674 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
2675 	CU_ASSERT(rc == 1);
2676 	CU_ASSERT(domains[0] == domain);
2677 
2678 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2679 }
2680 
/* Test driver: registers all test cases in one CUnit suite and runs them on
 * a simulated three-thread SPDK application environment.
 */
int
main(int argc, const char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* Set up the simulated threads and the module under test before running. */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	/* Tear down in reverse order of setup. */
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}
2728