xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 2ee6ab36f9a0e38f0e47e9dab3db40a6ea72cfd5)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 #include "spdk_cunit.h"
37 #include "spdk/thread.h"
38 #include "spdk/bdev_module.h"
39 #include "spdk/bdev_module.h"
40 
41 #include "common/lib/ut_multithread.c"
42 
43 #include "bdev/nvme/bdev_nvme.c"
44 
45 #include "unit/lib/json_mock.c"
46 
/* Fake io_device pointer used to register the accel-engine channel in these
 * tests.  Only compared by address; the value is an arbitrary non-NULL
 * sentinel (spelled "beaf" in the original — the exact value is irrelevant). */
static void *g_accel_p = (void *)0xdeadbeaf;

/* Stubs for nvme-library entry points that bdev_nvme.c links against but
 * whose behavior does not matter (or is replaced by richer doubles below). */
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

/* Mockable: tests can force a non-NULL memory domain via MOCK_SET();
 * the default is NULL (no memory-domain support). */
DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, struct spdk_memory_domain *);

struct spdk_memory_domain *spdk_nvme_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return NULL;
}
84 
85 struct spdk_io_channel *
86 spdk_accel_engine_get_io_channel(void)
87 {
88 	return spdk_get_io_channel(g_accel_p);
89 }
90 
/* Test double for spdk_nvme_ctrlr_get_default_io_qpair_opts():
 * zero the caller-provided opts so subsequent reads are well defined. */
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}
98 
/* Remaining stubbed nvme / zns / cuse / opal / accel / bdev-module APIs.
 * Referenced by bdev_nvme.c, but their return values either do not matter
 * to these tests or are overridden per-test via the MOCK_* macros. */

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Namespace geometry queries — the tests use the spdk_nvme_ns_data fakes
 * instead, so these can all report zero. */
DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

/* Zoned-namespace (ZNS) API stubs. */
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));
214 
/* Minimal stand-ins for the nvme library's opaque types.  Only the fields
 * that bdev_nvme.c and these test doubles actually touch are present. */

/* One queued command on a fake qpair; completed (cb_fn called with cpl)
 * and freed by spdk_nvme_qpair_process_completions(). */
struct ut_nvme_req {
	uint16_t			opc;	/* NVMe opcode */
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;	/* completion handed to cb_fn */
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;	/* 1-based NSID */
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	/* Link in poll_group->qpairs. */
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	/* Link in ctrlr->active_io_qpairs. */
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;		/* array of num_ns namespaces */
	struct spdk_nvme_ns_data	*nsdata;	/* parallel array, indexed by NSID - 1 */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;	/* set by spdk_nvme_ctrlr_fail() */
	bool				fail_reset;	/* test knob: make reset return -EIO */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	/* Link in g_ut_init_ctrlrs or g_ut_attached_ctrlrs. */
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};
272 
273 uint32_t
274 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
275 {
276 	uint32_t nsid;
277 
278 	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
279 		if (ctrlr->ns[nsid - 1].is_active) {
280 			return nsid;
281 		}
282 	}
283 
284 	return 0;
285 }
286 
287 uint32_t
288 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
289 {
290 	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
291 		if (ctrlr->ns[nsid - 1].is_active) {
292 			return nsid;
293 		}
294 	}
295 
296 	return 0;
297 }
298 
/* Ctrlrs created by ut_attach_ctrlr() wait here until "probed". */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Ctrlrs that completed the fake probe/attach sequence. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
/* Per-test knobs/expectations — presumably consumed by attach/register
 * callbacks later in this file (not visible in this chunk); set per test. */
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
305 
/* Populate a TCP transport ID for the fake target at 192.168.100.8:4420.
 * ut_init_trid2()/ut_init_trid3() differ only in traddr, giving tests three
 * distinct paths to the same subsystem NQN. */
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

/* Same subsystem as ut_init_trid(), second path (traddr .9). */
static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

/* Same subsystem as ut_init_trid(), third path (traddr .10). */
static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}
332 
/* Three-way compare for ints; returns <0, 0, >0 like strcmp().
 * Implemented as (a > b) - (a < b) instead of a - b: the subtraction
 * overflows (undefined behavior) for distant operands such as
 * cmp_int(INT_MAX, -1).  Callers only test the sign, so this is
 * backward compatible. */
static int
cmp_int(int a, int b)
{
	return (a > b) - (a < b);
}
338 
339 int
340 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
341 			       const struct spdk_nvme_transport_id *trid2)
342 {
343 	int cmp;
344 
345 	/* We assume trtype is TCP for now. */
346 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
347 
348 	cmp = cmp_int(trid1->trtype, trid2->trtype);
349 	if (cmp) {
350 		return cmp;
351 	}
352 
353 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
354 	if (cmp) {
355 		return cmp;
356 	}
357 
358 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
359 	if (cmp) {
360 		return cmp;
361 	}
362 
363 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
364 	if (cmp) {
365 		return cmp;
366 	}
367 
368 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
369 	if (cmp) {
370 		return cmp;
371 	}
372 
373 	return 0;
374 }
375 
376 static struct spdk_nvme_ctrlr *
377 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
378 		bool ana_reporting)
379 {
380 	struct spdk_nvme_ctrlr *ctrlr;
381 	uint32_t i;
382 
383 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
384 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
385 			/* There is a ctrlr whose trid matches. */
386 			return NULL;
387 		}
388 	}
389 
390 	ctrlr = calloc(1, sizeof(*ctrlr));
391 	if (ctrlr == NULL) {
392 		return NULL;
393 	}
394 
395 	ctrlr->attached = true;
396 	ctrlr->adminq.ctrlr = ctrlr;
397 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
398 
399 	if (num_ns != 0) {
400 		ctrlr->num_ns = num_ns;
401 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
402 		if (ctrlr->ns == NULL) {
403 			free(ctrlr);
404 			return NULL;
405 		}
406 
407 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
408 		if (ctrlr->nsdata == NULL) {
409 			free(ctrlr->ns);
410 			free(ctrlr);
411 			return NULL;
412 		}
413 
414 		for (i = 0; i < num_ns; i++) {
415 			ctrlr->ns[i].id = i + 1;
416 			ctrlr->ns[i].ctrlr = ctrlr;
417 			ctrlr->ns[i].is_active = true;
418 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
419 			ctrlr->nsdata[i].nsze = 1024;
420 		}
421 
422 		ctrlr->cdata.nn = num_ns;
423 		ctrlr->cdata.nanagrpid = num_ns;
424 	}
425 
426 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
427 	ctrlr->trid = *trid;
428 	TAILQ_INIT(&ctrlr->active_io_qpairs);
429 
430 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
431 
432 	return ctrlr;
433 }
434 
/* Tear down a ctrlr and its namespace arrays.
 * Precondition: all of its I/O qpairs have been freed (asserted below).
 * NOTE(review): unconditionally removes the ctrlr from the *attached*
 * list — detaching a ctrlr still on g_ut_init_ctrlrs would corrupt the
 * lists; confirm callers only detach attached ctrlrs. */
static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}
445 
446 static int
447 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
448 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
449 {
450 	struct ut_nvme_req *req;
451 
452 	req = calloc(1, sizeof(*req));
453 	if (req == NULL) {
454 		return -ENOMEM;
455 	}
456 
457 	req->opc = opc;
458 	req->cb_fn = cb_fn;
459 	req->cb_arg = cb_arg;
460 
461 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
462 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
463 
464 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
465 	qpair->num_outstanding_reqs++;
466 
467 	return 0;
468 }
469 
/* Allocate a bdev_io with the nvme_bdev_io driver context appended, the
 * same layout the bdev layer would hand to bdev_nvme.  The caller owns
 * (and frees) the returned bdev_io. */
static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}
484 
/* Attach a single 4KiB iovec to the bdev_io.  0xFEEDBEEF is an arbitrary
 * non-NULL buffer sentinel — the stubbed I/O paths in this file never
 * dereference the data pointer. */
static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}
494 
495 static void
496 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
497 {
498 	if (ctrlr->is_failed) {
499 		free(ctrlr);
500 		return;
501 	}
502 
503 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
504 
505 	if (probe_ctx->attach_cb) {
506 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
507 	}
508 }
509 
510 int
511 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
512 {
513 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
514 
515 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
516 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
517 			continue;
518 		}
519 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
520 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
521 	}
522 
523 	free(probe_ctx);
524 
525 	return 0;
526 }
527 
528 struct spdk_nvme_probe_ctx *
529 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
530 			const struct spdk_nvme_ctrlr_opts *opts,
531 			spdk_nvme_attach_cb attach_cb)
532 {
533 	struct spdk_nvme_probe_ctx *probe_ctx;
534 
535 	if (trid == NULL) {
536 		return NULL;
537 	}
538 
539 	probe_ctx = calloc(1, sizeof(*probe_ctx));
540 	if (probe_ctx == NULL) {
541 		return NULL;
542 	}
543 
544 	probe_ctx->trid = *trid;
545 	probe_ctx->cb_ctx = (void *)opts;
546 	probe_ctx->attach_cb = attach_cb;
547 
548 	return probe_ctx;
549 }
550 
/* Test double for spdk_nvme_detach(): tear down the ctrlr if it went
 * through the fake attach path; otherwise a successful no-op. */
int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}
560 
/* Trivial accessors over the fake ctrlr. */

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

/* NSIDs are 1-based; out-of-range lookups return NULL. */
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

/* Out-of-range NSIDs are reported as inactive rather than as an error. */
bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}
592 
/* Register reads always report zeroed CSTS/VS; these tests do not
 * exercise register-dependent paths. */
union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}
612 
613 struct spdk_nvme_qpair *
614 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
615 			       const struct spdk_nvme_io_qpair_opts *user_opts,
616 			       size_t opts_size)
617 {
618 	struct spdk_nvme_qpair *qpair;
619 
620 	qpair = calloc(1, sizeof(*qpair));
621 	if (qpair == NULL) {
622 		return NULL;
623 	}
624 
625 	qpair->ctrlr = ctrlr;
626 	TAILQ_INIT(&qpair->outstanding_reqs);
627 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
628 
629 	return qpair;
630 }
631 
/* Connecting an already-connected qpair is an error. */
int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

/* Reconnect succeeds only while the parent ctrlr is not failed. */
int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}
665 
/* Free a qpair created by spdk_nvme_ctrlr_alloc_io_qpair(): detach it
 * from its poll group (if any) and from the ctrlr's active list.  All of
 * its requests must already have completed (asserted below). */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}
685 
/* Synchronous reset double: fails with -EIO if the test set
 * ctrlr->fail_reset, otherwise clears the failed state. */
int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		return -EIO;
	}

	ctrlr->is_failed = false;

	return 0;
}

/* Async reset "completes" on the first poll; the ctx is consumed here. */
int
spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;

	free(ctrlr_reset_ctx);
	return spdk_nvme_ctrlr_reset(ctrlr);
}

/* Begin an async reset: package the ctrlr into a ctx for the poller.
 * The caller owns *reset_ctx until it is handed to the poll function. */
int
spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
{
	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;

	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
	if (!ctrlr_reset_ctx) {
		return -ENOMEM;
	}

	ctrlr_reset_ctx->ctrlr = ctrlr;
	*reset_ctx = ctrlr_reset_ctx;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}
729 
/* Size of one ANA group descriptor carrying exactly one NSID entry. */
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
/* Serialize an ANA log page into buf: a header followed by one
 * single-NSID descriptor per *active* namespace (group id == NSID).
 * Fatally asserts if length is too small at any step.
 * NOTE(review): the header advertises num_ns groups even when inactive
 * namespaces are skipped below — confirm the mismatch is intentional.
 * NOTE(review): _ana_desc is a char array cast to a descriptor pointer;
 * alignment is assumed adequate — consider an alignas qualifier. */
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		/* Inactive namespaces are omitted from the log. */
		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}
773 
/* Admin get-log-page double.  For the ANA log the payload is synthesized
 * immediately from current namespace state; the command itself still
 * completes asynchronously via the adminq like any other ut request. */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}
789 
/* Queue an arbitrary admin command; it completes with success on the next
 * adminq poll.  The data buffer is ignored. */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}
797 
/* Abort double: find the outstanding request whose cb_arg matches
 * cmd_cb_arg on the given qpair (adminq if qpair == NULL), mark it
 * ABORTED_BY_REQUEST, and queue an ABORT command reporting success.
 * Returns -ENOENT if no matching request is outstanding, -ENOMEM on
 * allocation failure.  Note: the aborted request's callback still fires
 * when its own qpair is polled; the abort completes on the adminq. */
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	/* The victim request will complete as aborted. */
	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	/* cdw0 == 0 indicates the abort succeeded. */
	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}
841 
/* Admin completions are processed exactly like any other qpair's. */
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
847 
/* Trivial accessors over the fake namespace. */

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

/* ns->id is 1-based; nsdata[] is indexed from 0. */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}
883 
/* I/O command doubles: each one simply queues a ut request carrying the
 * matching NVMe opcode.  Buffers, metadata and SGL callbacks are ignored;
 * completion happens when the qpair is polled. */

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

/* Records that the _ext variant was dispatched so tests can verify the
 * memory-domain code path was taken. */
static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

/* Same flag mechanism as the readv_ext double above. */
static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}
977 
978 struct spdk_nvme_poll_group *
979 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
980 {
981 	struct spdk_nvme_poll_group *group;
982 
983 	group = calloc(1, sizeof(*group));
984 	if (group == NULL) {
985 		return NULL;
986 	}
987 
988 	group->ctx = ctx;
989 	if (table != NULL) {
990 		group->accel_fn_table = *table;
991 	}
992 	TAILQ_INIT(&group->qpairs);
993 
994 	return group;
995 }
996 
997 int
998 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
999 {
1000 	if (!TAILQ_EMPTY(&group->qpairs)) {
1001 		return -EBUSY;
1002 	}
1003 
1004 	free(group);
1005 
1006 	return 0;
1007 }
1008 
1009 int32_t
1010 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1011 				    uint32_t max_completions)
1012 {
1013 	struct ut_nvme_req *req, *tmp;
1014 	uint32_t num_completions = 0;
1015 
1016 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
1017 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
1018 		qpair->num_outstanding_reqs--;
1019 
1020 		req->cb_fn(req->cb_arg, &req->cpl);
1021 
1022 		free(req);
1023 		num_completions++;
1024 	}
1025 
1026 	return num_completions;
1027 }
1028 
1029 int64_t
1030 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1031 		uint32_t completions_per_qpair,
1032 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1033 {
1034 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1035 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1036 
1037 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1038 
1039 	if (disconnected_qpair_cb == NULL) {
1040 		return -EINVAL;
1041 	}
1042 
1043 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1044 		if (qpair->is_connected) {
1045 			local_completions = spdk_nvme_qpair_process_completions(qpair,
1046 					    completions_per_qpair);
1047 			if (local_completions < 0 && error_reason == 0) {
1048 				error_reason = local_completions;
1049 			} else {
1050 				num_completions += local_completions;
1051 				assert(num_completions >= 0);
1052 			}
1053 		}
1054 	}
1055 
1056 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1057 		if (!qpair->is_connected) {
1058 			disconnected_qpair_cb(qpair, group->ctx);
1059 		}
1060 	}
1061 
1062 	return error_reason ? error_reason : num_completions;
1063 }
1064 
/* Stub: attach a (disconnected) qpair to a poll group. The real API requires
 * the qpair to be disconnected when added; the mock asserts that here.
 */
int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}
1076 
1077 int
1078 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1079 			    struct spdk_nvme_qpair *qpair)
1080 {
1081 	CU_ASSERT(!qpair->is_connected);
1082 
1083 	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);
1084 
1085 	return 0;
1086 }
1087 
/* Stub bdev registration: returns the globally configured status so tests
 * can force registration failures (see g_ut_register_bdev_status).
 */
int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}
1093 
1094 void
1095 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1096 {
1097 	int rc;
1098 
1099 	rc = bdev->fn_table->destruct(bdev->ctxt);
1100 	if (rc <= 0 && cb_fn != NULL) {
1101 		cb_fn(cb_arg, rc);
1102 	}
1103 }
1104 
/* Stub resize notification: simply records the new block count on the bdev
 * so tests can assert on disk.blockcnt after a namespace resize.
 */
int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}
1112 
/* Stub: the mock stores the submitting channel directly in internal.ch, so
 * just cast it back.
 */
struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}
1118 
/* Stub completion: record the final status and mark the I/O as no longer
 * in submit, which is what the tests assert on.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}
1125 
1126 void
1127 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1128 {
1129 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1130 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1131 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1132 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1133 	} else {
1134 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1135 	}
1136 
1137 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1138 	bdev_io->internal.error.nvme.sct = sct;
1139 	bdev_io->internal.error.nvme.sc = sc;
1140 
1141 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1142 }
1143 
/* Stub buffer acquisition: attach the static unit-test buffer to the I/O
 * and invoke the get-buf callback immediately (always "success").
 */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}
1153 
/* Verify that nvme_ctrlr_create() registers a ctrlr that is retrievable by
 * name, and that bdev_nvme_delete() unregisters it asynchronously: the ctrlr
 * stays visible until the deferred destruct runs via poll_threads().
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion is deferred; the ctrlr must still be registered until polled. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1177 
/* Exercise bdev_nvme_reset() with I/O channels on two threads:
 *  - Case 1: rejected with -ENXIO while the ctrlr is being destructed.
 *  - Case 2: rejected with -EBUSY while a reset is already in progress.
 *  - Case 3: a full successful reset, stepped one poll at a time per thread
 *    to check the order in which each channel's qpair is torn down and then
 *    recreated, and when the ctrlr/trid failed flags are cleared.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Thread 0's qpair is destroyed first. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Then thread 1's qpair; the ctrlr itself is not reset yet. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Next poll on thread 1 resets the ctrlr, clearing its failed state. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Qpairs are recreated in the same order: thread 0 first... */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	/* ...then thread 1; the reset is still marked in progress. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* Final poll completes the reset and clears the trid failed flag. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1285 
/* Verify that deleting a ctrlr while a reset is in progress defers the
 * destruct until the reset completes, that new resets are rejected with
 * -ENXIO once destruct is pending, and that the ctrlr is only fully torn
 * down after both I/O channels are released.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not still destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling called spdk_io_device_unregister() to ctrlr,
	 * However there are two channels and destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	/* Releasing the last channel lets the deferred destruct finish. */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1360 
/* Exercise bdev_nvme_failover() with a single trid (Cases 1-4, where
 * failover degenerates to a reset) and with a secondary trid added
 * (Cases 5-7, where failover switches connected_trid from trid1 to trid2).
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	/* The secondary trid becomes the connected (head) trid. */
	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1499 
/* Attach completion callback shared by the tests: checks the reported status
 * and bdev count against the globally configured expectations.
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
1506 
/* Verify that a reset submitted while another reset is in progress is queued
 * on the channel's pending_resets list and completed together with the
 * first reset — successfully in the first scenario, and with failure in the
 * second scenario where the ctrlr reset itself fails.
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	/* Pre-set FAILED so a later SUCCESS proves the reset path completed it. */
	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	/* The second reset is deferred onto the channel's pending list. */
	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	/* Force the underlying ctrlr reset to fail. */
	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}
1631 
/* Cover the bdev_nvme_create() attach paths: a failed ctrlr produces no
 * nvme_ctrlr; a ctrlr with no namespaces produces an nvme_ctrlr but no bdev;
 * a ctrlr with one namespace produces one bdev; and a bdev registration
 * failure still leaves the nvme_ctrlr but with no attached bdev name.
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	/* The attached bdev name follows the "<ctrlr name>n<nsid>" pattern. */
	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Restore the global for subsequent tests. */
	g_ut_register_bdev_status = 0;
}
1758 
/* Verify that the poll group poller reconnects a disconnected qpair, and
 * that reconnection does not happen while the ctrlr itself is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);
	CU_ASSERT(ctrlr_ch->group != NULL);
	CU_ASSERT(ctrlr_ch->group->group != NULL);
	CU_ASSERT(ctrlr_ch->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	ctrlr_ch->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too. */
	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1815 
/* Verify aer_cb() handling of async event notifications: a namespace
 * attribute change that populates/depopulates namespaces and resizes an
 * existing bdev, and an ANA change that updates the per-namespace ANA state.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change ANA state of active namespaces. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	/* ANA log page reads complete asynchronously; advance time and poll. */
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1912 
/* Helper: submit one I/O of the given type and verify it goes out on the
 * qpair as a single outstanding request that completes successfully after
 * polling.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* The request is queued on the qpair, not yet completed. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1937 
/* Helper: submit an I/O type that is completed inline without reaching the
 * qpair (e.g. flush) and verify it succeeds immediately with no outstanding
 * requests.
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Completed synchronously; nothing was queued on the qpair. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1957 
/* Helper: submit a compare-and-write and verify it is issued as a fused pair
 * (compare first, then write — two outstanding requests) that completes as a
 * single successful bdev I/O.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	/* Tag the completion so the driver can identify the compare half. */
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1991 
/* Helper: submit an NVMe admin passthrough and verify it is processed on the
 * ctrlr's admin queue (polled on thread 1) and then completed back on the
 * submitting thread (thread 0).
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Admin queue polling drains the request on thread 1... */
	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* ...and the completion is delivered back on thread 0. */
	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}
2015 
/* Drive bdev_nvme_submit_request() through every supported I/O type (read
 * with and without a buffer, write, compare, unmap, flush, fused
 * compare-and-write, admin passthrough) and verify the ext-opts variants of
 * read/write dispatch to the *_ext NVMe APIs.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct spdk_bdev_ext_io_opts ext_io_opts = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	/* First read has no iovs: exercises the get-buf path. */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
	bdev_io->internal.ext_opts = &ext_io_opts;
	g_ut_readv_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	CU_ASSERT(g_ut_readv_ext_called == true);
	g_ut_readv_ext_called = false;

	g_ut_writev_ext_called = false;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(g_ut_writev_ext_called == true);
	g_ut_writev_ext_called = false;
	bdev_io->internal.ext_opts = NULL;

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2106 
/* Verify adding and removing alternative transport IDs (failover paths) on a
 * single named ctrlr:
 * - adding a second trid registers it in nvme_ctrlr->trids without changing
 *   the connected path,
 * - deleting an unknown trid fails with -ENXIO,
 * - deleting an unused trid just drops it from the list,
 * - deleting the connected trid triggers a reset/failover to the remaining
 *   path,
 * - deleting the last trid (or passing NULL) destroys the nvme_ctrlr itself.
 */
static void
test_add_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_ctrlr_trid *ctrid;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	/* Ctrlrs are attached with zero namespaces; no bdevs are expected. */
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	/* Creating the same name with a different trid adds trid2 as an
	 * alternative path rather than a new ctrlr.
	 */
	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	ctrlr3 = ut_attach_ctrlr(&trid3, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	rc = bdev_nvme_create(&trid3, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid3) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	/* The path switch is implemented as a reset onto the new trid. */
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Re-create the ctrlr with two paths to test NULL-trid deletion. */
	ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);

	ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2260 
/* Verify abort handling for both I/O and admin requests across threads:
 * - aborting an already-completed request fails,
 * - aborting an outstanding write on the I/O qpair succeeds and completes the
 *   write with SPDK_BDEV_IO_STATUS_ABORTED,
 * - aborting an outstanding admin request from a different thread's channel
 *   also succeeds.
 */
static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1;
	struct nvme_ctrlr_channel *ctrlr_ch1;
	int rc;

	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
	 * are submitted on thread 1. Both should succeed.
	 */

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(write_io);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	/* Aborting the already completed request should fail. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Same check for an already-completed admin request, aborted from a
	 * channel on the other thread.
	 */
	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	/* NOTE(review): duplicate of the adminq check three lines above; harmless. */
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2436 
/* Verify that bdev_nvme_get_io_qpair() returns the qpair owned by the
 * nvme_ctrlr_channel backing the given I/O channel.
 *
 * Uses a stack-allocated spdk_nvme_ctrlr and nvme_ctrlr_create() directly
 * instead of the full attach path, since only channel/qpair wiring is tested.
 */
static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	/* Getting the channel allocates the ctrlr channel and its qpair. */
	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == ctrlr_ch->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2476 
2477 /* Test a scenario that the bdev subsystem starts shutdown when there still exists
2478  * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
2479  * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
2480  * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
2481  */
/* See the comment above: emulate bdev-subsystem shutdown by calling
 * bdev_nvme_destruct() directly on each disk, then destructing the ctrlr.
 * Each namespace's bdev pointer must be cleared before the ctrlr goes away.
 */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Two namespaces -> two bdevs. */
	ctrlr = ut_attach_ctrlr(&trid, 2, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Destruct both disks directly, as spdk_bdev_unregister() would. */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Now destruct the ctrlr itself; it must unregister cleanly. */
	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2540 
/* Verify bdev_nvme_compare_ns() across the three namespace identifiers
 * (EUI64, NGUID, UUID): two namespaces compare equal when no ID differs,
 * and unequal as soon as any defined ID differs.
 *
 * Note: each case builds on the state left by the previous one (IDs are
 * cleared/restored incrementally), so the order of the cases matters.
 */
static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64 are defined and not matched. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64 are defined and matched. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUID are defined and not matched. */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUID are defined and matched. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUID are defined and not matched. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid.u.raw[0] = 0xAA;
	ns2.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUID are defined and matched. */
	ns1.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All EUI64, NGUID, and UUID are defined and matched. */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
}
2589 
/* Verify that attaching an ANA-capable ctrlr (5 namespaces) reads the ANA log
 * page during init and propagates each namespace's ANA state to the
 * corresponding nvme_ns, with a bdev created for every namespace regardless
 * of its state.
 */
static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* 5 namespaces with ANA reporting enabled. */
	ctrlr = ut_attach_ctrlr(&trid, 5, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* Give every namespace a distinct ANA state to check propagation. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	/* Extra delay/poll for the asynchronous ANA log page read. */
	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 5);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2654 
/* Register the stub accel-engine io_device so channel lookups in the code
 * under test succeed; paired with fini_accel().
 */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}
2661 
/* Unregister the stub accel-engine io_device registered by init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
2667 
2668 static void
2669 test_get_memory_domains(void)
2670 {
2671 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2672 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2673 	struct nvme_bdev nbdev = { .nvme_ns = &ns };
2674 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *) 0xf00df00d;
2675 	struct spdk_memory_domain *domains[2] = {};
2676 	int rc = 0;
2677 
2678 	/* nvme controller doesn't have a memory domain */
2679 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, NULL);
2680 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2681 	CU_ASSERT(rc == 0)
2682 
2683 	/* nvme controller has a memory domain but array size is insufficient */
2684 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2685 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2686 	CU_ASSERT(rc == 1);
2687 
2688 	/* nvme controller has a memory domain but domains array is NULL */
2689 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2690 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
2691 	CU_ASSERT(rc == 1);
2692 
2693 	/* nvme controller has a memory domain */
2694 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, domain);
2695 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
2696 	CU_ASSERT(rc == 1);
2697 	CU_ASSERT(domains[0] == domain);
2698 
2699 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2700 }
2701 
/* Test entry point: register all suites/tests with CUnit, bring up the
 * simulated threads and the bdev_nvme module, run the tests, then tear
 * everything down in reverse order. Returns the number of CUnit failures
 * so a non-zero exit status indicates test failure.
 */
int
main(int argc, const char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* Three simulated SPDK threads; tests switch among them with set_thread(). */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	/* Fini must run on thread 0, mirroring init. */
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}
2749