xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 5fd9561f54daa8eff7f3bcb56c789655bca846b1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 #include "spdk_cunit.h"
37 #include "spdk/thread.h"
38 #include "spdk/bdev_module.h"
39 #include "spdk/bdev_module.h"
40 
41 #include "common/lib/ut_multithread.c"
42 
43 #include "bdev/nvme/bdev_nvme.c"
44 
45 #include "unit/lib/json_mock.c"
46 
47 static void *g_accel_p = (void *)0xdeadbeaf;
48 
49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
50 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
51 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
52 	     spdk_nvme_remove_cb remove_cb), NULL);
53 
54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
55 		enum spdk_nvme_transport_type trtype));
56 
57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
58 	    NULL);
59 
60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
61 
62 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
63 		struct spdk_nvme_transport_id *trid), 0);
64 
65 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
66 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
67 
68 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
69 
70 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
71 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
72 
73 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);
74 
75 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
76 	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);
77 
78 int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
79 				       struct spdk_memory_domain **domains, int array_size)
80 {
81 	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);
82 
83 	return 0;
84 }
85 
86 struct spdk_io_channel *
87 spdk_accel_engine_get_io_channel(void)
88 {
89 	return spdk_get_io_channel(g_accel_p);
90 }
91 
92 void
93 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
94 		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
95 {
96 	/* Avoid warning that opts is used uninitialised */
97 	memset(opts, 0, opts_size);
98 }
99 
100 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
101 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
102 
103 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
104 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
105 
106 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
107 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
108 
109 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
110 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
111 
112 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
113 		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
114 
115 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
116 
117 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
118 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
119 
120 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
121 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
122 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
123 
124 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
125 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
126 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
127 
128 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
129 		size_t *size), 0);
130 
131 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
132 
133 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
134 
135 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
136 
137 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
138 
139 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
140 
141 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
142 
143 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
144 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
145 
146 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
147 
148 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
149 		char *name, size_t *size), 0);
150 
151 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
152 	    (struct spdk_nvme_ns *ns), 0);
153 
154 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
155 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
156 
157 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
158 	    (struct spdk_nvme_ns *ns), 0);
159 
160 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
161 	    (struct spdk_nvme_ns *ns), 0);
162 
163 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
164 	    (struct spdk_nvme_ns *ns), 0);
165 
166 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
167 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
168 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
169 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
170 
171 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
172 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
173 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
174 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
175 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
176 
177 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
178 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
179 	     void *payload, uint32_t payload_size, uint64_t slba,
180 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
181 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
182 
183 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
184 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
185 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
186 
187 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
188 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
189 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
190 
191 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
192 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
193 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
194 
195 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
196 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
197 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
198 
199 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
200 
201 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
202 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
203 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
204 
205 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));
206 
207 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
208 
209 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
210 
211 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
212 
213 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
214 
215 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
216 		struct iovec *iov,
217 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
218 
219 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));
220 
221 struct ut_nvme_req {
222 	uint16_t			opc;
223 	spdk_nvme_cmd_cb		cb_fn;
224 	void				*cb_arg;
225 	struct spdk_nvme_cpl		cpl;
226 	TAILQ_ENTRY(ut_nvme_req)	tailq;
227 };
228 
229 struct spdk_nvme_ns {
230 	struct spdk_nvme_ctrlr		*ctrlr;
231 	uint32_t			id;
232 	bool				is_active;
233 	struct spdk_uuid		*uuid;
234 	enum spdk_nvme_ana_state	ana_state;
235 	enum spdk_nvme_csi		csi;
236 };
237 
238 struct spdk_nvme_qpair {
239 	struct spdk_nvme_ctrlr		*ctrlr;
240 	uint8_t				failure_reason;
241 	bool				is_connected;
242 	bool				in_completion_context;
243 	bool				delete_after_completion_context;
244 	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
245 	uint32_t			num_outstanding_reqs;
246 	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
247 	struct spdk_nvme_poll_group	*poll_group;
248 	void				*poll_group_tailq_head;
249 	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
250 };
251 
252 struct spdk_nvme_ctrlr {
253 	uint32_t			num_ns;
254 	struct spdk_nvme_ns		*ns;
255 	struct spdk_nvme_ns_data	*nsdata;
256 	struct spdk_nvme_qpair		adminq;
257 	struct spdk_nvme_ctrlr_data	cdata;
258 	bool				attached;
259 	bool				is_failed;
260 	bool				fail_reset;
261 	struct spdk_nvme_transport_id	trid;
262 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
263 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
264 	struct spdk_nvme_ctrlr_opts	opts;
265 };
266 
267 struct spdk_nvme_poll_group {
268 	void				*ctx;
269 	struct spdk_nvme_accel_fn_table	accel_fn_table;
270 	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
271 	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
272 };
273 
274 struct spdk_nvme_probe_ctx {
275 	struct spdk_nvme_transport_id	trid;
276 	void				*cb_ctx;
277 	spdk_nvme_attach_cb		attach_cb;
278 	struct spdk_nvme_ctrlr		*init_ctrlr;
279 };
280 
281 uint32_t
282 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
283 {
284 	uint32_t nsid;
285 
286 	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
287 		if (ctrlr->ns[nsid - 1].is_active) {
288 			return nsid;
289 		}
290 	}
291 
292 	return 0;
293 }
294 
295 uint32_t
296 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
297 {
298 	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
299 		if (ctrlr->ns[nsid - 1].is_active) {
300 			return nsid;
301 		}
302 	}
303 
304 	return 0;
305 }
306 
307 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
308 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
309 			g_ut_attached_ctrlrs);
310 static int g_ut_attach_ctrlr_status;
311 static size_t g_ut_attach_bdev_count;
312 static int g_ut_register_bdev_status;
313 static struct spdk_bdev *g_ut_registered_bdev;
314 static uint16_t g_ut_cntlid;
315 static struct nvme_path_id g_any_path = {};
316 
317 static void
318 ut_init_trid(struct spdk_nvme_transport_id *trid)
319 {
320 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
321 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
322 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
323 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
324 }
325 
326 static void
327 ut_init_trid2(struct spdk_nvme_transport_id *trid)
328 {
329 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
330 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
331 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
332 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
333 }
334 
335 static void
336 ut_init_trid3(struct spdk_nvme_transport_id *trid)
337 {
338 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
339 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
340 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
341 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
342 }
343 
344 static int
345 cmp_int(int a, int b)
346 {
347 	return a - b;
348 }
349 
350 int
351 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
352 			       const struct spdk_nvme_transport_id *trid2)
353 {
354 	int cmp;
355 
356 	/* We assume trtype is TCP for now. */
357 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
358 
359 	cmp = cmp_int(trid1->trtype, trid2->trtype);
360 	if (cmp) {
361 		return cmp;
362 	}
363 
364 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
365 	if (cmp) {
366 		return cmp;
367 	}
368 
369 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
370 	if (cmp) {
371 		return cmp;
372 	}
373 
374 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
375 	if (cmp) {
376 		return cmp;
377 	}
378 
379 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
380 	if (cmp) {
381 		return cmp;
382 	}
383 
384 	return 0;
385 }
386 
387 static struct spdk_nvme_ctrlr *
388 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
389 		bool ana_reporting, bool multipath)
390 {
391 	struct spdk_nvme_ctrlr *ctrlr;
392 	uint32_t i;
393 
394 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
395 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
396 			/* There is a ctrlr whose trid matches. */
397 			return NULL;
398 		}
399 	}
400 
401 	ctrlr = calloc(1, sizeof(*ctrlr));
402 	if (ctrlr == NULL) {
403 		return NULL;
404 	}
405 
406 	ctrlr->attached = true;
407 	ctrlr->adminq.ctrlr = ctrlr;
408 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
409 	ctrlr->adminq.is_connected = true;
410 
411 	if (num_ns != 0) {
412 		ctrlr->num_ns = num_ns;
413 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
414 		if (ctrlr->ns == NULL) {
415 			free(ctrlr);
416 			return NULL;
417 		}
418 
419 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
420 		if (ctrlr->nsdata == NULL) {
421 			free(ctrlr->ns);
422 			free(ctrlr);
423 			return NULL;
424 		}
425 
426 		for (i = 0; i < num_ns; i++) {
427 			ctrlr->ns[i].id = i + 1;
428 			ctrlr->ns[i].ctrlr = ctrlr;
429 			ctrlr->ns[i].is_active = true;
430 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
431 			ctrlr->nsdata[i].nsze = 1024;
432 			ctrlr->nsdata[i].nmic.can_share = multipath;
433 		}
434 
435 		ctrlr->cdata.nn = num_ns;
436 		ctrlr->cdata.nanagrpid = num_ns;
437 	}
438 
439 	ctrlr->cdata.cntlid = ++g_ut_cntlid;
440 	ctrlr->cdata.cmic.multi_ctrlr = multipath;
441 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
442 	ctrlr->trid = *trid;
443 	TAILQ_INIT(&ctrlr->active_io_qpairs);
444 
445 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
446 
447 	return ctrlr;
448 }
449 
450 static void
451 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
452 {
453 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
454 
455 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
456 	free(ctrlr->nsdata);
457 	free(ctrlr->ns);
458 	free(ctrlr);
459 }
460 
461 static int
462 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
463 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
464 {
465 	struct ut_nvme_req *req;
466 
467 	req = calloc(1, sizeof(*req));
468 	if (req == NULL) {
469 		return -ENOMEM;
470 	}
471 
472 	req->opc = opc;
473 	req->cb_fn = cb_fn;
474 	req->cb_arg = cb_arg;
475 
476 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
477 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
478 
479 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
480 	qpair->num_outstanding_reqs++;
481 
482 	return 0;
483 }
484 
485 static struct ut_nvme_req *
486 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
487 {
488 	struct ut_nvme_req *req;
489 
490 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
491 		if (req->cb_arg == cb_arg) {
492 			break;
493 		}
494 	}
495 
496 	return req;
497 }
498 
499 static struct spdk_bdev_io *
500 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
501 		 struct spdk_io_channel *ch)
502 {
503 	struct spdk_bdev_io *bdev_io;
504 
505 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
506 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
507 	bdev_io->type = type;
508 	bdev_io->bdev = &nbdev->disk;
509 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
510 
511 	return bdev_io;
512 }
513 
514 static void
515 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
516 {
517 	bdev_io->u.bdev.iovs = &bdev_io->iov;
518 	bdev_io->u.bdev.iovcnt = 1;
519 
520 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
521 	bdev_io->iov.iov_len = 4096;
522 }
523 
524 static void
525 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
526 {
527 	if (ctrlr->is_failed) {
528 		free(ctrlr);
529 		return;
530 	}
531 
532 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
533 	if (probe_ctx->cb_ctx) {
534 		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
535 	}
536 
537 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
538 
539 	if (probe_ctx->attach_cb) {
540 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
541 	}
542 }
543 
544 int
545 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
546 {
547 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
548 
549 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
550 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
551 			continue;
552 		}
553 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
554 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
555 	}
556 
557 	free(probe_ctx);
558 
559 	return 0;
560 }
561 
562 struct spdk_nvme_probe_ctx *
563 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
564 			const struct spdk_nvme_ctrlr_opts *opts,
565 			spdk_nvme_attach_cb attach_cb)
566 {
567 	struct spdk_nvme_probe_ctx *probe_ctx;
568 
569 	if (trid == NULL) {
570 		return NULL;
571 	}
572 
573 	probe_ctx = calloc(1, sizeof(*probe_ctx));
574 	if (probe_ctx == NULL) {
575 		return NULL;
576 	}
577 
578 	probe_ctx->trid = *trid;
579 	probe_ctx->cb_ctx = (void *)opts;
580 	probe_ctx->attach_cb = attach_cb;
581 
582 	return probe_ctx;
583 }
584 
585 int
586 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
587 {
588 	if (ctrlr->attached) {
589 		ut_detach_ctrlr(ctrlr);
590 	}
591 
592 	return 0;
593 }
594 
595 int
596 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
597 {
598 	SPDK_CU_ASSERT_FATAL(ctx != NULL);
599 	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;
600 
601 	return 0;
602 }
603 
604 int
605 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
606 {
607 	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
608 }
609 
610 void
611 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
612 {
613 	memset(opts, 0, opts_size);
614 
615 	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
616 		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
617 }
618 
619 const struct spdk_nvme_ctrlr_data *
620 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
621 {
622 	return &ctrlr->cdata;
623 }
624 
625 uint32_t
626 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
627 {
628 	return ctrlr->num_ns;
629 }
630 
631 struct spdk_nvme_ns *
632 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
633 {
634 	if (nsid < 1 || nsid > ctrlr->num_ns) {
635 		return NULL;
636 	}
637 
638 	return &ctrlr->ns[nsid - 1];
639 }
640 
641 bool
642 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
643 {
644 	if (nsid < 1 || nsid > ctrlr->num_ns) {
645 		return false;
646 	}
647 
648 	return ctrlr->ns[nsid - 1].is_active;
649 }
650 
651 union spdk_nvme_csts_register
652 	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
653 {
654 	union spdk_nvme_csts_register csts;
655 
656 	csts.raw = 0;
657 
658 	return csts;
659 }
660 
661 union spdk_nvme_vs_register
662 	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
663 {
664 	union spdk_nvme_vs_register vs;
665 
666 	vs.raw = 0;
667 
668 	return vs;
669 }
670 
671 struct spdk_nvme_qpair *
672 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
673 			       const struct spdk_nvme_io_qpair_opts *user_opts,
674 			       size_t opts_size)
675 {
676 	struct spdk_nvme_qpair *qpair;
677 
678 	qpair = calloc(1, sizeof(*qpair));
679 	if (qpair == NULL) {
680 		return NULL;
681 	}
682 
683 	qpair->ctrlr = ctrlr;
684 	TAILQ_INIT(&qpair->outstanding_reqs);
685 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
686 
687 	return qpair;
688 }
689 
690 static void
691 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
692 {
693 	struct spdk_nvme_poll_group *group = qpair->poll_group;
694 
695 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
696 
697 	qpair->poll_group_tailq_head = &group->connected_qpairs;
698 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
699 	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
700 }
701 
702 static void
703 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
704 {
705 	struct spdk_nvme_poll_group *group = qpair->poll_group;
706 
707 	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);
708 
709 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
710 	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
711 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
712 }
713 
714 int
715 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
716 				 struct spdk_nvme_qpair *qpair)
717 {
718 	if (qpair->is_connected) {
719 		return -EISCONN;
720 	}
721 
722 	qpair->is_connected = true;
723 	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
724 
725 	if (qpair->poll_group) {
726 		nvme_poll_group_connect_qpair(qpair);
727 	}
728 
729 	return 0;
730 }
731 
732 void
733 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
734 {
735 	if (!qpair->is_connected) {
736 		return;
737 	}
738 
739 	qpair->is_connected = false;
740 
741 	if (qpair->poll_group != NULL) {
742 		nvme_poll_group_disconnect_qpair(qpair);
743 	}
744 }
745 
746 int
747 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
748 {
749 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
750 
751 	if (qpair->in_completion_context) {
752 		qpair->delete_after_completion_context = true;
753 		return 0;
754 	}
755 
756 	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
757 
758 	if (qpair->poll_group != NULL) {
759 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
760 	}
761 
762 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
763 
764 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
765 
766 	free(qpair);
767 
768 	return 0;
769 }
770 
771 int
772 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
773 {
774 	if (ctrlr->fail_reset) {
775 		ctrlr->is_failed = true;
776 		return -EIO;
777 	}
778 
779 	ctrlr->adminq.is_connected = true;
780 	return 0;
781 }
782 
783 void
784 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
785 {
786 }
787 
788 int
789 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
790 {
791 	ctrlr->adminq.is_connected = false;
792 	ctrlr->is_failed = false;
793 
794 	return 0;
795 }
796 
797 void
798 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
799 {
800 	ctrlr->is_failed = true;
801 }
802 
803 bool
804 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
805 {
806 	return ctrlr->is_failed;
807 }
808 
809 spdk_nvme_qp_failure_reason
810 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
811 {
812 	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
813 }
814 
815 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
816 				 sizeof(uint32_t))
817 static void
818 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
819 {
820 	struct spdk_nvme_ana_page ana_hdr;
821 	char _ana_desc[UT_ANA_DESC_SIZE];
822 	struct spdk_nvme_ana_group_descriptor *ana_desc;
823 	struct spdk_nvme_ns *ns;
824 	uint32_t i;
825 
826 	memset(&ana_hdr, 0, sizeof(ana_hdr));
827 	ana_hdr.num_ana_group_desc = ctrlr->num_ns;
828 
829 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
830 	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));
831 
832 	buf += sizeof(ana_hdr);
833 	length -= sizeof(ana_hdr);
834 
835 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
836 
837 	for (i = 0; i < ctrlr->num_ns; i++) {
838 		ns = &ctrlr->ns[i];
839 
840 		if (!ns->is_active) {
841 			continue;
842 		}
843 
844 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
845 
846 		ana_desc->ana_group_id = ns->id;
847 		ana_desc->num_of_nsid = 1;
848 		ana_desc->ana_state = ns->ana_state;
849 		ana_desc->nsid[0] = ns->id;
850 
851 		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
852 		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);
853 
854 		buf += UT_ANA_DESC_SIZE;
855 		length -= UT_ANA_DESC_SIZE;
856 	}
857 }
858 
859 int
860 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
861 				 uint8_t log_page, uint32_t nsid,
862 				 void *payload, uint32_t payload_size,
863 				 uint64_t offset,
864 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
865 {
866 	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
867 		SPDK_CU_ASSERT_FATAL(offset == 0);
868 		ut_create_ana_log_page(ctrlr, payload, payload_size);
869 	}
870 
871 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
872 				      cb_fn, cb_arg);
873 }
874 
875 int
876 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
877 			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
878 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
879 {
880 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
881 }
882 
883 int
884 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
885 			      void *cmd_cb_arg,
886 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
887 {
888 	struct ut_nvme_req *req = NULL, *abort_req;
889 
890 	if (qpair == NULL) {
891 		qpair = &ctrlr->adminq;
892 	}
893 
894 	abort_req = calloc(1, sizeof(*abort_req));
895 	if (abort_req == NULL) {
896 		return -ENOMEM;
897 	}
898 
899 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
900 		if (req->cb_arg == cmd_cb_arg) {
901 			break;
902 		}
903 	}
904 
905 	if (req == NULL) {
906 		free(abort_req);
907 		return -ENOENT;
908 	}
909 
910 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
911 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
912 
913 	abort_req->opc = SPDK_NVME_OPC_ABORT;
914 	abort_req->cb_fn = cb_fn;
915 	abort_req->cb_arg = cb_arg;
916 
917 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
918 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
919 	abort_req->cpl.cdw0 = 0;
920 
921 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
922 	ctrlr->adminq.num_outstanding_reqs++;
923 
924 	return 0;
925 }
926 
927 int32_t
928 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
929 {
930 	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
931 }
932 
933 uint32_t
934 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
935 {
936 	return ns->id;
937 }
938 
939 struct spdk_nvme_ctrlr *
940 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
941 {
942 	return ns->ctrlr;
943 }
944 
945 static inline struct spdk_nvme_ns_data *
946 _nvme_ns_get_data(struct spdk_nvme_ns *ns)
947 {
948 	return &ns->ctrlr->nsdata[ns->id - 1];
949 }
950 
951 const struct spdk_nvme_ns_data *
952 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
953 {
954 	return _nvme_ns_get_data(ns);
955 }
956 
957 uint64_t
958 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
959 {
960 	return _nvme_ns_get_data(ns)->nsze;
961 }
962 
963 const struct spdk_uuid *
964 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
965 {
966 	return ns->uuid;
967 }
968 
969 enum spdk_nvme_csi
970 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
971 	return ns->csi;
972 }
973 
974 int
975 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
976 			      void *metadata, uint64_t lba, uint32_t lba_count,
977 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
978 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
979 {
980 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
981 }
982 
983 int
984 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
985 			       void *buffer, void *metadata, uint64_t lba,
986 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
987 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
988 {
989 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
990 }
991 
992 int
993 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
994 			       uint64_t lba, uint32_t lba_count,
995 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
996 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
997 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
998 			       uint16_t apptag_mask, uint16_t apptag)
999 {
1000 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1001 }
1002 
1003 int
1004 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1005 				uint64_t lba, uint32_t lba_count,
1006 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1007 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1008 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1009 				uint16_t apptag_mask, uint16_t apptag)
1010 {
1011 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1012 }
1013 
1014 static bool g_ut_readv_ext_called;
1015 int
1016 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1017 			   uint64_t lba, uint32_t lba_count,
1018 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1019 			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1020 			   spdk_nvme_req_next_sge_cb next_sge_fn,
1021 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1022 {
1023 	g_ut_readv_ext_called = true;
1024 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1025 }
1026 
1027 static bool g_ut_writev_ext_called;
1028 int
1029 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1030 			    uint64_t lba, uint32_t lba_count,
1031 			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1032 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1033 			    spdk_nvme_req_next_sge_cb next_sge_fn,
1034 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1035 {
1036 	g_ut_writev_ext_called = true;
1037 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1038 }
1039 
1040 int
1041 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1042 				  uint64_t lba, uint32_t lba_count,
1043 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1044 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1045 				  spdk_nvme_req_next_sge_cb next_sge_fn,
1046 				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
1047 {
1048 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
1049 }
1050 
1051 int
1052 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1053 				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1054 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1055 {
1056 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
1057 }
1058 
1059 int
1060 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1061 			      uint64_t lba, uint32_t lba_count,
1062 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1063 			      uint32_t io_flags)
1064 {
1065 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
1066 }
1067 
1068 struct spdk_nvme_poll_group *
1069 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
1070 {
1071 	struct spdk_nvme_poll_group *group;
1072 
1073 	group = calloc(1, sizeof(*group));
1074 	if (group == NULL) {
1075 		return NULL;
1076 	}
1077 
1078 	group->ctx = ctx;
1079 	if (table != NULL) {
1080 		group->accel_fn_table = *table;
1081 	}
1082 	TAILQ_INIT(&group->connected_qpairs);
1083 	TAILQ_INIT(&group->disconnected_qpairs);
1084 
1085 	return group;
1086 }
1087 
1088 int
1089 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
1090 {
1091 	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
1092 	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
1093 		return -EBUSY;
1094 	}
1095 
1096 	free(group);
1097 
1098 	return 0;
1099 }
1100 
1101 spdk_nvme_qp_failure_reason
1102 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
1103 {
1104 	return qpair->failure_reason;
1105 }
1106 
1107 int32_t
1108 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1109 				    uint32_t max_completions)
1110 {
1111 	struct ut_nvme_req *req, *tmp;
1112 	uint32_t num_completions = 0;
1113 
1114 	if (!qpair->is_connected) {
1115 		return -ENXIO;
1116 	}
1117 
1118 	qpair->in_completion_context = true;
1119 
1120 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
1121 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
1122 		qpair->num_outstanding_reqs--;
1123 
1124 		req->cb_fn(req->cb_arg, &req->cpl);
1125 
1126 		free(req);
1127 		num_completions++;
1128 	}
1129 
1130 	qpair->in_completion_context = false;
1131 	if (qpair->delete_after_completion_context) {
1132 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1133 	}
1134 
1135 	return num_completions;
1136 }
1137 
1138 int64_t
1139 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1140 		uint32_t completions_per_qpair,
1141 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1142 {
1143 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1144 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1145 
1146 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1147 
1148 	if (disconnected_qpair_cb == NULL) {
1149 		return -EINVAL;
1150 	}
1151 
1152 	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
1153 		disconnected_qpair_cb(qpair, group->ctx);
1154 	}
1155 
1156 	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
1157 		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
1158 			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
1159 			/* Bump the number of completions so this counts as "busy" */
1160 			num_completions++;
1161 			continue;
1162 		}
1163 
1164 		local_completions = spdk_nvme_qpair_process_completions(qpair,
1165 				    completions_per_qpair);
1166 		if (local_completions < 0 && error_reason == 0) {
1167 			error_reason = local_completions;
1168 		} else {
1169 			num_completions += local_completions;
1170 			assert(num_completions >= 0);
1171 		}
1172 	}
1173 
1174 	return error_reason ? error_reason : num_completions;
1175 }
1176 
1177 int
1178 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1179 			 struct spdk_nvme_qpair *qpair)
1180 {
1181 	CU_ASSERT(!qpair->is_connected);
1182 
1183 	qpair->poll_group = group;
1184 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
1185 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
1186 
1187 	return 0;
1188 }
1189 
1190 int
1191 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1192 			    struct spdk_nvme_qpair *qpair)
1193 {
1194 	CU_ASSERT(!qpair->is_connected);
1195 
1196 	if (qpair->poll_group == NULL) {
1197 		return -ENOENT;
1198 	}
1199 
1200 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
1201 
1202 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
1203 
1204 	qpair->poll_group = NULL;
1205 	qpair->poll_group_tailq_head = NULL;
1206 
1207 	return 0;
1208 }
1209 
1210 int
1211 spdk_bdev_register(struct spdk_bdev *bdev)
1212 {
1213 	g_ut_registered_bdev = bdev;
1214 
1215 	return g_ut_register_bdev_status;
1216 }
1217 
1218 void
1219 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1220 {
1221 	int rc;
1222 
1223 	rc = bdev->fn_table->destruct(bdev->ctxt);
1224 
1225 	if (bdev == g_ut_registered_bdev) {
1226 		g_ut_registered_bdev = NULL;
1227 	}
1228 
1229 	if (rc <= 0 && cb_fn != NULL) {
1230 		cb_fn(cb_arg, rc);
1231 	}
1232 }
1233 
1234 int
1235 spdk_bdev_open_ext(const char *bdev_name, bool write,
1236 		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
1237 		   struct spdk_bdev_desc **desc)
1238 {
1239 	if (g_ut_registered_bdev == NULL ||
1240 	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
1241 		return -ENODEV;
1242 	}
1243 
1244 	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;
1245 
1246 	return 0;
1247 }
1248 
1249 struct spdk_bdev *
1250 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
1251 {
1252 	return (struct spdk_bdev *)desc;
1253 }
1254 
1255 int
1256 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
1257 {
1258 	bdev->blockcnt = size;
1259 
1260 	return 0;
1261 }
1262 
1263 struct spdk_io_channel *
1264 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
1265 {
1266 	return (struct spdk_io_channel *)bdev_io->internal.ch;
1267 }
1268 
1269 void
1270 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
1271 {
1272 	bdev_io->internal.status = status;
1273 	bdev_io->internal.in_submit_request = false;
1274 }
1275 
1276 void
1277 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1278 {
1279 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1280 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1281 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1282 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1283 	} else {
1284 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1285 	}
1286 
1287 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1288 	bdev_io->internal.error.nvme.sct = sct;
1289 	bdev_io->internal.error.nvme.sc = sc;
1290 
1291 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1292 }
1293 
1294 void
1295 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1296 {
1297 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1298 
1299 	ut_bdev_io_set_buf(bdev_io);
1300 
1301 	cb(ch, bdev_io, true);
1302 }
1303 
1304 static void
1305 test_create_ctrlr(void)
1306 {
1307 	struct spdk_nvme_transport_id trid = {};
1308 	struct spdk_nvme_ctrlr ctrlr = {};
1309 	int rc;
1310 
1311 	ut_init_trid(&trid);
1312 
1313 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1314 	CU_ASSERT(rc == 0);
1315 
1316 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1317 
1318 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1319 	CU_ASSERT(rc == 0);
1320 
1321 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1322 
1323 	poll_threads();
1324 	spdk_delay_us(1000);
1325 	poll_threads();
1326 
1327 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1328 }
1329 
1330 static void
1331 test_reset_ctrlr(void)
1332 {
1333 	struct spdk_nvme_transport_id trid = {};
1334 	struct spdk_nvme_ctrlr ctrlr = {};
1335 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1336 	struct nvme_path_id *curr_trid;
1337 	struct spdk_io_channel *ch1, *ch2;
1338 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1339 	int rc;
1340 
1341 	ut_init_trid(&trid);
1342 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1343 
1344 	set_thread(0);
1345 
1346 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1347 	CU_ASSERT(rc == 0);
1348 
1349 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1350 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1351 
1352 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1353 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1354 
1355 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1356 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1357 
1358 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
1359 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1360 
1361 	set_thread(1);
1362 
1363 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1364 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1365 
1366 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
1367 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1368 
1369 	/* Reset starts from thread 1. */
1370 	set_thread(1);
1371 
1372 	/* Case 1: ctrlr is already being destructed. */
1373 	nvme_ctrlr->destruct = true;
1374 
1375 	rc = bdev_nvme_reset(nvme_ctrlr);
1376 	CU_ASSERT(rc == -ENXIO);
1377 
1378 	/* Case 2: reset is in progress. */
1379 	nvme_ctrlr->destruct = false;
1380 	nvme_ctrlr->resetting = true;
1381 
1382 	rc = bdev_nvme_reset(nvme_ctrlr);
1383 	CU_ASSERT(rc == -EBUSY);
1384 
1385 	/* Case 3: reset completes successfully. */
1386 	nvme_ctrlr->resetting = false;
1387 	curr_trid->is_failed = true;
1388 	ctrlr.is_failed = true;
1389 
1390 	rc = bdev_nvme_reset(nvme_ctrlr);
1391 	CU_ASSERT(rc == 0);
1392 	CU_ASSERT(nvme_ctrlr->resetting == true);
1393 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1394 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1395 
1396 	poll_thread_times(0, 3);
1397 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1398 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1399 
1400 	poll_thread_times(0, 1);
1401 	poll_thread_times(1, 1);
1402 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1403 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1404 	CU_ASSERT(ctrlr.is_failed == true);
1405 
1406 	poll_thread_times(1, 1);
1407 	poll_thread_times(0, 1);
1408 	CU_ASSERT(ctrlr.is_failed == false);
1409 	CU_ASSERT(ctrlr.adminq.is_connected == false);
1410 
1411 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1412 	poll_thread_times(0, 2);
1413 	CU_ASSERT(ctrlr.adminq.is_connected == true);
1414 
1415 	poll_thread_times(0, 1);
1416 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1417 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1418 
1419 	poll_thread_times(1, 1);
1420 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1421 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1422 	CU_ASSERT(nvme_ctrlr->resetting == true);
1423 	CU_ASSERT(curr_trid->is_failed == true);
1424 
1425 	poll_thread_times(0, 2);
1426 	CU_ASSERT(nvme_ctrlr->resetting == true);
1427 	poll_thread_times(1, 1);
1428 	CU_ASSERT(nvme_ctrlr->resetting == true);
1429 	poll_thread_times(0, 1);
1430 	CU_ASSERT(nvme_ctrlr->resetting == false);
1431 	CU_ASSERT(curr_trid->is_failed == false);
1432 
1433 	spdk_put_io_channel(ch2);
1434 
1435 	set_thread(0);
1436 
1437 	spdk_put_io_channel(ch1);
1438 
1439 	poll_threads();
1440 
1441 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1442 	CU_ASSERT(rc == 0);
1443 
1444 	poll_threads();
1445 	spdk_delay_us(1000);
1446 	poll_threads();
1447 
1448 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1449 }
1450 
1451 static void
1452 test_race_between_reset_and_destruct_ctrlr(void)
1453 {
1454 	struct spdk_nvme_transport_id trid = {};
1455 	struct spdk_nvme_ctrlr ctrlr = {};
1456 	struct nvme_ctrlr *nvme_ctrlr;
1457 	struct spdk_io_channel *ch1, *ch2;
1458 	int rc;
1459 
1460 	ut_init_trid(&trid);
1461 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1462 
1463 	set_thread(0);
1464 
1465 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1466 	CU_ASSERT(rc == 0);
1467 
1468 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1469 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1470 
1471 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1472 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1473 
1474 	set_thread(1);
1475 
1476 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1477 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1478 
1479 	/* Reset starts from thread 1. */
1480 	set_thread(1);
1481 
1482 	rc = bdev_nvme_reset(nvme_ctrlr);
1483 	CU_ASSERT(rc == 0);
1484 	CU_ASSERT(nvme_ctrlr->resetting == true);
1485 
1486 	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
1487 	set_thread(0);
1488 
1489 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1490 	CU_ASSERT(rc == 0);
1491 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1492 	CU_ASSERT(nvme_ctrlr->destruct == true);
1493 	CU_ASSERT(nvme_ctrlr->resetting == true);
1494 
1495 	poll_threads();
1496 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1497 	poll_threads();
1498 
1499 	/* Reset completed but ctrlr is not still destructed yet. */
1500 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1501 	CU_ASSERT(nvme_ctrlr->destruct == true);
1502 	CU_ASSERT(nvme_ctrlr->resetting == false);
1503 
1504 	/* New reset request is rejected. */
1505 	rc = bdev_nvme_reset(nvme_ctrlr);
1506 	CU_ASSERT(rc == -ENXIO);
1507 
1508 	/* Additional polling called spdk_io_device_unregister() to ctrlr,
1509 	 * However there are two channels and destruct is not completed yet.
1510 	 */
1511 	poll_threads();
1512 
1513 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1514 
1515 	set_thread(0);
1516 
1517 	spdk_put_io_channel(ch1);
1518 
1519 	set_thread(1);
1520 
1521 	spdk_put_io_channel(ch2);
1522 
1523 	poll_threads();
1524 	spdk_delay_us(1000);
1525 	poll_threads();
1526 
1527 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1528 }
1529 
1530 static void
1531 test_failover_ctrlr(void)
1532 {
1533 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
1534 	struct spdk_nvme_ctrlr ctrlr = {};
1535 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1536 	struct nvme_path_id *curr_trid, *next_trid;
1537 	struct spdk_io_channel *ch1, *ch2;
1538 	int rc;
1539 
1540 	ut_init_trid(&trid1);
1541 	ut_init_trid2(&trid2);
1542 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1543 
1544 	set_thread(0);
1545 
1546 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1547 	CU_ASSERT(rc == 0);
1548 
1549 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1550 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1551 
1552 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1553 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1554 
1555 	set_thread(1);
1556 
1557 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1558 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1559 
1560 	/* First, test one trid case. */
1561 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1562 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1563 
1564 	/* Failover starts from thread 1. */
1565 	set_thread(1);
1566 
1567 	/* Case 1: ctrlr is already being destructed. */
1568 	nvme_ctrlr->destruct = true;
1569 
1570 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1571 	CU_ASSERT(rc == -ENXIO);
1572 	CU_ASSERT(curr_trid->is_failed == false);
1573 
1574 	/* Case 2: reset is in progress. */
1575 	nvme_ctrlr->destruct = false;
1576 	nvme_ctrlr->resetting = true;
1577 
1578 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1579 	CU_ASSERT(rc == -EBUSY);
1580 
1581 	/* Case 3: reset completes successfully. */
1582 	nvme_ctrlr->resetting = false;
1583 
1584 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1585 	CU_ASSERT(rc == 0);
1586 
1587 	CU_ASSERT(nvme_ctrlr->resetting == true);
1588 	CU_ASSERT(curr_trid->is_failed == true);
1589 
1590 	poll_threads();
1591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1592 	poll_threads();
1593 
1594 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1595 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1596 
1597 	CU_ASSERT(nvme_ctrlr->resetting == false);
1598 	CU_ASSERT(curr_trid->is_failed == false);
1599 
1600 	set_thread(0);
1601 
1602 	/* Second, test two trids case. */
1603 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1604 	CU_ASSERT(rc == 0);
1605 
1606 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1607 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1608 	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
1609 	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
1610 
1611 	/* Failover starts from thread 1. */
1612 	set_thread(1);
1613 
1614 	/* Case 4: reset is in progress. */
1615 	nvme_ctrlr->resetting = true;
1616 
1617 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1618 	CU_ASSERT(rc == -EBUSY);
1619 
1620 	/* Case 5: failover completes successfully. */
1621 	nvme_ctrlr->resetting = false;
1622 
1623 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1624 	CU_ASSERT(rc == 0);
1625 
1626 	CU_ASSERT(nvme_ctrlr->resetting == true);
1627 
1628 	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1629 	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
1630 	CU_ASSERT(next_trid != curr_trid);
1631 	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
1632 	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
1633 
1634 	poll_threads();
1635 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1636 	poll_threads();
1637 
1638 	CU_ASSERT(nvme_ctrlr->resetting == false);
1639 
1640 	spdk_put_io_channel(ch2);
1641 
1642 	set_thread(0);
1643 
1644 	spdk_put_io_channel(ch1);
1645 
1646 	poll_threads();
1647 
1648 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1649 	CU_ASSERT(rc == 0);
1650 
1651 	poll_threads();
1652 	spdk_delay_us(1000);
1653 	poll_threads();
1654 
1655 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1656 }
1657 
1658 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1659  *
1660  * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was
1661  * disconnected and reset ctrlr failed repeatedly before starting failover from trid1
1662  * to trid2. While processing the failed reset, trid3 was added. trid1 should
1663  * have been active, i.e., the head of the list until the failover completed.
1664  * However trid3 was inserted to the head of the list by mistake.
1665  *
1666  * I/O qpairs have smaller polling period than admin qpair. When a connection is
1667  * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error
1668  * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr
1669  * may be executed repeatedly before failover is executed. Hence this bug is real.
1670  *
1671  * The following test verifies the fix.
1672  */
1673 static void
1674 test_race_between_failover_and_add_secondary_trid(void)
1675 {
1676 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1677 	struct spdk_nvme_ctrlr ctrlr = {};
1678 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1679 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1680 	struct spdk_io_channel *ch1, *ch2;
1681 	int rc;
1682 
1683 	ut_init_trid(&trid1);
1684 	ut_init_trid2(&trid2);
1685 	ut_init_trid3(&trid3);
1686 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1687 
1688 	set_thread(0);
1689 
1690 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1691 	CU_ASSERT(rc == 0);
1692 
1693 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1694 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1695 
1696 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1697 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1698 
1699 	set_thread(1);
1700 
1701 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1702 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1703 
1704 	set_thread(0);
1705 
1706 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1707 	CU_ASSERT(rc == 0);
1708 
1709 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1710 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1711 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1712 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1713 	path_id2 = TAILQ_NEXT(path_id1, link);
1714 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1715 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1716 
1717 	ctrlr.fail_reset = true;
1718 
1719 	rc = bdev_nvme_reset(nvme_ctrlr);
1720 	CU_ASSERT(rc == 0);
1721 
1722 	poll_threads();
1723 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1724 	poll_threads();
1725 
1726 	CU_ASSERT(path_id1->is_failed == true);
1727 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1728 
1729 	rc = bdev_nvme_reset(nvme_ctrlr);
1730 	CU_ASSERT(rc == 0);
1731 
1732 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1733 	CU_ASSERT(rc == 0);
1734 
1735 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1736 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1737 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1738 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1739 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1740 	path_id3 = TAILQ_NEXT(path_id2, link);
1741 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1742 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1743 
1744 	poll_threads();
1745 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1746 	poll_threads();
1747 
1748 	spdk_put_io_channel(ch1);
1749 
1750 	set_thread(1);
1751 
1752 	spdk_put_io_channel(ch2);
1753 
1754 	poll_threads();
1755 
1756 	set_thread(0);
1757 
1758 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1759 	CU_ASSERT(rc == 0);
1760 
1761 	poll_threads();
1762 	spdk_delay_us(1000);
1763 	poll_threads();
1764 
1765 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1766 }
1767 
1768 static void
1769 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1770 {
1771 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1772 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1773 }
1774 
1775 static void
1776 test_pending_reset(void)
1777 {
1778 	struct spdk_nvme_transport_id trid = {};
1779 	struct spdk_nvme_ctrlr *ctrlr;
1780 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1781 	const int STRING_SIZE = 32;
1782 	const char *attached_names[STRING_SIZE];
1783 	struct nvme_bdev *bdev;
1784 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1785 	struct spdk_io_channel *ch1, *ch2;
1786 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1787 	struct nvme_io_path *io_path1, *io_path2;
1788 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1789 	int rc;
1790 
1791 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1792 	ut_init_trid(&trid);
1793 
1794 	set_thread(0);
1795 
1796 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1797 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1798 
1799 	g_ut_attach_ctrlr_status = 0;
1800 	g_ut_attach_bdev_count = 1;
1801 
1802 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1803 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1804 	CU_ASSERT(rc == 0);
1805 
1806 	spdk_delay_us(1000);
1807 	poll_threads();
1808 
1809 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1810 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1811 
1812 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1813 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1814 
1815 	ch1 = spdk_get_io_channel(bdev);
1816 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1817 
1818 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1819 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1820 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1821 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1822 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1823 
1824 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1825 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1826 
1827 	set_thread(1);
1828 
1829 	ch2 = spdk_get_io_channel(bdev);
1830 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1831 
1832 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1833 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1834 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1835 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1836 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1837 
1838 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1839 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1840 
1841 	/* The first reset request is submitted on thread 1, and the second reset request
1842 	 * is submitted on thread 0 while processing the first request.
1843 	 */
1844 	bdev_nvme_submit_request(ch2, first_bdev_io);
1845 	CU_ASSERT(nvme_ctrlr->resetting == true);
1846 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1847 
1848 	set_thread(0);
1849 
1850 	bdev_nvme_submit_request(ch1, second_bdev_io);
1851 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1852 
1853 	poll_threads();
1854 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1855 	poll_threads();
1856 
1857 	CU_ASSERT(nvme_ctrlr->resetting == false);
1858 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1859 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1860 
1861 	/* The first reset request is submitted on thread 1, and the second reset request
1862 	 * is submitted on thread 0 while processing the first request.
1863 	 *
1864 	 * The difference from the above scenario is that the controller is removed while
1865 	 * processing the first request. Hence both reset requests should fail.
1866 	 */
1867 	set_thread(1);
1868 
1869 	bdev_nvme_submit_request(ch2, first_bdev_io);
1870 	CU_ASSERT(nvme_ctrlr->resetting == true);
1871 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1872 
1873 	set_thread(0);
1874 
1875 	bdev_nvme_submit_request(ch1, second_bdev_io);
1876 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1877 
1878 	ctrlr->fail_reset = true;
1879 
1880 	poll_threads();
1881 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1882 	poll_threads();
1883 
1884 	CU_ASSERT(nvme_ctrlr->resetting == false);
1885 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1886 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1887 
1888 	spdk_put_io_channel(ch1);
1889 
1890 	set_thread(1);
1891 
1892 	spdk_put_io_channel(ch2);
1893 
1894 	poll_threads();
1895 
1896 	set_thread(0);
1897 
1898 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1899 	CU_ASSERT(rc == 0);
1900 
1901 	poll_threads();
1902 	spdk_delay_us(1000);
1903 	poll_threads();
1904 
1905 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1906 
1907 	free(first_bdev_io);
1908 	free(second_bdev_io);
1909 }
1910 
1911 static void
1912 test_attach_ctrlr(void)
1913 {
1914 	struct spdk_nvme_transport_id trid = {};
1915 	struct spdk_nvme_ctrlr *ctrlr;
1916 	struct nvme_ctrlr *nvme_ctrlr;
1917 	const int STRING_SIZE = 32;
1918 	const char *attached_names[STRING_SIZE];
1919 	struct nvme_bdev *nbdev;
1920 	int rc;
1921 
1922 	set_thread(0);
1923 
1924 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1925 	ut_init_trid(&trid);
1926 
1927 	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
1928 	 * by probe polling.
1929 	 */
1930 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1931 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1932 
1933 	ctrlr->is_failed = true;
1934 	g_ut_attach_ctrlr_status = -EIO;
1935 	g_ut_attach_bdev_count = 0;
1936 
1937 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1938 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1939 	CU_ASSERT(rc == 0);
1940 
1941 	spdk_delay_us(1000);
1942 	poll_threads();
1943 
1944 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1945 
1946 	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
1947 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1948 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1949 
1950 	g_ut_attach_ctrlr_status = 0;
1951 
1952 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1953 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1954 	CU_ASSERT(rc == 0);
1955 
1956 	spdk_delay_us(1000);
1957 	poll_threads();
1958 
1959 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1960 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1961 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1962 
1963 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1964 	CU_ASSERT(rc == 0);
1965 
1966 	poll_threads();
1967 	spdk_delay_us(1000);
1968 	poll_threads();
1969 
1970 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1971 
1972 	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
1973 	 * one nvme_bdev is created.
1974 	 */
1975 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1976 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1977 
1978 	g_ut_attach_bdev_count = 1;
1979 
1980 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1981 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1982 	CU_ASSERT(rc == 0);
1983 
1984 	spdk_delay_us(1000);
1985 	poll_threads();
1986 
1987 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1988 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1989 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1990 
1991 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
1992 	attached_names[0] = NULL;
1993 
1994 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1995 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
1996 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
1997 
1998 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1999 	CU_ASSERT(rc == 0);
2000 
2001 	poll_threads();
2002 	spdk_delay_us(1000);
2003 	poll_threads();
2004 
2005 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2006 
2007 	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
2008 	 * created because creating one nvme_bdev failed.
2009 	 */
2010 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2011 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2012 
2013 	g_ut_register_bdev_status = -EINVAL;
2014 	g_ut_attach_bdev_count = 0;
2015 
2016 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2017 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2018 	CU_ASSERT(rc == 0);
2019 
2020 	spdk_delay_us(1000);
2021 	poll_threads();
2022 
2023 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2024 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2025 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2026 
2027 	CU_ASSERT(attached_names[0] == NULL);
2028 
2029 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2030 	CU_ASSERT(rc == 0);
2031 
2032 	poll_threads();
2033 	spdk_delay_us(1000);
2034 	poll_threads();
2035 
2036 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2037 
2038 	g_ut_register_bdev_status = 0;
2039 }
2040 
2041 static void
2042 test_aer_cb(void)
2043 {
2044 	struct spdk_nvme_transport_id trid = {};
2045 	struct spdk_nvme_ctrlr *ctrlr;
2046 	struct nvme_ctrlr *nvme_ctrlr;
2047 	struct nvme_bdev *bdev;
2048 	const int STRING_SIZE = 32;
2049 	const char *attached_names[STRING_SIZE];
2050 	union spdk_nvme_async_event_completion event = {};
2051 	struct spdk_nvme_cpl cpl = {};
2052 	int rc;
2053 
2054 	set_thread(0);
2055 
2056 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2057 	ut_init_trid(&trid);
2058 
2059 	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
2060 	 * namespaces are populated.
2061 	 */
2062 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2063 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2064 
2065 	ctrlr->ns[0].is_active = false;
2066 
2067 	g_ut_attach_ctrlr_status = 0;
2068 	g_ut_attach_bdev_count = 3;
2069 
2070 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2071 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2072 	CU_ASSERT(rc == 0);
2073 
2074 	spdk_delay_us(1000);
2075 	poll_threads();
2076 
2077 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2078 	poll_threads();
2079 
2080 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2081 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2082 
2083 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2084 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2085 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2086 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2087 
2088 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2089 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2090 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2091 
2092 	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
2093 	 * change the size of the 4th namespace.
2094 	 */
2095 	ctrlr->ns[0].is_active = true;
2096 	ctrlr->ns[2].is_active = false;
2097 	ctrlr->nsdata[3].nsze = 2048;
2098 
2099 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2100 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2101 	cpl.cdw0 = event.raw;
2102 
2103 	aer_cb(nvme_ctrlr, &cpl);
2104 
2105 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2106 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2107 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2108 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2109 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2110 
2111 	/* Change ANA state of active namespaces. */
2112 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2113 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2114 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2115 
2116 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2117 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2118 	cpl.cdw0 = event.raw;
2119 
2120 	aer_cb(nvme_ctrlr, &cpl);
2121 
2122 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2123 	poll_threads();
2124 
2125 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2126 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2127 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2128 
2129 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2130 	CU_ASSERT(rc == 0);
2131 
2132 	poll_threads();
2133 	spdk_delay_us(1000);
2134 	poll_threads();
2135 
2136 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2137 }
2138 
2139 static void
2140 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2141 			enum spdk_bdev_io_type io_type)
2142 {
2143 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2144 	struct nvme_io_path *io_path;
2145 	struct spdk_nvme_qpair *qpair;
2146 
2147 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2148 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2149 	qpair = io_path->qpair->qpair;
2150 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2151 
2152 	bdev_io->type = io_type;
2153 	bdev_io->internal.in_submit_request = true;
2154 
2155 	bdev_nvme_submit_request(ch, bdev_io);
2156 
2157 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2158 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2159 
2160 	poll_threads();
2161 
2162 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2163 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2164 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2165 }
2166 
2167 static void
2168 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2169 		   enum spdk_bdev_io_type io_type)
2170 {
2171 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2172 	struct nvme_io_path *io_path;
2173 	struct spdk_nvme_qpair *qpair;
2174 
2175 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2176 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2177 	qpair = io_path->qpair->qpair;
2178 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2179 
2180 	bdev_io->type = io_type;
2181 	bdev_io->internal.in_submit_request = true;
2182 
2183 	bdev_nvme_submit_request(ch, bdev_io);
2184 
2185 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2186 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2187 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2188 }
2189 
2190 static void
2191 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2192 {
2193 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2194 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2195 	struct ut_nvme_req *req;
2196 	struct nvme_io_path *io_path;
2197 	struct spdk_nvme_qpair *qpair;
2198 
2199 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2200 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2201 	qpair = io_path->qpair->qpair;
2202 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2203 
2204 	/* Only compare and write now. */
2205 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2206 	bdev_io->internal.in_submit_request = true;
2207 
2208 	bdev_nvme_submit_request(ch, bdev_io);
2209 
2210 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2211 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2212 	CU_ASSERT(bio->first_fused_submitted == true);
2213 
2214 	/* First outstanding request is compare operation. */
2215 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2216 	SPDK_CU_ASSERT_FATAL(req != NULL);
2217 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2218 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2219 
2220 	poll_threads();
2221 
2222 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2223 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2224 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2225 }
2226 
2227 static void
2228 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2229 			 struct spdk_nvme_ctrlr *ctrlr)
2230 {
2231 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2232 	bdev_io->internal.in_submit_request = true;
2233 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2234 
2235 	bdev_nvme_submit_request(ch, bdev_io);
2236 
2237 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2238 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2239 
2240 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2241 	poll_thread_times(1, 1);
2242 
2243 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2244 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2245 
2246 	poll_thread_times(0, 1);
2247 
2248 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2249 }
2250 
2251 static void
2252 test_submit_nvme_cmd(void)
2253 {
2254 	struct spdk_nvme_transport_id trid = {};
2255 	struct spdk_nvme_ctrlr *ctrlr;
2256 	struct nvme_ctrlr *nvme_ctrlr;
2257 	const int STRING_SIZE = 32;
2258 	const char *attached_names[STRING_SIZE];
2259 	struct nvme_bdev *bdev;
2260 	struct spdk_bdev_io *bdev_io;
2261 	struct spdk_io_channel *ch;
2262 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2263 	int rc;
2264 
2265 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2266 	ut_init_trid(&trid);
2267 
2268 	set_thread(1);
2269 
2270 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2271 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2272 
2273 	g_ut_attach_ctrlr_status = 0;
2274 	g_ut_attach_bdev_count = 1;
2275 
2276 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2277 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2278 	CU_ASSERT(rc == 0);
2279 
2280 	spdk_delay_us(1000);
2281 	poll_threads();
2282 
2283 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2284 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2285 
2286 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2287 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2288 
2289 	set_thread(0);
2290 
2291 	ch = spdk_get_io_channel(bdev);
2292 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2293 
2294 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2295 
2296 	bdev_io->u.bdev.iovs = NULL;
2297 
2298 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2299 
2300 	ut_bdev_io_set_buf(bdev_io);
2301 
2302 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2303 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2304 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2305 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2306 
2307 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2308 
2309 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2310 
2311 	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
2312 	bdev_io->u.bdev.ext_opts = &ext_io_opts;
2313 	g_ut_readv_ext_called = false;
2314 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2315 	CU_ASSERT(g_ut_readv_ext_called == true);
2316 	g_ut_readv_ext_called = false;
2317 
2318 	g_ut_writev_ext_called = false;
2319 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2320 	CU_ASSERT(g_ut_writev_ext_called == true);
2321 	g_ut_writev_ext_called = false;
2322 	bdev_io->u.bdev.ext_opts = NULL;
2323 
2324 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2325 
2326 	free(bdev_io);
2327 
2328 	spdk_put_io_channel(ch);
2329 
2330 	poll_threads();
2331 
2332 	set_thread(1);
2333 
2334 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2335 	CU_ASSERT(rc == 0);
2336 
2337 	poll_threads();
2338 	spdk_delay_us(1000);
2339 	poll_threads();
2340 
2341 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2342 }
2343 
2344 static void
2345 test_add_remove_trid(void)
2346 {
2347 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2348 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2349 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2350 	const int STRING_SIZE = 32;
2351 	const char *attached_names[STRING_SIZE];
2352 	struct nvme_path_id *ctrid;
2353 	int rc;
2354 
2355 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2356 	ut_init_trid(&path1.trid);
2357 	ut_init_trid2(&path2.trid);
2358 	ut_init_trid3(&path3.trid);
2359 
2360 	set_thread(0);
2361 
2362 	g_ut_attach_ctrlr_status = 0;
2363 	g_ut_attach_bdev_count = 0;
2364 
2365 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2366 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2367 
2368 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2369 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2370 	CU_ASSERT(rc == 0);
2371 
2372 	spdk_delay_us(1000);
2373 	poll_threads();
2374 
2375 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2376 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2377 
2378 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2379 
2380 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2381 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2382 
2383 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2384 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2385 	CU_ASSERT(rc == 0);
2386 
2387 	spdk_delay_us(1000);
2388 	poll_threads();
2389 
2390 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2391 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2392 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2393 			break;
2394 		}
2395 	}
2396 	CU_ASSERT(ctrid != NULL);
2397 
2398 	/* trid3 is not in the registered list. */
2399 	rc = bdev_nvme_delete("nvme0", &path3);
2400 	CU_ASSERT(rc == -ENXIO);
2401 
2402 	/* trid2 is not used, and simply removed. */
2403 	rc = bdev_nvme_delete("nvme0", &path2);
2404 	CU_ASSERT(rc == 0);
2405 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2406 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2407 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2408 	}
2409 
2410 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2411 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2412 
2413 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2414 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2415 	CU_ASSERT(rc == 0);
2416 
2417 	spdk_delay_us(1000);
2418 	poll_threads();
2419 
2420 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2421 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2422 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2423 			break;
2424 		}
2425 	}
2426 	CU_ASSERT(ctrid != NULL);
2427 
2428 	/* path1 is currently used and path3 is an alternative path.
2429 	 * If we remove path1, path is changed to path3.
2430 	 */
2431 	rc = bdev_nvme_delete("nvme0", &path1);
2432 	CU_ASSERT(rc == 0);
2433 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2434 	CU_ASSERT(nvme_ctrlr->resetting == true);
2435 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2436 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2437 	}
2438 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2439 
2440 	poll_threads();
2441 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2442 	poll_threads();
2443 
2444 	CU_ASSERT(nvme_ctrlr->resetting == false);
2445 
2446 	/* path3 is the current and only path. If we remove path3, the corresponding
2447 	 * nvme_ctrlr is removed.
2448 	 */
2449 	rc = bdev_nvme_delete("nvme0", &path3);
2450 	CU_ASSERT(rc == 0);
2451 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2452 
2453 	poll_threads();
2454 	spdk_delay_us(1000);
2455 	poll_threads();
2456 
2457 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2458 
2459 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2460 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2461 
2462 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2463 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2464 	CU_ASSERT(rc == 0);
2465 
2466 	spdk_delay_us(1000);
2467 	poll_threads();
2468 
2469 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2470 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2471 
2472 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2473 
2474 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2475 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2476 
2477 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2478 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2479 	CU_ASSERT(rc == 0);
2480 
2481 	spdk_delay_us(1000);
2482 	poll_threads();
2483 
2484 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2485 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2486 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2487 			break;
2488 		}
2489 	}
2490 	CU_ASSERT(ctrid != NULL);
2491 
2492 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2493 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2494 	CU_ASSERT(rc == 0);
2495 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2496 
2497 	poll_threads();
2498 	spdk_delay_us(1000);
2499 	poll_threads();
2500 
2501 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2502 }
2503 
2504 static void
2505 test_abort(void)
2506 {
2507 	struct spdk_nvme_transport_id trid = {};
2508 	struct nvme_ctrlr_opts opts = {};
2509 	struct spdk_nvme_ctrlr *ctrlr;
2510 	struct nvme_ctrlr *nvme_ctrlr;
2511 	const int STRING_SIZE = 32;
2512 	const char *attached_names[STRING_SIZE];
2513 	struct nvme_bdev *bdev;
2514 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2515 	struct spdk_io_channel *ch1, *ch2;
2516 	struct nvme_bdev_channel *nbdev_ch1;
2517 	struct nvme_io_path *io_path1;
2518 	struct nvme_qpair *nvme_qpair1;
2519 	int rc;
2520 
2521 	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2522 	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
2523 	 * are submitted on thread 1. Both should succeed.
2524 	 */
2525 
2526 	ut_init_trid(&trid);
2527 
2528 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2529 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2530 
2531 	g_ut_attach_ctrlr_status = 0;
2532 	g_ut_attach_bdev_count = 1;
2533 
2534 	set_thread(1);
2535 
2536 	opts.ctrlr_loss_timeout_sec = -1;
2537 	opts.reconnect_delay_sec = 1;
2538 
2539 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2540 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2541 	CU_ASSERT(rc == 0);
2542 
2543 	spdk_delay_us(1000);
2544 	poll_threads();
2545 
2546 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2547 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2548 
2549 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2550 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2551 
2552 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2553 	ut_bdev_io_set_buf(write_io);
2554 
2555 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2556 	ut_bdev_io_set_buf(fuse_io);
2557 
2558 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2559 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2560 
2561 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2562 
2563 	set_thread(0);
2564 
2565 	ch1 = spdk_get_io_channel(bdev);
2566 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2567 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2568 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2569 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2570 	nvme_qpair1 = io_path1->qpair;
2571 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2572 
2573 	set_thread(1);
2574 
2575 	ch2 = spdk_get_io_channel(bdev);
2576 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2577 
2578 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2579 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2580 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2581 
2582 	/* Aborting the already completed request should fail. */
2583 	write_io->internal.in_submit_request = true;
2584 	bdev_nvme_submit_request(ch1, write_io);
2585 	poll_threads();
2586 
2587 	CU_ASSERT(write_io->internal.in_submit_request == false);
2588 
2589 	abort_io->u.abort.bio_to_abort = write_io;
2590 	abort_io->internal.in_submit_request = true;
2591 
2592 	bdev_nvme_submit_request(ch1, abort_io);
2593 
2594 	poll_threads();
2595 
2596 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2597 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2598 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2599 
2600 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2601 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2602 
2603 	admin_io->internal.in_submit_request = true;
2604 	bdev_nvme_submit_request(ch1, admin_io);
2605 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2606 	poll_threads();
2607 
2608 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2609 
2610 	abort_io->u.abort.bio_to_abort = admin_io;
2611 	abort_io->internal.in_submit_request = true;
2612 
2613 	bdev_nvme_submit_request(ch2, abort_io);
2614 
2615 	poll_threads();
2616 
2617 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2618 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2619 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2620 
2621 	/* Aborting the write request should succeed. */
2622 	write_io->internal.in_submit_request = true;
2623 	bdev_nvme_submit_request(ch1, write_io);
2624 
2625 	CU_ASSERT(write_io->internal.in_submit_request == true);
2626 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2627 
2628 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2629 	abort_io->u.abort.bio_to_abort = write_io;
2630 	abort_io->internal.in_submit_request = true;
2631 
2632 	bdev_nvme_submit_request(ch1, abort_io);
2633 
2634 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2635 	poll_threads();
2636 
2637 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2638 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2639 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2640 	CU_ASSERT(write_io->internal.in_submit_request == false);
2641 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2642 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2643 
2644 	/* Aborting the fuse request should succeed. */
2645 	fuse_io->internal.in_submit_request = true;
2646 	bdev_nvme_submit_request(ch1, fuse_io);
2647 
2648 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2649 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2650 
2651 	abort_io->u.abort.bio_to_abort = fuse_io;
2652 	abort_io->internal.in_submit_request = true;
2653 
2654 	bdev_nvme_submit_request(ch1, abort_io);
2655 
2656 	spdk_delay_us(10000);
2657 	poll_threads();
2658 
2659 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2660 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2661 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2662 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2663 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2664 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2665 
2666 	/* Aborting the admin request should succeed. */
2667 	admin_io->internal.in_submit_request = true;
2668 	bdev_nvme_submit_request(ch1, admin_io);
2669 
2670 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2671 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2672 
2673 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2674 	abort_io->u.abort.bio_to_abort = admin_io;
2675 	abort_io->internal.in_submit_request = true;
2676 
2677 	bdev_nvme_submit_request(ch2, abort_io);
2678 
2679 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2680 	poll_threads();
2681 
2682 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2683 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2684 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2685 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2686 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2687 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2688 
2689 	set_thread(0);
2690 
2691 	/* If qpair is disconnected, it is freed and then reconnected via resetting
2692 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2693 	 * while resetting the nvme_ctrlr.
2694 	 */
2695 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2696 
2697 	poll_thread_times(0, 3);
2698 
2699 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2700 	CU_ASSERT(nvme_ctrlr->resetting == true);
2701 
2702 	write_io->internal.in_submit_request = true;
2703 
2704 	bdev_nvme_submit_request(ch1, write_io);
2705 
2706 	CU_ASSERT(write_io->internal.in_submit_request == true);
2707 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2708 
2709 	/* Aborting the queued write request should succeed immediately. */
2710 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2711 	abort_io->u.abort.bio_to_abort = write_io;
2712 	abort_io->internal.in_submit_request = true;
2713 
2714 	bdev_nvme_submit_request(ch1, abort_io);
2715 
2716 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2717 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2718 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2719 	CU_ASSERT(write_io->internal.in_submit_request == false);
2720 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2721 
2722 	poll_threads();
2723 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2724 	poll_threads();
2725 
2726 	spdk_put_io_channel(ch1);
2727 
2728 	set_thread(1);
2729 
2730 	spdk_put_io_channel(ch2);
2731 
2732 	poll_threads();
2733 
2734 	free(write_io);
2735 	free(fuse_io);
2736 	free(admin_io);
2737 	free(abort_io);
2738 
2739 	set_thread(1);
2740 
2741 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2742 	CU_ASSERT(rc == 0);
2743 
2744 	poll_threads();
2745 	spdk_delay_us(1000);
2746 	poll_threads();
2747 
2748 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2749 }
2750 
2751 static void
2752 test_get_io_qpair(void)
2753 {
2754 	struct spdk_nvme_transport_id trid = {};
2755 	struct spdk_nvme_ctrlr ctrlr = {};
2756 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2757 	struct spdk_io_channel *ch;
2758 	struct nvme_ctrlr_channel *ctrlr_ch;
2759 	struct spdk_nvme_qpair *qpair;
2760 	int rc;
2761 
2762 	ut_init_trid(&trid);
2763 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2764 
2765 	set_thread(0);
2766 
2767 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2768 	CU_ASSERT(rc == 0);
2769 
2770 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2771 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2772 
2773 	ch = spdk_get_io_channel(nvme_ctrlr);
2774 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2775 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2776 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2777 
2778 	qpair = bdev_nvme_get_io_qpair(ch);
2779 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2780 
2781 	spdk_put_io_channel(ch);
2782 
2783 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2784 	CU_ASSERT(rc == 0);
2785 
2786 	poll_threads();
2787 	spdk_delay_us(1000);
2788 	poll_threads();
2789 
2790 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2791 }
2792 
2793 /* Test a scenario that the bdev subsystem starts shutdown when there still exists
2794  * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
2795  * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
2796  * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
2797  */
2798 static void
2799 test_bdev_unregister(void)
2800 {
2801 	struct spdk_nvme_transport_id trid = {};
2802 	struct spdk_nvme_ctrlr *ctrlr;
2803 	struct nvme_ctrlr *nvme_ctrlr;
2804 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2805 	const int STRING_SIZE = 32;
2806 	const char *attached_names[STRING_SIZE];
2807 	struct nvme_bdev *bdev1, *bdev2;
2808 	int rc;
2809 
2810 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2811 	ut_init_trid(&trid);
2812 
2813 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2814 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2815 
2816 	g_ut_attach_ctrlr_status = 0;
2817 	g_ut_attach_bdev_count = 2;
2818 
2819 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2820 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2821 	CU_ASSERT(rc == 0);
2822 
2823 	spdk_delay_us(1000);
2824 	poll_threads();
2825 
2826 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2827 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2828 
2829 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2830 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2831 
2832 	bdev1 = nvme_ns1->bdev;
2833 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2834 
2835 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2836 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2837 
2838 	bdev2 = nvme_ns2->bdev;
2839 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2840 
2841 	bdev_nvme_destruct(&bdev1->disk);
2842 	bdev_nvme_destruct(&bdev2->disk);
2843 
2844 	poll_threads();
2845 
2846 	CU_ASSERT(nvme_ns1->bdev == NULL);
2847 	CU_ASSERT(nvme_ns2->bdev == NULL);
2848 
2849 	nvme_ctrlr->destruct = true;
2850 	_nvme_ctrlr_destruct(nvme_ctrlr);
2851 
2852 	poll_threads();
2853 	spdk_delay_us(1000);
2854 	poll_threads();
2855 
2856 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2857 }
2858 
2859 static void
2860 test_compare_ns(void)
2861 {
2862 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2863 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2864 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2865 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2866 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2867 
2868 	/* No IDs are defined. */
2869 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2870 
2871 	/* Only EUI64 are defined and not matched. */
2872 	nsdata1.eui64 = 0xABCDEF0123456789;
2873 	nsdata2.eui64 = 0xBBCDEF0123456789;
2874 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2875 
2876 	/* Only EUI64 are defined and matched. */
2877 	nsdata2.eui64 = 0xABCDEF0123456789;
2878 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2879 
2880 	/* Only NGUID are defined and not matched. */
2881 	nsdata1.eui64 = 0x0;
2882 	nsdata2.eui64 = 0x0;
2883 	nsdata1.nguid[0] = 0x12;
2884 	nsdata2.nguid[0] = 0x10;
2885 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2886 
2887 	/* Only NGUID are defined and matched. */
2888 	nsdata2.nguid[0] = 0x12;
2889 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2890 
2891 	/* Only UUID are defined and not matched. */
2892 	nsdata1.nguid[0] = 0x0;
2893 	nsdata2.nguid[0] = 0x0;
2894 	ns1.uuid = &uuid1;
2895 	ns2.uuid = &uuid2;
2896 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2897 
2898 	/* Only one UUID is defined. */
2899 	ns1.uuid = NULL;
2900 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2901 
2902 	/* Only UUID are defined and matched. */
2903 	ns1.uuid = &uuid2;
2904 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2905 
2906 	/* All EUI64, NGUID, and UUID are defined and matched. */
2907 	nsdata1.eui64 = 0x123456789ABCDEF;
2908 	nsdata2.eui64 = 0x123456789ABCDEF;
2909 	nsdata1.nguid[15] = 0x34;
2910 	nsdata2.nguid[15] = 0x34;
2911 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2912 
2913 	/* CSI are not matched. */
2914 	ns1.csi = SPDK_NVME_CSI_ZNS;
2915 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2916 }
2917 
2918 static void
2919 test_init_ana_log_page(void)
2920 {
2921 	struct spdk_nvme_transport_id trid = {};
2922 	struct spdk_nvme_ctrlr *ctrlr;
2923 	struct nvme_ctrlr *nvme_ctrlr;
2924 	const int STRING_SIZE = 32;
2925 	const char *attached_names[STRING_SIZE];
2926 	int rc;
2927 
2928 	set_thread(0);
2929 
2930 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2931 	ut_init_trid(&trid);
2932 
2933 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2934 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2935 
2936 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2937 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2938 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2939 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2940 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2941 
2942 	g_ut_attach_ctrlr_status = 0;
2943 	g_ut_attach_bdev_count = 5;
2944 
2945 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2946 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2947 	CU_ASSERT(rc == 0);
2948 
2949 	spdk_delay_us(1000);
2950 	poll_threads();
2951 
2952 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2953 	poll_threads();
2954 
2955 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2956 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2957 
2958 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2959 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2960 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2961 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2962 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2963 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2964 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2965 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2966 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2967 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2968 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2969 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2970 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2971 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2972 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2973 
2974 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2975 	CU_ASSERT(rc == 0);
2976 
2977 	poll_threads();
2978 	spdk_delay_us(1000);
2979 	poll_threads();
2980 
2981 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2982 }
2983 
2984 static void
2985 init_accel(void)
2986 {
2987 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2988 				sizeof(int), "accel_p");
2989 }
2990 
2991 static void
2992 fini_accel(void)
2993 {
2994 	spdk_io_device_unregister(g_accel_p, NULL);
2995 }
2996 
2997 static void
2998 test_get_memory_domains(void)
2999 {
3000 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3001 	struct nvme_ns ns = { .ctrlr = &ctrlr };
3002 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3003 	struct spdk_memory_domain *domains[2] = {};
3004 	int rc = 0;
3005 
3006 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
3007 
3008 	/* nvme controller doesn't have memory domainы */
3009 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
3010 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3011 	CU_ASSERT(rc == 0)
3012 
3013 	/* nvme controller has a memory domain */
3014 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
3015 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3016 	CU_ASSERT(rc == 1);
3017 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
3018 }
3019 
3020 static void
3021 test_reconnect_qpair(void)
3022 {
3023 	struct spdk_nvme_transport_id trid = {};
3024 	struct spdk_nvme_ctrlr *ctrlr;
3025 	struct nvme_ctrlr *nvme_ctrlr;
3026 	const int STRING_SIZE = 32;
3027 	const char *attached_names[STRING_SIZE];
3028 	struct nvme_bdev *bdev;
3029 	struct spdk_io_channel *ch1, *ch2;
3030 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3031 	struct nvme_io_path *io_path1, *io_path2;
3032 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3033 	int rc;
3034 
3035 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3036 	ut_init_trid(&trid);
3037 
3038 	set_thread(0);
3039 
3040 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3041 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3042 
3043 	g_ut_attach_ctrlr_status = 0;
3044 	g_ut_attach_bdev_count = 1;
3045 
3046 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3047 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3048 	CU_ASSERT(rc == 0);
3049 
3050 	spdk_delay_us(1000);
3051 	poll_threads();
3052 
3053 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3054 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3055 
3056 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3057 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3058 
3059 	ch1 = spdk_get_io_channel(bdev);
3060 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3061 
3062 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3063 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3064 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3065 	nvme_qpair1 = io_path1->qpair;
3066 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3067 
3068 	set_thread(1);
3069 
3070 	ch2 = spdk_get_io_channel(bdev);
3071 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3072 
3073 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3074 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3075 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3076 	nvme_qpair2 = io_path2->qpair;
3077 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3078 
3079 	/* If a qpair is disconnected, it is freed and then reconnected via
3080 	 * resetting the corresponding nvme_ctrlr.
3081 	 */
3082 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3083 	ctrlr->is_failed = true;
3084 
3085 	poll_thread_times(1, 3);
3086 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3087 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3088 	CU_ASSERT(nvme_ctrlr->resetting == true);
3089 
3090 	poll_thread_times(0, 3);
3091 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3092 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3093 	CU_ASSERT(ctrlr->is_failed == true);
3094 
3095 	poll_thread_times(1, 2);
3096 	poll_thread_times(0, 1);
3097 	CU_ASSERT(ctrlr->is_failed == false);
3098 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3099 
3100 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3101 	poll_thread_times(0, 2);
3102 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3103 
3104 	poll_thread_times(0, 1);
3105 	poll_thread_times(1, 1);
3106 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3107 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3108 	CU_ASSERT(nvme_ctrlr->resetting == true);
3109 
3110 	poll_thread_times(0, 2);
3111 	poll_thread_times(1, 1);
3112 	poll_thread_times(0, 1);
3113 	CU_ASSERT(nvme_ctrlr->resetting == false);
3114 
3115 	poll_threads();
3116 
3117 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3118 	 * fails, the qpair is just freed.
3119 	 */
3120 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3121 	ctrlr->is_failed = true;
3122 	ctrlr->fail_reset = true;
3123 
3124 	poll_thread_times(1, 3);
3125 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3126 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3127 	CU_ASSERT(nvme_ctrlr->resetting == true);
3128 
3129 	poll_thread_times(0, 3);
3130 	poll_thread_times(1, 1);
3131 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3132 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3133 	CU_ASSERT(ctrlr->is_failed == true);
3134 
3135 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3136 	poll_thread_times(0, 3);
3137 	poll_thread_times(1, 1);
3138 	poll_thread_times(0, 1);
3139 	CU_ASSERT(ctrlr->is_failed == true);
3140 	CU_ASSERT(nvme_ctrlr->resetting == false);
3141 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3142 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3143 
3144 	poll_threads();
3145 
3146 	spdk_put_io_channel(ch2);
3147 
3148 	set_thread(0);
3149 
3150 	spdk_put_io_channel(ch1);
3151 
3152 	poll_threads();
3153 
3154 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3155 	CU_ASSERT(rc == 0);
3156 
3157 	poll_threads();
3158 	spdk_delay_us(1000);
3159 	poll_threads();
3160 
3161 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3162 }
3163 
3164 static void
3165 test_create_bdev_ctrlr(void)
3166 {
3167 	struct nvme_path_id path1 = {}, path2 = {};
3168 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3169 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3170 	const int STRING_SIZE = 32;
3171 	const char *attached_names[STRING_SIZE];
3172 	int rc;
3173 
3174 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3175 	ut_init_trid(&path1.trid);
3176 	ut_init_trid2(&path2.trid);
3177 
3178 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3179 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3180 
3181 	g_ut_attach_ctrlr_status = 0;
3182 	g_ut_attach_bdev_count = 0;
3183 
3184 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3185 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3186 	CU_ASSERT(rc == 0);
3187 
3188 	spdk_delay_us(1000);
3189 	poll_threads();
3190 
3191 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3192 	poll_threads();
3193 
3194 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3195 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3196 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3197 
3198 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3199 	g_ut_attach_ctrlr_status = -EINVAL;
3200 
3201 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3202 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3203 
3204 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3205 
3206 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3207 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3208 	CU_ASSERT(rc == 0);
3209 
3210 	spdk_delay_us(1000);
3211 	poll_threads();
3212 
3213 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3214 	poll_threads();
3215 
3216 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3217 
3218 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3219 	g_ut_attach_ctrlr_status = 0;
3220 
3221 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3222 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3223 
3224 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3225 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3226 	CU_ASSERT(rc == 0);
3227 
3228 	spdk_delay_us(1000);
3229 	poll_threads();
3230 
3231 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3232 	poll_threads();
3233 
3234 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3235 
3236 	/* Delete two ctrlrs at once. */
3237 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3238 	CU_ASSERT(rc == 0);
3239 
3240 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3241 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3242 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3243 
3244 	poll_threads();
3245 	spdk_delay_us(1000);
3246 	poll_threads();
3247 
3248 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3249 
3250 	/* Add two ctrlrs and delete one by one. */
3251 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3252 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3253 
3254 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3255 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3256 
3257 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3258 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3259 	CU_ASSERT(rc == 0);
3260 
3261 	spdk_delay_us(1000);
3262 	poll_threads();
3263 
3264 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3265 	poll_threads();
3266 
3267 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3268 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3269 	CU_ASSERT(rc == 0);
3270 
3271 	spdk_delay_us(1000);
3272 	poll_threads();
3273 
3274 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3275 	poll_threads();
3276 
3277 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3278 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3279 
3280 	rc = bdev_nvme_delete("nvme0", &path1);
3281 	CU_ASSERT(rc == 0);
3282 
3283 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3284 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3285 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3286 
3287 	poll_threads();
3288 	spdk_delay_us(1000);
3289 	poll_threads();
3290 
3291 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3292 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3293 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3294 
3295 	rc = bdev_nvme_delete("nvme0", &path2);
3296 	CU_ASSERT(rc == 0);
3297 
3298 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3299 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3300 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3301 
3302 	poll_threads();
3303 	spdk_delay_us(1000);
3304 	poll_threads();
3305 
3306 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3307 }
3308 
3309 static struct nvme_ns *
3310 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3311 {
3312 	struct nvme_ns *nvme_ns;
3313 
3314 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3315 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3316 			return nvme_ns;
3317 		}
3318 	}
3319 
3320 	return NULL;
3321 }
3322 
3323 static void
3324 test_add_multi_ns_to_bdev(void)
3325 {
3326 	struct nvme_path_id path1 = {}, path2 = {};
3327 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3328 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3329 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3330 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3331 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3332 	const int STRING_SIZE = 32;
3333 	const char *attached_names[STRING_SIZE];
3334 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3335 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3336 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3337 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3338 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3339 	int rc;
3340 
3341 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3342 	ut_init_trid(&path1.trid);
3343 	ut_init_trid2(&path2.trid);
3344 
3345 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3346 
3347 	/* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th
3348 	 * namespaces are populated.
3349 	 */
3350 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3351 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3352 
3353 	ctrlr1->ns[1].is_active = false;
3354 	ctrlr1->ns[4].is_active = false;
3355 	ctrlr1->ns[0].uuid = &uuid1;
3356 	ctrlr1->ns[2].uuid = &uuid3;
3357 	ctrlr1->ns[3].uuid = &uuid4;
3358 
3359 	g_ut_attach_ctrlr_status = 0;
3360 	g_ut_attach_bdev_count = 3;
3361 
3362 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3363 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3364 	CU_ASSERT(rc == 0);
3365 
3366 	spdk_delay_us(1000);
3367 	poll_threads();
3368 
3369 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3370 	poll_threads();
3371 
3372 	/* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th
3373 	 * namespaces are populated. The uuid of 4th namespace is different, and hence
3374 	 * adding 4th namespace to a bdev should fail.
3375 	 */
3376 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3377 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3378 
3379 	ctrlr2->ns[2].is_active = false;
3380 	ctrlr2->ns[4].is_active = false;
3381 	ctrlr2->ns[0].uuid = &uuid1;
3382 	ctrlr2->ns[1].uuid = &uuid2;
3383 	ctrlr2->ns[3].uuid = &uuid44;
3384 
3385 	g_ut_attach_ctrlr_status = 0;
3386 	g_ut_attach_bdev_count = 2;
3387 
3388 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3389 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3390 	CU_ASSERT(rc == 0);
3391 
3392 	spdk_delay_us(1000);
3393 	poll_threads();
3394 
3395 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3396 	poll_threads();
3397 
3398 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3399 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3400 
3401 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3402 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3403 
3404 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3405 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3406 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3407 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3408 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3409 
3410 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3411 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3412 
3413 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3414 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3415 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3416 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3417 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3418 
3419 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3420 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3421 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3422 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3423 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3424 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3425 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3426 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3427 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3428 
3429 	CU_ASSERT(bdev1->ref == 2);
3430 	CU_ASSERT(bdev2->ref == 1);
3431 	CU_ASSERT(bdev3->ref == 1);
3432 	CU_ASSERT(bdev4->ref == 1);
3433 
3434 	/* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */
3435 	rc = bdev_nvme_delete("nvme0", &path1);
3436 	CU_ASSERT(rc == 0);
3437 
3438 	poll_threads();
3439 	spdk_delay_us(1000);
3440 	poll_threads();
3441 
3442 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3443 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3444 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3445 
3446 	rc = bdev_nvme_delete("nvme0", &path2);
3447 	CU_ASSERT(rc == 0);
3448 
3449 	poll_threads();
3450 	spdk_delay_us(1000);
3451 	poll_threads();
3452 
3453 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3454 
3455 	/* Test if a nvme_bdev which has a shared namespace between two ctrlrs
3456 	 * can be deleted when the bdev subsystem shutdown.
3457 	 */
3458 	g_ut_attach_bdev_count = 1;
3459 
3460 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3461 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3462 
3463 	ctrlr1->ns[0].uuid = &uuid1;
3464 
3465 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3466 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3467 	CU_ASSERT(rc == 0);
3468 
3469 	spdk_delay_us(1000);
3470 	poll_threads();
3471 
3472 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3473 	poll_threads();
3474 
3475 	ut_init_trid2(&path2.trid);
3476 
3477 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3478 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3479 
3480 	ctrlr2->ns[0].uuid = &uuid1;
3481 
3482 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3483 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3484 	CU_ASSERT(rc == 0);
3485 
3486 	spdk_delay_us(1000);
3487 	poll_threads();
3488 
3489 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3490 	poll_threads();
3491 
3492 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3493 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3494 
3495 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3496 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3497 
3498 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3499 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3500 
3501 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3502 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3503 
3504 	/* Check if a nvme_bdev has two nvme_ns. */
3505 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3506 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3507 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3508 
3509 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3510 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3511 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3512 
3513 	/* Delete nvme_bdev first when the bdev subsystem shutdown. */
3514 	bdev_nvme_destruct(&bdev1->disk);
3515 
3516 	poll_threads();
3517 
3518 	CU_ASSERT(nvme_ns1->bdev == NULL);
3519 	CU_ASSERT(nvme_ns2->bdev == NULL);
3520 
3521 	nvme_ctrlr1->destruct = true;
3522 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3523 
3524 	poll_threads();
3525 	spdk_delay_us(1000);
3526 	poll_threads();
3527 
3528 	nvme_ctrlr2->destruct = true;
3529 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3530 
3531 	poll_threads();
3532 	spdk_delay_us(1000);
3533 	poll_threads();
3534 
3535 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3536 }
3537 
3538 static void
3539 test_add_multi_io_paths_to_nbdev_ch(void)
3540 {
3541 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3542 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3543 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3544 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3545 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3546 	const int STRING_SIZE = 32;
3547 	const char *attached_names[STRING_SIZE];
3548 	struct nvme_bdev *bdev;
3549 	struct spdk_io_channel *ch;
3550 	struct nvme_bdev_channel *nbdev_ch;
3551 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3552 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3553 	int rc;
3554 
3555 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3556 	ut_init_trid(&path1.trid);
3557 	ut_init_trid2(&path2.trid);
3558 	ut_init_trid3(&path3.trid);
3559 	g_ut_attach_ctrlr_status = 0;
3560 	g_ut_attach_bdev_count = 1;
3561 
3562 	set_thread(1);
3563 
3564 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3565 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3566 
3567 	ctrlr1->ns[0].uuid = &uuid1;
3568 
3569 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3570 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3571 	CU_ASSERT(rc == 0);
3572 
3573 	spdk_delay_us(1000);
3574 	poll_threads();
3575 
3576 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3577 	poll_threads();
3578 
3579 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3580 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3581 
3582 	ctrlr2->ns[0].uuid = &uuid1;
3583 
3584 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3585 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3586 	CU_ASSERT(rc == 0);
3587 
3588 	spdk_delay_us(1000);
3589 	poll_threads();
3590 
3591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3592 	poll_threads();
3593 
3594 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3595 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3596 
3597 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3598 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3599 
3600 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3601 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3602 
3603 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3604 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3605 
3606 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3607 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3608 
3609 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3610 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3611 
3612 	set_thread(0);
3613 
3614 	ch = spdk_get_io_channel(bdev);
3615 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3616 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3617 
3618 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3619 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3620 
3621 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3622 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3623 
3624 	set_thread(1);
3625 
3626 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3627 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3628 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3629 
3630 	ctrlr3->ns[0].uuid = &uuid1;
3631 
3632 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3633 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3634 	CU_ASSERT(rc == 0);
3635 
3636 	spdk_delay_us(1000);
3637 	poll_threads();
3638 
3639 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3640 	poll_threads();
3641 
3642 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3643 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3644 
3645 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3646 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3647 
3648 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3649 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3650 
3651 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3652 	rc = bdev_nvme_delete("nvme0", &path2);
3653 	CU_ASSERT(rc == 0);
3654 
3655 	poll_threads();
3656 	spdk_delay_us(1000);
3657 	poll_threads();
3658 
3659 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3660 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3661 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3662 
3663 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3664 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3665 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3666 
3667 	set_thread(0);
3668 
3669 	spdk_put_io_channel(ch);
3670 
3671 	poll_threads();
3672 
3673 	set_thread(1);
3674 
3675 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3676 	CU_ASSERT(rc == 0);
3677 
3678 	poll_threads();
3679 	spdk_delay_us(1000);
3680 	poll_threads();
3681 
3682 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3683 }
3684 
3685 static void
3686 test_admin_path(void)
3687 {
3688 	struct nvme_path_id path1 = {}, path2 = {};
3689 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3690 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3691 	const int STRING_SIZE = 32;
3692 	const char *attached_names[STRING_SIZE];
3693 	struct nvme_bdev *bdev;
3694 	struct spdk_io_channel *ch;
3695 	struct spdk_bdev_io *bdev_io;
3696 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3697 	int rc;
3698 
3699 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3700 	ut_init_trid(&path1.trid);
3701 	ut_init_trid2(&path2.trid);
3702 	g_ut_attach_ctrlr_status = 0;
3703 	g_ut_attach_bdev_count = 1;
3704 
3705 	set_thread(0);
3706 
3707 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3708 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3709 
3710 	ctrlr1->ns[0].uuid = &uuid1;
3711 
3712 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3713 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3714 	CU_ASSERT(rc == 0);
3715 
3716 	spdk_delay_us(1000);
3717 	poll_threads();
3718 
3719 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3720 	poll_threads();
3721 
3722 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3723 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3724 
3725 	ctrlr2->ns[0].uuid = &uuid1;
3726 
3727 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3728 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3729 	CU_ASSERT(rc == 0);
3730 
3731 	spdk_delay_us(1000);
3732 	poll_threads();
3733 
3734 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3735 	poll_threads();
3736 
3737 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3738 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3739 
3740 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3741 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3742 
3743 	ch = spdk_get_io_channel(bdev);
3744 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3745 
3746 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3747 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3748 
3749 	/* ctrlr1 is failed but ctrlr2 is not failed. admin command is
3750 	 * submitted to ctrlr2.
3751 	 */
3752 	ctrlr1->is_failed = true;
3753 	bdev_io->internal.in_submit_request = true;
3754 
3755 	bdev_nvme_submit_request(ch, bdev_io);
3756 
3757 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3758 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3759 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3760 
3761 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3762 	poll_threads();
3763 
3764 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3765 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3766 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3767 
3768 	/* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */
3769 	ctrlr2->is_failed = true;
3770 	bdev_io->internal.in_submit_request = true;
3771 
3772 	bdev_nvme_submit_request(ch, bdev_io);
3773 
3774 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3775 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3776 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3777 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3778 
3779 	free(bdev_io);
3780 
3781 	spdk_put_io_channel(ch);
3782 
3783 	poll_threads();
3784 
3785 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3786 	CU_ASSERT(rc == 0);
3787 
3788 	poll_threads();
3789 	spdk_delay_us(1000);
3790 	poll_threads();
3791 
3792 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3793 }
3794 
3795 static struct nvme_io_path *
3796 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3797 			struct nvme_ctrlr *nvme_ctrlr)
3798 {
3799 	struct nvme_io_path *io_path;
3800 
3801 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3802 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3803 			return io_path;
3804 		}
3805 	}
3806 
3807 	return NULL;
3808 }
3809 
3810 static void
3811 test_reset_bdev_ctrlr(void)
3812 {
3813 	struct nvme_path_id path1 = {}, path2 = {};
3814 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3815 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3816 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3817 	struct nvme_path_id *curr_path1, *curr_path2;
3818 	const int STRING_SIZE = 32;
3819 	const char *attached_names[STRING_SIZE];
3820 	struct nvme_bdev *bdev;
3821 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3822 	struct nvme_bdev_io *first_bio;
3823 	struct spdk_io_channel *ch1, *ch2;
3824 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3825 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3826 	int rc;
3827 
3828 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3829 	ut_init_trid(&path1.trid);
3830 	ut_init_trid2(&path2.trid);
3831 	g_ut_attach_ctrlr_status = 0;
3832 	g_ut_attach_bdev_count = 1;
3833 
3834 	set_thread(0);
3835 
3836 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3837 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3838 
3839 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3840 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3841 	CU_ASSERT(rc == 0);
3842 
3843 	spdk_delay_us(1000);
3844 	poll_threads();
3845 
3846 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3847 	poll_threads();
3848 
3849 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3850 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3851 
3852 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3853 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3854 	CU_ASSERT(rc == 0);
3855 
3856 	spdk_delay_us(1000);
3857 	poll_threads();
3858 
3859 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3860 	poll_threads();
3861 
3862 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3863 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3864 
3865 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3866 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3867 
3868 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3869 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3870 
3871 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3872 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3873 
3874 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3875 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3876 
3877 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3878 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3879 
3880 	set_thread(0);
3881 
3882 	ch1 = spdk_get_io_channel(bdev);
3883 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3884 
3885 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3886 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3887 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3888 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3889 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3890 
3891 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3892 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3893 
3894 	set_thread(1);
3895 
3896 	ch2 = spdk_get_io_channel(bdev);
3897 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3898 
3899 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3900 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3901 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3902 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3903 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3904 
3905 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3906 
3907 	/* The first reset request from bdev_io is submitted on thread 0.
3908 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
3909 	 *
3910 	 * A few extra polls are necessary after resetting ctrlr1 to check
3911 	 * pending reset requests for ctrlr1.
3912 	 */
3913 	ctrlr1->is_failed = true;
3914 	curr_path1->is_failed = true;
3915 	ctrlr2->is_failed = true;
3916 	curr_path2->is_failed = true;
3917 
3918 	set_thread(0);
3919 
3920 	bdev_nvme_submit_request(ch1, first_bdev_io);
3921 	CU_ASSERT(first_bio->io_path == io_path11);
3922 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3923 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3924 
3925 	poll_thread_times(0, 3);
3926 	CU_ASSERT(io_path11->qpair->qpair == NULL);
3927 	CU_ASSERT(io_path21->qpair->qpair != NULL);
3928 
3929 	poll_thread_times(1, 2);
3930 	CU_ASSERT(io_path11->qpair->qpair == NULL);
3931 	CU_ASSERT(io_path21->qpair->qpair == NULL);
3932 	CU_ASSERT(ctrlr1->is_failed == true);
3933 
3934 	poll_thread_times(0, 1);
3935 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3936 	CU_ASSERT(ctrlr1->is_failed == false);
3937 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
3938 	CU_ASSERT(curr_path1->is_failed == true);
3939 
3940 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3941 	poll_thread_times(0, 2);
3942 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
3943 
3944 	poll_thread_times(0, 1);
3945 	CU_ASSERT(io_path11->qpair->qpair != NULL);
3946 	CU_ASSERT(io_path21->qpair->qpair == NULL);
3947 
3948 	poll_thread_times(1, 1);
3949 	CU_ASSERT(io_path11->qpair->qpair != NULL);
3950 	CU_ASSERT(io_path21->qpair->qpair != NULL);
3951 
3952 	poll_thread_times(0, 2);
3953 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3954 	poll_thread_times(1, 1);
3955 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3956 	poll_thread_times(0, 2);
3957 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3958 	CU_ASSERT(curr_path1->is_failed == false);
3959 	CU_ASSERT(first_bio->io_path == io_path12);
3960 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3961 
3962 	poll_thread_times(0, 3);
3963 	CU_ASSERT(io_path12->qpair->qpair == NULL);
3964 	CU_ASSERT(io_path22->qpair->qpair != NULL);
3965 
3966 	poll_thread_times(1, 2);
3967 	CU_ASSERT(io_path12->qpair->qpair == NULL);
3968 	CU_ASSERT(io_path22->qpair->qpair == NULL);
3969 	CU_ASSERT(ctrlr2->is_failed == true);
3970 
3971 	poll_thread_times(0, 2);
3972 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3973 	CU_ASSERT(ctrlr2->is_failed == false);
3974 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
3975 	CU_ASSERT(curr_path2->is_failed == true);
3976 
3977 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3978 	poll_thread_times(0, 2);
3979 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
3980 
3981 	poll_thread_times(0, 1);
3982 	CU_ASSERT(io_path12->qpair->qpair != NULL);
3983 	CU_ASSERT(io_path22->qpair->qpair == NULL);
3984 
3985 	poll_thread_times(1, 2);
3986 	CU_ASSERT(io_path12->qpair->qpair != NULL);
3987 	CU_ASSERT(io_path22->qpair->qpair != NULL);
3988 
3989 	poll_thread_times(0, 2);
3990 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3991 	poll_thread_times(1, 1);
3992 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3993 	poll_thread_times(0, 2);
3994 	CU_ASSERT(first_bio->io_path == NULL);
3995 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3996 	CU_ASSERT(curr_path2->is_failed == false);
3997 
3998 	poll_threads();
3999 
4000 	/* There is a race between two reset requests from bdev_io.
4001 	 *
4002 	 * The first reset request is submitted on thread 0, and the second reset
4003 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4004 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4005 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4006 	 * The second is pending on ctrlr2 again. After the first completes resetting
4007 	 * ctrl2, both complete successfully.
4008 	 */
4009 	ctrlr1->is_failed = true;
4010 	curr_path1->is_failed = true;
4011 	ctrlr2->is_failed = true;
4012 	curr_path2->is_failed = true;
4013 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4014 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4015 
4016 	set_thread(0);
4017 
4018 	bdev_nvme_submit_request(ch1, first_bdev_io);
4019 
4020 	set_thread(1);
4021 
4022 	bdev_nvme_submit_request(ch2, second_bdev_io);
4023 
4024 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4025 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4026 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4027 
4028 	poll_threads();
4029 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4030 	poll_threads();
4031 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4032 	poll_threads();
4033 
4034 	CU_ASSERT(ctrlr1->is_failed == false);
4035 	CU_ASSERT(curr_path1->is_failed == false);
4036 	CU_ASSERT(ctrlr2->is_failed == false);
4037 	CU_ASSERT(curr_path2->is_failed == false);
4038 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4039 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4040 
4041 	set_thread(0);
4042 
4043 	spdk_put_io_channel(ch1);
4044 
4045 	set_thread(1);
4046 
4047 	spdk_put_io_channel(ch2);
4048 
4049 	poll_threads();
4050 
4051 	set_thread(0);
4052 
4053 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4054 	CU_ASSERT(rc == 0);
4055 
4056 	poll_threads();
4057 	spdk_delay_us(1000);
4058 	poll_threads();
4059 
4060 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4061 
4062 	free(first_bdev_io);
4063 	free(second_bdev_io);
4064 }
4065 
4066 static void
4067 test_find_io_path(void)
4068 {
4069 	struct nvme_bdev_channel nbdev_ch = {
4070 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4071 	};
4072 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4073 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4074 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4075 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4076 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4077 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4078 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4079 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4080 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4081 
4082 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4083 
4084 	/* Test if io_path whose ANA state is not accessible is excluded. */
4085 
4086 	nvme_qpair1.qpair = &qpair1;
4087 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4088 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4089 
4090 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4091 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4092 
4093 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4094 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4095 
4096 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4097 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4098 
4099 	nbdev_ch.current_io_path = NULL;
4100 
4101 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4102 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4103 
4104 	nbdev_ch.current_io_path = NULL;
4105 
4106 	/* Test if io_path whose qpair is resetting is excluded. */
4107 
4108 	nvme_qpair1.qpair = NULL;
4109 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4110 
4111 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4112 
4113 	/* Test if ANA optimized state or the first found ANA non-optimized state
4114 	 * is prioritized.
4115 	 */
4116 
4117 	nvme_qpair1.qpair = &qpair1;
4118 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4119 	nvme_qpair2.qpair = &qpair2;
4120 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4121 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4122 
4123 	nbdev_ch.current_io_path = NULL;
4124 
4125 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4126 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4127 
4128 	nbdev_ch.current_io_path = NULL;
4129 }
4130 
4131 static void
4132 test_retry_io_if_ana_state_is_updating(void)
4133 {
4134 	struct nvme_path_id path = {};
4135 	struct nvme_ctrlr_opts opts = {};
4136 	struct spdk_nvme_ctrlr *ctrlr;
4137 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4138 	struct nvme_ctrlr *nvme_ctrlr;
4139 	const int STRING_SIZE = 32;
4140 	const char *attached_names[STRING_SIZE];
4141 	struct nvme_bdev *bdev;
4142 	struct nvme_ns *nvme_ns;
4143 	struct spdk_bdev_io *bdev_io1;
4144 	struct spdk_io_channel *ch;
4145 	struct nvme_bdev_channel *nbdev_ch;
4146 	struct nvme_io_path *io_path;
4147 	struct nvme_qpair *nvme_qpair;
4148 	int rc;
4149 
4150 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4151 	ut_init_trid(&path.trid);
4152 
4153 	set_thread(0);
4154 
4155 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4156 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4157 
4158 	g_ut_attach_ctrlr_status = 0;
4159 	g_ut_attach_bdev_count = 1;
4160 
4161 	opts.ctrlr_loss_timeout_sec = -1;
4162 	opts.reconnect_delay_sec = 1;
4163 
4164 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4165 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4166 	CU_ASSERT(rc == 0);
4167 
4168 	spdk_delay_us(1000);
4169 	poll_threads();
4170 
4171 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4172 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4173 
4174 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4175 	CU_ASSERT(nvme_ctrlr != NULL);
4176 
4177 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4178 	CU_ASSERT(bdev != NULL);
4179 
4180 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4181 	CU_ASSERT(nvme_ns != NULL);
4182 
4183 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4184 	ut_bdev_io_set_buf(bdev_io1);
4185 
4186 	ch = spdk_get_io_channel(bdev);
4187 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4188 
4189 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4190 
4191 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4192 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4193 
4194 	nvme_qpair = io_path->qpair;
4195 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4196 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4197 
4198 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4199 
4200 	/* If qpair is connected, I/O should succeed. */
4201 	bdev_io1->internal.in_submit_request = true;
4202 
4203 	bdev_nvme_submit_request(ch, bdev_io1);
4204 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4205 
4206 	poll_threads();
4207 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4208 	CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
4209 
4210 	/* If ANA state of namespace is inaccessible, I/O should be queued. */
4211 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4212 	nbdev_ch->current_io_path = NULL;
4213 
4214 	bdev_io1->internal.in_submit_request = true;
4215 
4216 	bdev_nvme_submit_request(ch, bdev_io1);
4217 
4218 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4219 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4220 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4221 
4222 	/* ANA state became accessible while I/O was queued. */
4223 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4224 
4225 	spdk_delay_us(1000000);
4226 
4227 	poll_thread_times(0, 1);
4228 
4229 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4230 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4231 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4232 
4233 	poll_threads();
4234 
4235 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4236 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4237 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4238 
4239 	free(bdev_io1);
4240 
4241 	spdk_put_io_channel(ch);
4242 
4243 	poll_threads();
4244 
4245 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4246 	CU_ASSERT(rc == 0);
4247 
4248 	poll_threads();
4249 	spdk_delay_us(1000);
4250 	poll_threads();
4251 
4252 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4253 }
4254 
4255 static void
4256 test_retry_io_for_io_path_error(void)
4257 {
4258 	struct nvme_path_id path1 = {}, path2 = {};
4259 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4260 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4261 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4262 	const int STRING_SIZE = 32;
4263 	const char *attached_names[STRING_SIZE];
4264 	struct nvme_bdev *bdev;
4265 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4266 	struct spdk_bdev_io *bdev_io;
4267 	struct nvme_bdev_io *bio;
4268 	struct spdk_io_channel *ch;
4269 	struct nvme_bdev_channel *nbdev_ch;
4270 	struct nvme_io_path *io_path1, *io_path2;
4271 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4272 	struct ut_nvme_req *req;
4273 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4274 	int rc;
4275 
4276 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4277 	ut_init_trid(&path1.trid);
4278 	ut_init_trid2(&path2.trid);
4279 
4280 	g_opts.bdev_retry_count = 1;
4281 
4282 	set_thread(0);
4283 
4284 	g_ut_attach_ctrlr_status = 0;
4285 	g_ut_attach_bdev_count = 1;
4286 
4287 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4288 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4289 
4290 	ctrlr1->ns[0].uuid = &uuid1;
4291 
4292 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4293 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4294 	CU_ASSERT(rc == 0);
4295 
4296 	spdk_delay_us(1000);
4297 	poll_threads();
4298 
4299 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4300 	poll_threads();
4301 
4302 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4303 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4304 
4305 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4306 	CU_ASSERT(nvme_ctrlr1 != NULL);
4307 
4308 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4309 	CU_ASSERT(bdev != NULL);
4310 
4311 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4312 	CU_ASSERT(nvme_ns1 != NULL);
4313 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4314 
4315 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4316 	ut_bdev_io_set_buf(bdev_io);
4317 
4318 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4319 
4320 	ch = spdk_get_io_channel(bdev);
4321 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4322 
4323 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4324 
4325 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4326 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4327 
4328 	nvme_qpair1 = io_path1->qpair;
4329 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4330 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4331 
4332 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4333 
4334 	/* I/O got a temporary I/O path error, but it should not retry if DNR is set. */
4335 	bdev_io->internal.in_submit_request = true;
4336 
4337 	bdev_nvme_submit_request(ch, bdev_io);
4338 
4339 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4340 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4341 
4342 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4343 	SPDK_CU_ASSERT_FATAL(req != NULL);
4344 
4345 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4346 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4347 	req->cpl.status.dnr = 1;
4348 
4349 	poll_thread_times(0, 1);
4350 
4351 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4352 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4353 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4354 
4355 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4356 	bdev_io->internal.in_submit_request = true;
4357 
4358 	bdev_nvme_submit_request(ch, bdev_io);
4359 
4360 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4361 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4362 
4363 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4364 	SPDK_CU_ASSERT_FATAL(req != NULL);
4365 
4366 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4367 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4368 
4369 	poll_thread_times(0, 1);
4370 
4371 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4372 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4373 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4374 
4375 	poll_threads();
4376 
4377 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4378 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4379 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4380 
4381 	/* Add io_path2 dynamically, and create a multipath configuration. */
4382 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4383 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4384 
4385 	ctrlr2->ns[0].uuid = &uuid1;
4386 
4387 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4388 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4389 	CU_ASSERT(rc == 0);
4390 
4391 	spdk_delay_us(1000);
4392 	poll_threads();
4393 
4394 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4395 	poll_threads();
4396 
4397 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4398 	CU_ASSERT(nvme_ctrlr2 != NULL);
4399 
4400 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4401 	CU_ASSERT(nvme_ns2 != NULL);
4402 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4403 
4404 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4405 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4406 
4407 	nvme_qpair2 = io_path2->qpair;
4408 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4409 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4410 
4411 	/* I/O is submitted to io_path1, but qpair of io_path1 was disconnected
4412 	 * and deleted. Hence the I/O was aborted. But io_path2 is available.
4413 	 * So after a retry, I/O is submitted to io_path2 and should succeed.
4414 	 */
4415 	bdev_io->internal.in_submit_request = true;
4416 
4417 	bdev_nvme_submit_request(ch, bdev_io);
4418 
4419 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4420 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4421 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4422 
4423 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4424 	SPDK_CU_ASSERT_FATAL(req != NULL);
4425 
4426 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4427 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4428 
4429 	poll_thread_times(0, 1);
4430 
4431 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4432 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4433 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4434 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4435 
4436 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4437 	nvme_qpair1->qpair = NULL;
4438 
4439 	poll_threads();
4440 
4441 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4442 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4443 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4444 
4445 	free(bdev_io);
4446 
4447 	spdk_put_io_channel(ch);
4448 
4449 	poll_threads();
4450 
4451 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4452 	CU_ASSERT(rc == 0);
4453 
4454 	poll_threads();
4455 	spdk_delay_us(1000);
4456 	poll_threads();
4457 
4458 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4459 
4460 	g_opts.bdev_retry_count = 0;
4461 }
4462 
4463 static void
4464 test_retry_io_count(void)
4465 {
4466 	struct nvme_path_id path = {};
4467 	struct spdk_nvme_ctrlr *ctrlr;
4468 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4469 	struct nvme_ctrlr *nvme_ctrlr;
4470 	const int STRING_SIZE = 32;
4471 	const char *attached_names[STRING_SIZE];
4472 	struct nvme_bdev *bdev;
4473 	struct nvme_ns *nvme_ns;
4474 	struct spdk_bdev_io *bdev_io;
4475 	struct nvme_bdev_io *bio;
4476 	struct spdk_io_channel *ch;
4477 	struct nvme_bdev_channel *nbdev_ch;
4478 	struct nvme_io_path *io_path;
4479 	struct nvme_qpair *nvme_qpair;
4480 	struct ut_nvme_req *req;
4481 	int rc;
4482 
4483 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4484 	ut_init_trid(&path.trid);
4485 
4486 	set_thread(0);
4487 
4488 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4489 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4490 
4491 	g_ut_attach_ctrlr_status = 0;
4492 	g_ut_attach_bdev_count = 1;
4493 
4494 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4495 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4496 	CU_ASSERT(rc == 0);
4497 
4498 	spdk_delay_us(1000);
4499 	poll_threads();
4500 
4501 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4502 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4503 
4504 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4505 	CU_ASSERT(nvme_ctrlr != NULL);
4506 
4507 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4508 	CU_ASSERT(bdev != NULL);
4509 
4510 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4511 	CU_ASSERT(nvme_ns != NULL);
4512 
4513 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4514 	ut_bdev_io_set_buf(bdev_io);
4515 
4516 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4517 
4518 	ch = spdk_get_io_channel(bdev);
4519 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4520 
4521 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4522 
4523 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4524 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4525 
4526 	nvme_qpair = io_path->qpair;
4527 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4528 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4529 
4530 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4531 
4532 	/* If I/O is aborted by request, it should not be retried. */
4533 	g_opts.bdev_retry_count = 1;
4534 
4535 	bdev_io->internal.in_submit_request = true;
4536 
4537 	bdev_nvme_submit_request(ch, bdev_io);
4538 
4539 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4540 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4541 
4542 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4543 	SPDK_CU_ASSERT_FATAL(req != NULL);
4544 
4545 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4546 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4547 
4548 	poll_thread_times(0, 1);
4549 
4550 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4551 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4552 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4553 
4554 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4555 	 * the failed I/O should not be retried.
4556 	 */
4557 	g_opts.bdev_retry_count = 4;
4558 
4559 	bdev_io->internal.in_submit_request = true;
4560 
4561 	bdev_nvme_submit_request(ch, bdev_io);
4562 
4563 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4564 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4565 
4566 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4567 	SPDK_CU_ASSERT_FATAL(req != NULL);
4568 
4569 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4570 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4571 	bio->retry_count = 4;
4572 
4573 	poll_thread_times(0, 1);
4574 
4575 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4576 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4577 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4578 
4579 	/* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */
4580 	g_opts.bdev_retry_count = -1;
4581 
4582 	bdev_io->internal.in_submit_request = true;
4583 
4584 	bdev_nvme_submit_request(ch, bdev_io);
4585 
4586 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4587 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4588 
4589 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4590 	SPDK_CU_ASSERT_FATAL(req != NULL);
4591 
4592 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4593 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4594 	bio->retry_count = 4;
4595 
4596 	poll_thread_times(0, 1);
4597 
4598 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4599 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4600 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4601 
4602 	poll_threads();
4603 
4604 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4605 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4606 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4607 
4608 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4609 	 * the failed I/O should be retried.
4610 	 */
4611 	g_opts.bdev_retry_count = 4;
4612 
4613 	bdev_io->internal.in_submit_request = true;
4614 
4615 	bdev_nvme_submit_request(ch, bdev_io);
4616 
4617 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4618 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4619 
4620 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4621 	SPDK_CU_ASSERT_FATAL(req != NULL);
4622 
4623 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4624 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4625 	bio->retry_count = 3;
4626 
4627 	poll_thread_times(0, 1);
4628 
4629 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4630 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4631 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4632 
4633 	poll_threads();
4634 
4635 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4636 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4637 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4638 
4639 	free(bdev_io);
4640 
4641 	spdk_put_io_channel(ch);
4642 
4643 	poll_threads();
4644 
4645 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4646 	CU_ASSERT(rc == 0);
4647 
4648 	poll_threads();
4649 	spdk_delay_us(1000);
4650 	poll_threads();
4651 
4652 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4653 
4654 	g_opts.bdev_retry_count = 0;
4655 }
4656 
4657 static void
4658 test_concurrent_read_ana_log_page(void)
4659 {
4660 	struct spdk_nvme_transport_id trid = {};
4661 	struct spdk_nvme_ctrlr *ctrlr;
4662 	struct nvme_ctrlr *nvme_ctrlr;
4663 	const int STRING_SIZE = 32;
4664 	const char *attached_names[STRING_SIZE];
4665 	int rc;
4666 
4667 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4668 	ut_init_trid(&trid);
4669 
4670 	set_thread(0);
4671 
4672 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4673 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4674 
4675 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4676 
4677 	g_ut_attach_ctrlr_status = 0;
4678 	g_ut_attach_bdev_count = 1;
4679 
4680 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4681 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4682 	CU_ASSERT(rc == 0);
4683 
4684 	spdk_delay_us(1000);
4685 	poll_threads();
4686 
4687 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4688 	poll_threads();
4689 
4690 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4691 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4692 
4693 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4694 
4695 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4696 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4697 
4698 	/* Following read request should be rejected. */
4699 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4700 
4701 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4702 
4703 	set_thread(1);
4704 
4705 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4706 
4707 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4708 
4709 	/* Reset request while reading ANA log page should not be rejected. */
4710 	rc = bdev_nvme_reset(nvme_ctrlr);
4711 	CU_ASSERT(rc == 0);
4712 
4713 	poll_threads();
4714 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4715 	poll_threads();
4716 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4717 	poll_threads();
4718 
4719 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4720 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4721 
4722 	/* Read ANA log page while resetting ctrlr should be rejected. */
4723 	rc = bdev_nvme_reset(nvme_ctrlr);
4724 	CU_ASSERT(rc == 0);
4725 
4726 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4727 
4728 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4729 
4730 	poll_threads();
4731 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4732 	poll_threads();
4733 
4734 	set_thread(0);
4735 
4736 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4737 	CU_ASSERT(rc == 0);
4738 
4739 	poll_threads();
4740 	spdk_delay_us(1000);
4741 	poll_threads();
4742 
4743 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4744 }
4745 
4746 static void
4747 test_retry_io_for_ana_error(void)
4748 {
4749 	struct nvme_path_id path = {};
4750 	struct spdk_nvme_ctrlr *ctrlr;
4751 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4752 	struct nvme_ctrlr *nvme_ctrlr;
4753 	const int STRING_SIZE = 32;
4754 	const char *attached_names[STRING_SIZE];
4755 	struct nvme_bdev *bdev;
4756 	struct nvme_ns *nvme_ns;
4757 	struct spdk_bdev_io *bdev_io;
4758 	struct nvme_bdev_io *bio;
4759 	struct spdk_io_channel *ch;
4760 	struct nvme_bdev_channel *nbdev_ch;
4761 	struct nvme_io_path *io_path;
4762 	struct nvme_qpair *nvme_qpair;
4763 	struct ut_nvme_req *req;
4764 	uint64_t now;
4765 	int rc;
4766 
4767 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4768 	ut_init_trid(&path.trid);
4769 
4770 	g_opts.bdev_retry_count = 1;
4771 
4772 	set_thread(0);
4773 
4774 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4775 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4776 
4777 	g_ut_attach_ctrlr_status = 0;
4778 	g_ut_attach_bdev_count = 1;
4779 
4780 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4781 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4782 	CU_ASSERT(rc == 0);
4783 
4784 	spdk_delay_us(1000);
4785 	poll_threads();
4786 
4787 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4788 	poll_threads();
4789 
4790 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4791 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4792 
4793 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4794 	CU_ASSERT(nvme_ctrlr != NULL);
4795 
4796 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4797 	CU_ASSERT(bdev != NULL);
4798 
4799 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4800 	CU_ASSERT(nvme_ns != NULL);
4801 
4802 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4803 	ut_bdev_io_set_buf(bdev_io);
4804 
4805 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4806 
4807 	ch = spdk_get_io_channel(bdev);
4808 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4809 
4810 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4811 
4812 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4813 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4814 
4815 	nvme_qpair = io_path->qpair;
4816 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4817 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4818 
4819 	now = spdk_get_ticks();
4820 
4821 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4822 
4823 	/* If I/O got ANA error, it should be queued, the corresponding namespace
4824 	 * should be freezed and its ANA state should be updated.
4825 	 */
4826 	bdev_io->internal.in_submit_request = true;
4827 
4828 	bdev_nvme_submit_request(ch, bdev_io);
4829 
4830 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4831 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4832 
4833 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4834 	SPDK_CU_ASSERT_FATAL(req != NULL);
4835 
4836 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4837 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4838 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4839 
4840 	poll_thread_times(0, 1);
4841 
4842 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4843 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4844 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4845 	/* I/O should be retried immediately. */
4846 	CU_ASSERT(bio->retry_ticks == now);
4847 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4848 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4849 
4850 	poll_threads();
4851 
4852 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4853 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4854 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4855 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4856 	/* I/O should be retried after a second if no I/O path was found but
4857 	 * any I/O path may become available.
4858 	 */
4859 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4860 
4861 	/* Namespace should be unfreezed after completing to update its ANA state. */
4862 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4863 	poll_threads();
4864 
4865 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4866 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4867 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4868 
4869 	/* Retry the queued I/O should succeed. */
4870 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4871 	poll_threads();
4872 
4873 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4874 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4875 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4876 
4877 	free(bdev_io);
4878 
4879 	spdk_put_io_channel(ch);
4880 
4881 	poll_threads();
4882 
4883 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4884 	CU_ASSERT(rc == 0);
4885 
4886 	poll_threads();
4887 	spdk_delay_us(1000);
4888 	poll_threads();
4889 
4890 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4891 
4892 	g_opts.bdev_retry_count = 0;
4893 }
4894 
4895 static void
4896 test_retry_admin_passthru_for_path_error(void)
4897 {
4898 	struct nvme_path_id path1 = {}, path2 = {};
4899 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4900 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4901 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4902 	const int STRING_SIZE = 32;
4903 	const char *attached_names[STRING_SIZE];
4904 	struct nvme_bdev *bdev;
4905 	struct spdk_bdev_io *admin_io;
4906 	struct spdk_io_channel *ch;
4907 	struct ut_nvme_req *req;
4908 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4909 	int rc;
4910 
4911 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4912 	ut_init_trid(&path1.trid);
4913 	ut_init_trid2(&path2.trid);
4914 
4915 	g_opts.bdev_retry_count = 1;
4916 
4917 	set_thread(0);
4918 
4919 	g_ut_attach_ctrlr_status = 0;
4920 	g_ut_attach_bdev_count = 1;
4921 
4922 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4923 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4924 
4925 	ctrlr1->ns[0].uuid = &uuid1;
4926 
4927 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4928 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4929 	CU_ASSERT(rc == 0);
4930 
4931 	spdk_delay_us(1000);
4932 	poll_threads();
4933 
4934 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4935 	poll_threads();
4936 
4937 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4938 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4939 
4940 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4941 	CU_ASSERT(nvme_ctrlr1 != NULL);
4942 
4943 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4944 	CU_ASSERT(bdev != NULL);
4945 
4946 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4947 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4948 
4949 	ch = spdk_get_io_channel(bdev);
4950 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4951 
4952 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4953 
4954 	/* Admin passthrough got a path error, but it should not retry if DNR is set. */
4955 	admin_io->internal.in_submit_request = true;
4956 
4957 	bdev_nvme_submit_request(ch, admin_io);
4958 
4959 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4960 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4961 
4962 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4963 	SPDK_CU_ASSERT_FATAL(req != NULL);
4964 
4965 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4966 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4967 	req->cpl.status.dnr = 1;
4968 
4969 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4970 	poll_thread_times(0, 2);
4971 
4972 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4973 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4974 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4975 
4976 	/* Admin passthrough got a path error, but it should succeed after retry. */
4977 	admin_io->internal.in_submit_request = true;
4978 
4979 	bdev_nvme_submit_request(ch, admin_io);
4980 
4981 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4982 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4983 
4984 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4985 	SPDK_CU_ASSERT_FATAL(req != NULL);
4986 
4987 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4988 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4989 
4990 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4991 	poll_thread_times(0, 2);
4992 
4993 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4994 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4995 
4996 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4997 	poll_threads();
4998 
4999 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
5000 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5001 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5002 
5003 	/* Add ctrlr2 dynamically, and create a multipath configuration. */
5004 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5005 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5006 
5007 	ctrlr2->ns[0].uuid = &uuid1;
5008 
5009 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5010 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5011 	CU_ASSERT(rc == 0);
5012 
5013 	spdk_delay_us(1000);
5014 	poll_threads();
5015 
5016 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5017 	poll_threads();
5018 
5019 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
5020 	CU_ASSERT(nvme_ctrlr2 != NULL);
5021 
5022 	/* Admin passthrough was submitted to ctrlr1, but ctrlr1 was failed.
5023 	 * Hence the admin passthrough was aborted. But ctrlr2 is avaialble.
5024 	 * So after a retry, the admin passthrough is submitted to ctrlr2 and
5025 	 * should succeed.
5026 	 */
5027 	admin_io->internal.in_submit_request = true;
5028 
5029 	bdev_nvme_submit_request(ch, admin_io);
5030 
5031 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
5032 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
5033 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5034 
5035 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
5036 	SPDK_CU_ASSERT_FATAL(req != NULL);
5037 
5038 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
5039 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5040 	ctrlr1->is_failed = true;
5041 
5042 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5043 	poll_thread_times(0, 2);
5044 
5045 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
5046 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
5047 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5048 
5049 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5050 	poll_threads();
5051 
5052 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
5053 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5054 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5055 
5056 	free(admin_io);
5057 
5058 	spdk_put_io_channel(ch);
5059 
5060 	poll_threads();
5061 
5062 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5063 	CU_ASSERT(rc == 0);
5064 
5065 	poll_threads();
5066 	spdk_delay_us(1000);
5067 	poll_threads();
5068 
5069 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5070 
5071 	g_opts.bdev_retry_count = 0;
5072 }
5073 
5074 static void
5075 test_retry_admin_passthru_by_count(void)
5076 {
5077 	struct nvme_path_id path = {};
5078 	struct spdk_nvme_ctrlr *ctrlr;
5079 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5080 	struct nvme_ctrlr *nvme_ctrlr;
5081 	const int STRING_SIZE = 32;
5082 	const char *attached_names[STRING_SIZE];
5083 	struct nvme_bdev *bdev;
5084 	struct spdk_bdev_io *admin_io;
5085 	struct nvme_bdev_io *admin_bio;
5086 	struct spdk_io_channel *ch;
5087 	struct ut_nvme_req *req;
5088 	int rc;
5089 
5090 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5091 	ut_init_trid(&path.trid);
5092 
5093 	set_thread(0);
5094 
5095 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5096 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5097 
5098 	g_ut_attach_ctrlr_status = 0;
5099 	g_ut_attach_bdev_count = 1;
5100 
5101 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5102 			      attach_ctrlr_done, NULL, NULL, NULL, false);
5103 	CU_ASSERT(rc == 0);
5104 
5105 	spdk_delay_us(1000);
5106 	poll_threads();
5107 
5108 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5109 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5110 
5111 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5112 	CU_ASSERT(nvme_ctrlr != NULL);
5113 
5114 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5115 	CU_ASSERT(bdev != NULL);
5116 
5117 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5118 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5119 
5120 	admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx;
5121 
5122 	ch = spdk_get_io_channel(bdev);
5123 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5124 
5125 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5126 
5127 	/* If admin passthrough is aborted by request, it should not be retried. */
5128 	g_opts.bdev_retry_count = 1;
5129 
5130 	admin_io->internal.in_submit_request = true;
5131 
5132 	bdev_nvme_submit_request(ch, admin_io);
5133 
5134 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5135 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5136 
5137 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5138 	SPDK_CU_ASSERT_FATAL(req != NULL);
5139 
5140 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5141 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5142 
5143 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5144 	poll_thread_times(0, 2);
5145 
5146 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5147 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5148 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5149 
5150 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5151 	 * the failed admin passthrough should not be retried.
5152 	 */
5153 	g_opts.bdev_retry_count = 4;
5154 
5155 	admin_io->internal.in_submit_request = true;
5156 
5157 	bdev_nvme_submit_request(ch, admin_io);
5158 
5159 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5160 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5161 
5162 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5163 	SPDK_CU_ASSERT_FATAL(req != NULL);
5164 
5165 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5166 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5167 	admin_bio->retry_count = 4;
5168 
5169 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5170 	poll_thread_times(0, 2);
5171 
5172 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5173 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5174 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5175 
5176 	free(admin_io);
5177 
5178 	spdk_put_io_channel(ch);
5179 
5180 	poll_threads();
5181 
5182 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5183 	CU_ASSERT(rc == 0);
5184 
5185 	poll_threads();
5186 	spdk_delay_us(1000);
5187 	poll_threads();
5188 
5189 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5190 
5191 	g_opts.bdev_retry_count = 0;
5192 }
5193 
5194 static void
5195 test_check_multipath_params(void)
5196 {
5197 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5198 	 * 3rd parameter is fast_io_fail_timeout_sec.
5199 	 */
5200 	CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false);
5201 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false);
5202 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false);
5203 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false);
5204 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false);
5205 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true);
5206 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true);
5207 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true);
5208 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true);
5209 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true);
5210 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false);
5211 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false);
5212 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false);
5213 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false);
5214 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true);
5215 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true);
5216 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true);
5217 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5218 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true);
5219 }
5220 
5221 static void
5222 test_retry_io_if_ctrlr_is_resetting(void)
5223 {
5224 	struct nvme_path_id path = {};
5225 	struct nvme_ctrlr_opts opts = {};
5226 	struct spdk_nvme_ctrlr *ctrlr;
5227 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5228 	struct nvme_ctrlr *nvme_ctrlr;
5229 	const int STRING_SIZE = 32;
5230 	const char *attached_names[STRING_SIZE];
5231 	struct nvme_bdev *bdev;
5232 	struct nvme_ns *nvme_ns;
5233 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5234 	struct spdk_io_channel *ch;
5235 	struct nvme_bdev_channel *nbdev_ch;
5236 	struct nvme_io_path *io_path;
5237 	struct nvme_qpair *nvme_qpair;
5238 	int rc;
5239 
5240 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5241 	ut_init_trid(&path.trid);
5242 
5243 	set_thread(0);
5244 
5245 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5246 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5247 
5248 	g_ut_attach_ctrlr_status = 0;
5249 	g_ut_attach_bdev_count = 1;
5250 
5251 	opts.ctrlr_loss_timeout_sec = -1;
5252 	opts.reconnect_delay_sec = 1;
5253 
5254 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5255 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5256 	CU_ASSERT(rc == 0);
5257 
5258 	spdk_delay_us(1000);
5259 	poll_threads();
5260 
5261 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5262 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5263 
5264 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5265 	CU_ASSERT(nvme_ctrlr != NULL);
5266 
5267 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5268 	CU_ASSERT(bdev != NULL);
5269 
5270 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5271 	CU_ASSERT(nvme_ns != NULL);
5272 
5273 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5274 	ut_bdev_io_set_buf(bdev_io1);
5275 
5276 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5277 	ut_bdev_io_set_buf(bdev_io2);
5278 
5279 	ch = spdk_get_io_channel(bdev);
5280 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5281 
5282 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5283 
5284 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5285 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5286 
5287 	nvme_qpair = io_path->qpair;
5288 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5289 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5290 
5291 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5292 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5293 
5294 	/* If qpair is connected, I/O should succeed. */
5295 	bdev_io1->internal.in_submit_request = true;
5296 
5297 	bdev_nvme_submit_request(ch, bdev_io1);
5298 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5299 
5300 	poll_threads();
5301 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5302 	CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
5303 
5304 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5305 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5306 	 * while resetting the nvme_ctrlr.
5307 	 */
5308 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5309 	ctrlr->is_failed = true;
5310 
5311 	poll_thread_times(0, 5);
5312 
5313 	CU_ASSERT(nvme_qpair->qpair == NULL);
5314 	CU_ASSERT(nvme_ctrlr->resetting == true);
5315 	CU_ASSERT(ctrlr->is_failed == false);
5316 
5317 	bdev_io1->internal.in_submit_request = true;
5318 
5319 	bdev_nvme_submit_request(ch, bdev_io1);
5320 
5321 	spdk_delay_us(1);
5322 
5323 	bdev_io2->internal.in_submit_request = true;
5324 
5325 	bdev_nvme_submit_request(ch, bdev_io2);
5326 
5327 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5328 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5329 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5330 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5331 
5332 	poll_threads();
5333 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5334 	poll_threads();
5335 
5336 	CU_ASSERT(nvme_qpair->qpair != NULL);
5337 	CU_ASSERT(nvme_ctrlr->resetting == false);
5338 
5339 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5340 
5341 	poll_thread_times(0, 1);
5342 
5343 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5344 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5345 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5346 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5347 
5348 	poll_threads();
5349 
5350 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5351 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5352 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5353 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5354 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5355 
5356 	spdk_delay_us(1);
5357 
5358 	poll_thread_times(0, 1);
5359 
5360 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5361 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5362 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5363 
5364 	poll_threads();
5365 
5366 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5367 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5368 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5369 
5370 	free(bdev_io1);
5371 	free(bdev_io2);
5372 
5373 	spdk_put_io_channel(ch);
5374 
5375 	poll_threads();
5376 
5377 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5378 	CU_ASSERT(rc == 0);
5379 
5380 	poll_threads();
5381 	spdk_delay_us(1000);
5382 	poll_threads();
5383 
5384 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5385 }
5386 
5387 static void
5388 test_retry_admin_passthru_if_ctrlr_is_resetting(void)
5389 {
5390 	struct nvme_path_id path = {};
5391 	struct nvme_ctrlr_opts opts = {};
5392 	struct spdk_nvme_ctrlr *ctrlr;
5393 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5394 	struct nvme_ctrlr *nvme_ctrlr;
5395 	const int STRING_SIZE = 32;
5396 	const char *attached_names[STRING_SIZE];
5397 	struct nvme_bdev *bdev;
5398 	struct spdk_bdev_io *admin_io;
5399 	struct spdk_io_channel *ch;
5400 	struct nvme_bdev_channel *nbdev_ch;
5401 	int rc;
5402 
5403 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5404 	ut_init_trid(&path.trid);
5405 
5406 	g_opts.bdev_retry_count = 1;
5407 
5408 	set_thread(0);
5409 
5410 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5411 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5412 
5413 	g_ut_attach_ctrlr_status = 0;
5414 	g_ut_attach_bdev_count = 1;
5415 
5416 	opts.ctrlr_loss_timeout_sec = -1;
5417 	opts.reconnect_delay_sec = 1;
5418 
5419 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5420 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5421 	CU_ASSERT(rc == 0);
5422 
5423 	spdk_delay_us(1000);
5424 	poll_threads();
5425 
5426 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5427 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5428 
5429 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5430 	CU_ASSERT(nvme_ctrlr != NULL);
5431 
5432 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5433 	CU_ASSERT(bdev != NULL);
5434 
5435 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5436 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5437 
5438 	ch = spdk_get_io_channel(bdev);
5439 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5440 
5441 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5442 
5443 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5444 
5445 	/* If ctrlr is available, admin passthrough should succeed. */
5446 	admin_io->internal.in_submit_request = true;
5447 
5448 	bdev_nvme_submit_request(ch, admin_io);
5449 
5450 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5451 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5452 
5453 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5454 	poll_threads();
5455 
5456 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5457 	CU_ASSERT(admin_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
5458 
5459 	/* If ctrlr is resetting, admin passthrough request should be queued
5460 	 * if it is submitted while resetting ctrlr.
5461 	 */
5462 	bdev_nvme_reset(nvme_ctrlr);
5463 
5464 	poll_thread_times(0, 1);
5465 
5466 	admin_io->internal.in_submit_request = true;
5467 
5468 	bdev_nvme_submit_request(ch, admin_io);
5469 
5470 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5471 	CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5472 
5473 	poll_threads();
5474 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5475 	poll_threads();
5476 
5477 	CU_ASSERT(nvme_ctrlr->resetting == false);
5478 
5479 	spdk_delay_us(1000000 - g_opts.nvme_adminq_poll_period_us);
5480 	poll_thread_times(0, 1);
5481 
5482 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5483 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5484 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5485 
5486 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5487 	poll_threads();
5488 
5489 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5490 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5491 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5492 
5493 	free(admin_io);
5494 
5495 	spdk_put_io_channel(ch);
5496 
5497 	poll_threads();
5498 
5499 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5500 	CU_ASSERT(rc == 0);
5501 
5502 	poll_threads();
5503 	spdk_delay_us(1000);
5504 	poll_threads();
5505 
5506 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5507 
5508 	g_opts.bdev_retry_count = 0;
5509 }
5510 
5511 static void
5512 test_reconnect_ctrlr(void)
5513 {
5514 	struct spdk_nvme_transport_id trid = {};
5515 	struct spdk_nvme_ctrlr ctrlr = {};
5516 	struct nvme_ctrlr *nvme_ctrlr;
5517 	struct spdk_io_channel *ch1, *ch2;
5518 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5519 	int rc;
5520 
5521 	ut_init_trid(&trid);
5522 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5523 
5524 	set_thread(0);
5525 
5526 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5527 	CU_ASSERT(rc == 0);
5528 
5529 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5530 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5531 
5532 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5533 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5534 
5535 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5536 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5537 
5538 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5539 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5540 
5541 	set_thread(1);
5542 
5543 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5544 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5545 
5546 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5547 
5548 	/* Reset starts from thread 1. */
5549 	set_thread(1);
5550 
5551 	/* The reset should fail and a reconnect timer should be registered. */
5552 	ctrlr.fail_reset = true;
5553 	ctrlr.is_failed = true;
5554 
5555 	rc = bdev_nvme_reset(nvme_ctrlr);
5556 	CU_ASSERT(rc == 0);
5557 	CU_ASSERT(nvme_ctrlr->resetting == true);
5558 	CU_ASSERT(ctrlr.is_failed == true);
5559 
5560 	poll_threads();
5561 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5562 	poll_threads();
5563 
5564 	CU_ASSERT(nvme_ctrlr->resetting == false);
5565 	CU_ASSERT(ctrlr.is_failed == false);
5566 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5567 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5568 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5569 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5570 
5571 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5572 	poll_threads();
5573 
5574 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5575 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5576 
5577 	/* Then a reconnect retry should suceeed. */
5578 	ctrlr.fail_reset = false;
5579 
5580 	spdk_delay_us(SPDK_SEC_TO_USEC);
5581 	poll_thread_times(0, 1);
5582 
5583 	CU_ASSERT(nvme_ctrlr->resetting == true);
5584 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5585 
5586 	poll_threads();
5587 
5588 	CU_ASSERT(nvme_ctrlr->resetting == false);
5589 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5590 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5591 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5592 
5593 	/* The reset should fail and a reconnect timer should be registered. */
5594 	ctrlr.fail_reset = true;
5595 	ctrlr.is_failed = true;
5596 
5597 	rc = bdev_nvme_reset(nvme_ctrlr);
5598 	CU_ASSERT(rc == 0);
5599 	CU_ASSERT(nvme_ctrlr->resetting == true);
5600 	CU_ASSERT(ctrlr.is_failed == true);
5601 
5602 	poll_threads();
5603 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5604 	poll_threads();
5605 
5606 	CU_ASSERT(nvme_ctrlr->resetting == false);
5607 	CU_ASSERT(ctrlr.is_failed == false);
5608 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5609 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5610 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5611 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5612 
5613 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5614 	poll_threads();
5615 
5616 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5617 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5618 
5619 	/* Then a reconnect retry should still fail. */
5620 	spdk_delay_us(SPDK_SEC_TO_USEC);
5621 	poll_thread_times(0, 1);
5622 
5623 	CU_ASSERT(nvme_ctrlr->resetting == true);
5624 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5625 
5626 	poll_threads();
5627 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5628 	poll_threads();
5629 
5630 	CU_ASSERT(nvme_ctrlr->resetting == false);
5631 	CU_ASSERT(ctrlr.is_failed == false);
5632 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5633 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5634 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5635 
5636 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5637 	spdk_delay_us(SPDK_SEC_TO_USEC);
5638 	poll_threads();
5639 
5640 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5641 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5642 	CU_ASSERT(nvme_ctrlr->destruct == true);
5643 
5644 	spdk_put_io_channel(ch2);
5645 
5646 	set_thread(0);
5647 
5648 	spdk_put_io_channel(ch1);
5649 
5650 	poll_threads();
5651 	spdk_delay_us(1000);
5652 	poll_threads();
5653 
5654 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5655 }
5656 
5657 static struct nvme_path_id *
5658 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5659 		       const struct spdk_nvme_transport_id *trid)
5660 {
5661 	struct nvme_path_id *p;
5662 
5663 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5664 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5665 			break;
5666 		}
5667 	}
5668 
5669 	return p;
5670 }
5671 
5672 static void
5673 test_retry_failover_ctrlr(void)
5674 {
5675 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5676 	struct spdk_nvme_ctrlr ctrlr = {};
5677 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5678 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5679 	struct spdk_io_channel *ch;
5680 	struct nvme_ctrlr_channel *ctrlr_ch;
5681 	int rc;
5682 
5683 	ut_init_trid(&trid1);
5684 	ut_init_trid2(&trid2);
5685 	ut_init_trid3(&trid3);
5686 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5687 
5688 	set_thread(0);
5689 
5690 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5691 	CU_ASSERT(rc == 0);
5692 
5693 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5694 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5695 
5696 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5697 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5698 
5699 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5700 	CU_ASSERT(rc == 0);
5701 
5702 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5703 	CU_ASSERT(rc == 0);
5704 
5705 	ch = spdk_get_io_channel(nvme_ctrlr);
5706 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5707 
5708 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5709 
5710 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5711 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5712 	CU_ASSERT(path_id1->is_failed == false);
5713 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5714 
5715 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5716 	ctrlr.fail_reset = true;
5717 	ctrlr.is_failed = true;
5718 
5719 	rc = bdev_nvme_reset(nvme_ctrlr);
5720 	CU_ASSERT(rc == 0);
5721 
5722 	poll_threads();
5723 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5724 	poll_threads();
5725 
5726 	CU_ASSERT(nvme_ctrlr->resetting == false);
5727 	CU_ASSERT(ctrlr.is_failed == false);
5728 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5729 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5730 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5731 
5732 	CU_ASSERT(path_id1->is_failed == true);
5733 
5734 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5735 	poll_threads();
5736 
5737 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5738 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5739 
5740 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5741 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5742 	CU_ASSERT(path_id2->is_failed == false);
5743 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5744 
5745 	/* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is
5746 	 * switched to trid3 but reset is not started.
5747 	 */
5748 	rc = bdev_nvme_failover(nvme_ctrlr, true);
5749 	CU_ASSERT(rc == 0);
5750 
5751 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5752 
5753 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5754 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5755 	CU_ASSERT(path_id3->is_failed == false);
5756 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5757 
5758 	CU_ASSERT(nvme_ctrlr->resetting == false);
5759 
5760 	/* If reconnect succeeds, trid3 should be the active path_id */
5761 	ctrlr.fail_reset = false;
5762 
5763 	spdk_delay_us(SPDK_SEC_TO_USEC);
5764 	poll_thread_times(0, 1);
5765 
5766 	CU_ASSERT(nvme_ctrlr->resetting == true);
5767 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5768 
5769 	poll_threads();
5770 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5771 	poll_threads();
5772 
5773 	CU_ASSERT(path_id3->is_failed == false);
5774 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5775 	CU_ASSERT(nvme_ctrlr->resetting == false);
5776 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5777 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5778 
5779 	spdk_put_io_channel(ch);
5780 
5781 	poll_threads();
5782 
5783 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5784 	CU_ASSERT(rc == 0);
5785 
5786 	poll_threads();
5787 	spdk_delay_us(1000);
5788 	poll_threads();
5789 
5790 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5791 }
5792 
5793 static void
5794 test_fail_path(void)
5795 {
5796 	struct nvme_path_id path = {};
5797 	struct nvme_ctrlr_opts opts = {};
5798 	struct spdk_nvme_ctrlr *ctrlr;
5799 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5800 	struct nvme_ctrlr *nvme_ctrlr;
5801 	const int STRING_SIZE = 32;
5802 	const char *attached_names[STRING_SIZE];
5803 	struct nvme_bdev *bdev;
5804 	struct nvme_ns *nvme_ns;
5805 	struct spdk_bdev_io *bdev_io;
5806 	struct spdk_io_channel *ch;
5807 	struct nvme_bdev_channel *nbdev_ch;
5808 	struct nvme_io_path *io_path;
5809 	struct nvme_ctrlr_channel *ctrlr_ch;
5810 	int rc;
5811 
5812 	/* The test scenario is the following.
5813 	 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5814 	 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated.
5815 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5816 	 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec
5817 	 *   comes first. The queued I/O is failed.
5818 	 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately.
5819 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5820 	 */
5821 
5822 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5823 	ut_init_trid(&path.trid);
5824 
5825 	set_thread(0);
5826 
5827 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5828 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5829 
5830 	g_ut_attach_ctrlr_status = 0;
5831 	g_ut_attach_bdev_count = 1;
5832 
5833 	opts.ctrlr_loss_timeout_sec = 4;
5834 	opts.reconnect_delay_sec = 1;
5835 	opts.fast_io_fail_timeout_sec = 2;
5836 
5837 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5838 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5839 	CU_ASSERT(rc == 0);
5840 
5841 	spdk_delay_us(1000);
5842 	poll_threads();
5843 
5844 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5845 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5846 
5847 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5848 	CU_ASSERT(nvme_ctrlr != NULL);
5849 
5850 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5851 	CU_ASSERT(bdev != NULL);
5852 
5853 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5854 	CU_ASSERT(nvme_ns != NULL);
5855 
5856 	ch = spdk_get_io_channel(bdev);
5857 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5858 
5859 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5860 
5861 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5862 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5863 
5864 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5865 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5866 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5867 
5868 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5869 	ut_bdev_io_set_buf(bdev_io);
5870 
5871 
5872 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5873 	ctrlr->fail_reset = true;
5874 	ctrlr->is_failed = true;
5875 
5876 	rc = bdev_nvme_reset(nvme_ctrlr);
5877 	CU_ASSERT(rc == 0);
5878 	CU_ASSERT(nvme_ctrlr->resetting == true);
5879 	CU_ASSERT(ctrlr->is_failed == true);
5880 
5881 	poll_threads();
5882 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5883 	poll_threads();
5884 
5885 	CU_ASSERT(nvme_ctrlr->resetting == false);
5886 	CU_ASSERT(ctrlr->is_failed == false);
5887 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5888 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5889 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5890 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5891 
5892 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5893 	poll_threads();
5894 
5895 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5896 
5897 	/* I/O should be queued. */
5898 	bdev_io->internal.in_submit_request = true;
5899 
5900 	bdev_nvme_submit_request(ch, bdev_io);
5901 
5902 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5903 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5904 
5905 	/* After a second, the I/O should be still queued and the ctrlr should be
5906 	 * still recovering.
5907 	 */
5908 	spdk_delay_us(SPDK_SEC_TO_USEC);
5909 	poll_threads();
5910 
5911 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5912 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5913 
5914 	CU_ASSERT(nvme_ctrlr->resetting == false);
5915 	CU_ASSERT(ctrlr->is_failed == false);
5916 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5917 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5918 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5919 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5920 
5921 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5922 	poll_threads();
5923 
5924 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5925 
5926 	/* After two seconds, ctrlr_fail_timeout_sec should expire. */
5927 	spdk_delay_us(SPDK_SEC_TO_USEC);
5928 	poll_threads();
5929 
5930 	CU_ASSERT(nvme_ctrlr->resetting == false);
5931 	CU_ASSERT(ctrlr->is_failed == false);
5932 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5933 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5934 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5935 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5936 
5937 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5938 	poll_threads();
5939 
5940 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5941 
5942 	/* Then within a second, pending I/O should be failed. */
5943 	spdk_delay_us(SPDK_SEC_TO_USEC);
5944 	poll_threads();
5945 
5946 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5947 	poll_threads();
5948 
5949 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5950 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5951 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5952 
5953 	/* Another I/O submission should be failed immediately. */
5954 	bdev_io->internal.in_submit_request = true;
5955 
5956 	bdev_nvme_submit_request(ch, bdev_io);
5957 
5958 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5959 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5960 
5961 	/* After four seconds, path_loss_timeout_sec should expire and ctrlr should
5962 	 * be deleted.
5963 	 */
5964 	spdk_delay_us(SPDK_SEC_TO_USEC);
5965 	poll_threads();
5966 
5967 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5968 	poll_threads();
5969 
5970 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5971 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5972 	CU_ASSERT(nvme_ctrlr->destruct == true);
5973 
5974 	spdk_put_io_channel(ch);
5975 
5976 	poll_threads();
5977 	spdk_delay_us(1000);
5978 	poll_threads();
5979 
5980 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5981 
5982 	free(bdev_io);
5983 }
5984 
5985 static void
5986 test_nvme_ns_cmp(void)
5987 {
5988 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5989 
5990 	nvme_ns1.id = 0;
5991 	nvme_ns2.id = UINT32_MAX;
5992 
5993 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5994 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5995 }
5996 
5997 static void
5998 test_ana_transition(void)
5999 {
6000 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6001 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6002 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6003 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
6004 
6005 	/* case 1: ANA transition timedout is canceled. */
6006 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6007 	nvme_ns.ana_transition_timedout = true;
6008 
6009 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6010 
6011 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6012 
6013 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6014 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6015 
6016 	/* case 2: ANATT timer is kept. */
6017 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6018 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6019 			      &nvme_ns,
6020 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6021 
6022 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6023 
6024 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6025 
6026 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6027 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6028 
6029 	/* case 3: ANATT timer is stopped. */
6030 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6031 
6032 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6033 
6034 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6035 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6036 
6037 	/* ANATT timer is started. */
6038 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6039 
6040 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6041 
6042 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6043 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6044 
6045 	/* ANATT timer is expired. */
6046 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6047 
6048 	poll_threads();
6049 
6050 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6051 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6052 }
6053 
6054 static void
6055 _set_preferred_path_cb(void *cb_arg, int rc)
6056 {
6057 	bool *done = cb_arg;
6058 
6059 	*done = true;
6060 }
6061 
6062 static void
6063 test_set_preferred_path(void)
6064 {
6065 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6066 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6067 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6068 	const int STRING_SIZE = 32;
6069 	const char *attached_names[STRING_SIZE];
6070 	struct nvme_bdev *bdev;
6071 	struct spdk_io_channel *ch;
6072 	struct nvme_bdev_channel *nbdev_ch;
6073 	struct nvme_io_path *io_path;
6074 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6075 	const struct spdk_nvme_ctrlr_data *cdata;
6076 	bool done;
6077 	int rc;
6078 
6079 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6080 	ut_init_trid(&path1.trid);
6081 	ut_init_trid2(&path2.trid);
6082 	ut_init_trid3(&path3.trid);
6083 	g_ut_attach_ctrlr_status = 0;
6084 	g_ut_attach_bdev_count = 1;
6085 
6086 	set_thread(0);
6087 
6088 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6089 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6090 
6091 	ctrlr1->ns[0].uuid = &uuid1;
6092 
6093 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6094 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6095 	CU_ASSERT(rc == 0);
6096 
6097 	spdk_delay_us(1000);
6098 	poll_threads();
6099 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6100 	poll_threads();
6101 
6102 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6103 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6104 
6105 	ctrlr2->ns[0].uuid = &uuid1;
6106 
6107 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6108 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6109 	CU_ASSERT(rc == 0);
6110 
6111 	spdk_delay_us(1000);
6112 	poll_threads();
6113 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6114 	poll_threads();
6115 
6116 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6117 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6118 
6119 	ctrlr3->ns[0].uuid = &uuid1;
6120 
6121 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6122 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6123 	CU_ASSERT(rc == 0);
6124 
6125 	spdk_delay_us(1000);
6126 	poll_threads();
6127 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6128 	poll_threads();
6129 
6130 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6131 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6132 
6133 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6134 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6135 
6136 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6137 
6138 	ch = spdk_get_io_channel(bdev);
6139 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6140 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6141 
6142 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6143 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6144 
6145 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6146 
6147 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6148 	 * should return io_path to ctrlr2.
6149 	 */
6150 
6151 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6152 	done = false;
6153 
6154 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6155 
6156 	poll_threads();
6157 	CU_ASSERT(done == true);
6158 
6159 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6160 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6161 
6162 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6163 
6164 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6165 	 * aquired, find_io_path() should return io_path to ctrlr3.
6166 	 */
6167 
6168 	spdk_put_io_channel(ch);
6169 
6170 	poll_threads();
6171 
6172 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6173 	done = false;
6174 
6175 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6176 
6177 	poll_threads();
6178 	CU_ASSERT(done == true);
6179 
6180 	ch = spdk_get_io_channel(bdev);
6181 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6182 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6183 
6184 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6185 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6186 
6187 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6188 
6189 	spdk_put_io_channel(ch);
6190 
6191 	poll_threads();
6192 
6193 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6194 	CU_ASSERT(rc == 0);
6195 
6196 	poll_threads();
6197 	spdk_delay_us(1000);
6198 	poll_threads();
6199 
6200 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6201 }
6202 
6203 static void
6204 test_find_next_io_path(void)
6205 {
6206 	struct nvme_bdev_channel nbdev_ch = {
6207 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6208 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6209 	};
6210 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6211 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6212 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6213 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6214 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6215 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6216 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6217 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6218 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6219 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6220 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6221 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
6222 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6223 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6224 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6225 
6226 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6227 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6228 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6229 
6230 	/* nbdev_ch->current_io_path is filled always when bdev_nvme_find_next_io_path() is called. */
6231 
6232 	nbdev_ch.current_io_path = &io_path2;
6233 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6234 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6235 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6236 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6237 
6238 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6239 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6240 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6241 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6242 
6243 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6244 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6245 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6246 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6247 
6248 	nbdev_ch.current_io_path = &io_path3;
6249 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6250 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6251 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6252 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6253 }
6254 
6255 int
6256 main(int argc, const char **argv)
6257 {
6258 	CU_pSuite	suite = NULL;
6259 	unsigned int	num_failures;
6260 
6261 	CU_set_error_action(CUEA_ABORT);
6262 	CU_initialize_registry();
6263 
6264 	suite = CU_add_suite("nvme", NULL, NULL);
6265 
6266 	CU_ADD_TEST(suite, test_create_ctrlr);
6267 	CU_ADD_TEST(suite, test_reset_ctrlr);
6268 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
6269 	CU_ADD_TEST(suite, test_failover_ctrlr);
6270 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
6271 	CU_ADD_TEST(suite, test_pending_reset);
6272 	CU_ADD_TEST(suite, test_attach_ctrlr);
6273 	CU_ADD_TEST(suite, test_aer_cb);
6274 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
6275 	CU_ADD_TEST(suite, test_add_remove_trid);
6276 	CU_ADD_TEST(suite, test_abort);
6277 	CU_ADD_TEST(suite, test_get_io_qpair);
6278 	CU_ADD_TEST(suite, test_bdev_unregister);
6279 	CU_ADD_TEST(suite, test_compare_ns);
6280 	CU_ADD_TEST(suite, test_init_ana_log_page);
6281 	CU_ADD_TEST(suite, test_get_memory_domains);
6282 	CU_ADD_TEST(suite, test_reconnect_qpair);
6283 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
6284 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
6285 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
6286 	CU_ADD_TEST(suite, test_admin_path);
6287 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
6288 	CU_ADD_TEST(suite, test_find_io_path);
6289 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
6290 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
6291 	CU_ADD_TEST(suite, test_retry_io_count);
6292 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
6293 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
6294 	CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error);
6295 	CU_ADD_TEST(suite, test_retry_admin_passthru_by_count);
6296 	CU_ADD_TEST(suite, test_check_multipath_params);
6297 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
6298 	CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting);
6299 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
6300 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
6301 	CU_ADD_TEST(suite, test_fail_path);
6302 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
6303 	CU_ADD_TEST(suite, test_ana_transition);
6304 	CU_ADD_TEST(suite, test_set_preferred_path);
6305 	CU_ADD_TEST(suite, test_find_next_io_path);
6306 
6307 	CU_basic_set_mode(CU_BRM_VERBOSE);
6308 
6309 	allocate_threads(3);
6310 	set_thread(0);
6311 	bdev_nvme_library_init();
6312 	init_accel();
6313 
6314 	CU_basic_run_tests();
6315 
6316 	set_thread(0);
6317 	bdev_nvme_library_fini();
6318 	fini_accel();
6319 	free_threads();
6320 
6321 	num_failures = CU_get_number_of_failures();
6322 	CU_cleanup_registry();
6323 
6324 	return num_failures;
6325 }
6326