xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 80e81273e2ea32a96f12f23a7a1cbdb0fe6f70f7)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 #include "spdk_cunit.h"
37 #include "spdk/thread.h"
38 #include "spdk/bdev_module.h"
40 
41 #include "common/lib/ut_multithread.c"
42 
43 #include "bdev/nvme/bdev_nvme.c"
44 
45 #include "unit/lib/json_mock.c"
46 
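/* Opaque marker used as the io_device behind the stubbed accel engine channel
 * returned by spdk_accel_engine_get_io_channel() below.
 */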
47 static void *g_accel_p = (void *)0xdeadbeaf;
48 
49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
50 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
51 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
52 	     spdk_nvme_remove_cb remove_cb), NULL);
53 
54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
55 		enum spdk_nvme_transport_type trtype));
56 
57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
58 	    NULL);
59 
60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
61 
62 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
63 		struct spdk_nvme_transport_id *trid), 0);
64 
65 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
66 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
67 
68 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
69 
70 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
71 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
72 
73 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);
74 
75 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
76 	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);
77 
78 int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
79 				       struct spdk_memory_domain **domains, int array_size)
80 {
81 	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);
82 
83 	return 0;
84 }
85 
86 struct spdk_io_channel *
87 spdk_accel_engine_get_io_channel(void)
88 {
89 	return spdk_get_io_channel(g_accel_p);
90 }
91 
92 void
93 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
94 		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
95 {
96 	/* Avoid warning that opts is used uninitialised */
97 	memset(opts, 0, opts_size);
98 }
99 
100 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
101 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
102 
103 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
104 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
105 
106 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
107 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
108 
109 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
110 		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
111 
112 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
113 
114 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
115 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
116 
117 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
118 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
119 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
120 
121 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
122 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
123 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
124 
125 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
126 
127 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
128 
129 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
130 
131 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
132 
133 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
134 
135 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
136 
137 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
138 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
139 
140 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
141 
142 DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
143 	    (const struct spdk_nvme_ns *ns), 0);
144 
145 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
146 		char *name, size_t *size), 0);
147 
148 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
149 	    (struct spdk_nvme_ns *ns), 0);
150 
151 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
152 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
153 
154 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
155 	    (struct spdk_nvme_ns *ns), 0);
156 
157 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
158 	    (struct spdk_nvme_ns *ns), 0);
159 
160 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
161 	    (struct spdk_nvme_ns *ns), 0);
162 
163 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
164 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
165 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
166 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
167 
168 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
169 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
170 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
171 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
172 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
173 
174 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
175 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
176 	     void *payload, uint32_t payload_size, uint64_t slba,
177 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
178 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
179 
180 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
181 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
182 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
183 
184 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
185 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
186 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
187 
188 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
189 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
190 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
191 
192 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
193 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
194 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
195 
196 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
197 
198 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
199 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
200 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
201 
202 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));
203 
204 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
205 
206 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
207 
208 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
209 
210 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
211 		struct iovec *iov,
212 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
213 
214 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));
215 
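/* Minimal stand-ins for the NVMe driver's opaque structures. Because this test
 * compiles bdev_nvme.c directly, it supplies its own lightweight definitions
 * holding only the fields the stubs and test cases below actually touch.
 */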
216 struct ut_nvme_req {
217 	uint16_t			opc;
218 	spdk_nvme_cmd_cb		cb_fn;
219 	void				*cb_arg;
220 	struct spdk_nvme_cpl		cpl;
221 	TAILQ_ENTRY(ut_nvme_req)	tailq;
222 };
223 
224 struct spdk_nvme_ns {
225 	struct spdk_nvme_ctrlr		*ctrlr;
226 	uint32_t			id;
227 	bool				is_active;
228 	struct spdk_uuid		uuid;
229 	enum spdk_nvme_ana_state	ana_state;
230 };
231 
232 struct spdk_nvme_qpair {
233 	struct spdk_nvme_ctrlr		*ctrlr;
234 	bool				is_connected;
235 	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
236 	uint32_t			num_outstanding_reqs;
237 	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
238 	struct spdk_nvme_poll_group	*poll_group;
239 	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
240 };
241 
242 struct spdk_nvme_ctrlr {
243 	uint32_t			num_ns;
244 	struct spdk_nvme_ns		*ns;
245 	struct spdk_nvme_ns_data	*nsdata;
246 	struct spdk_nvme_qpair		adminq;
247 	struct spdk_nvme_ctrlr_data	cdata;
248 	bool				attached;
249 	bool				is_failed;
250 	bool				fail_reset;
251 	struct spdk_nvme_transport_id	trid;
252 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
253 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
254 	struct spdk_nvme_ctrlr_opts	opts;
255 };
256 
257 struct spdk_nvme_poll_group {
258 	void				*ctx;
259 	struct spdk_nvme_accel_fn_table	accel_fn_table;
260 	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
261 };
262 
263 struct spdk_nvme_probe_ctx {
264 	struct spdk_nvme_transport_id	trid;
265 	void				*cb_ctx;
266 	spdk_nvme_attach_cb		attach_cb;
267 	struct spdk_nvme_ctrlr		*init_ctrlr;
268 };
269 
270 uint32_t
271 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
272 {
273 	uint32_t nsid;
274 
275 	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
276 		if (ctrlr->ns[nsid - 1].is_active) {
277 			return nsid;
278 		}
279 	}
280 
281 	return 0;
282 }
283 
284 uint32_t
285 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
286 {
287 	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
288 		if (ctrlr->ns[nsid - 1].is_active) {
289 			return nsid;
290 		}
291 	}
292 
293 	return 0;
294 }
295 
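/* Global test state: fake ctrlrs waiting to be probed, fake ctrlrs already
 * attached, and the status/count values the attach and register stubs are
 * expected to report.
 */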
296 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
297 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
298 			g_ut_attached_ctrlrs);
299 static int g_ut_attach_ctrlr_status;
300 static size_t g_ut_attach_bdev_count;
301 static int g_ut_register_bdev_status;
302 static uint16_t g_ut_cntlid;
303 static struct nvme_path_id g_any_path = {};
304 
305 static void
306 ut_init_trid(struct spdk_nvme_transport_id *trid)
307 {
308 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
309 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
310 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
311 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
312 }
313 
314 static void
315 ut_init_trid2(struct spdk_nvme_transport_id *trid)
316 {
317 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
318 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
319 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
320 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
321 }
322 
323 static void
324 ut_init_trid3(struct spdk_nvme_transport_id *trid)
325 {
326 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
327 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
328 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
329 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
330 }
331 
332 static int
333 cmp_int(int a, int b)
334 {
335 	return a - b;
336 }
337 
338 int
339 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
340 			       const struct spdk_nvme_transport_id *trid2)
341 {
342 	int cmp;
343 
344 	/* We assume trtype is TCP for now. */
345 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
346 
347 	cmp = cmp_int(trid1->trtype, trid2->trtype);
348 	if (cmp) {
349 		return cmp;
350 	}
351 
352 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
353 	if (cmp) {
354 		return cmp;
355 	}
356 
357 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
358 	if (cmp) {
359 		return cmp;
360 	}
361 
362 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
363 	if (cmp) {
364 		return cmp;
365 	}
366 
367 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
368 	if (cmp) {
369 		return cmp;
370 	}
371 
372 	return 0;
373 }
374 
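/* Create a fake ctrlr with the given number of namespaces (all active and ANA
 * optimized) and queue it on g_ut_init_ctrlrs. It stays there until probe
 * polling moves it to g_ut_attached_ctrlrs.
 */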
375 static struct spdk_nvme_ctrlr *
376 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
377 		bool ana_reporting, bool multipath)
378 {
379 	struct spdk_nvme_ctrlr *ctrlr;
380 	uint32_t i;
381 
382 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
383 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
384 			/* There is a ctrlr whose trid matches. */
385 			return NULL;
386 		}
387 	}
388 
389 	ctrlr = calloc(1, sizeof(*ctrlr));
390 	if (ctrlr == NULL) {
391 		return NULL;
392 	}
393 
394 	ctrlr->attached = true;
395 	ctrlr->adminq.ctrlr = ctrlr;
396 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
397 
398 	if (num_ns != 0) {
399 		ctrlr->num_ns = num_ns;
400 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
401 		if (ctrlr->ns == NULL) {
402 			free(ctrlr);
403 			return NULL;
404 		}
405 
406 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
407 		if (ctrlr->nsdata == NULL) {
408 			free(ctrlr->ns);
409 			free(ctrlr);
410 			return NULL;
411 		}
412 
413 		for (i = 0; i < num_ns; i++) {
414 			ctrlr->ns[i].id = i + 1;
415 			ctrlr->ns[i].ctrlr = ctrlr;
416 			ctrlr->ns[i].is_active = true;
417 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
418 			ctrlr->nsdata[i].nsze = 1024;
419 			ctrlr->nsdata[i].nmic.can_share = multipath;
420 		}
421 
422 		ctrlr->cdata.nn = num_ns;
423 		ctrlr->cdata.nanagrpid = num_ns;
424 	}
425 
426 	ctrlr->cdata.cntlid = ++g_ut_cntlid;
427 	ctrlr->cdata.cmic.multi_ctrlr = multipath;
428 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
429 	ctrlr->trid = *trid;
430 	TAILQ_INIT(&ctrlr->active_io_qpairs);
431 
432 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
433 
434 	return ctrlr;
435 }
436 
437 static void
438 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
439 {
440 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
441 
442 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
443 	free(ctrlr->nsdata);
444 	free(ctrlr->ns);
445 	free(ctrlr);
446 }
447 
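/* Queue a fake request on the qpair with a default SUCCESS completion. The
 * request completes (and its callback fires) only when the test polls the
 * qpair via spdk_nvme_qpair_process_completions().
 */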
448 static int
449 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
450 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
451 {
452 	struct ut_nvme_req *req;
453 
454 	req = calloc(1, sizeof(*req));
455 	if (req == NULL) {
456 		return -ENOMEM;
457 	}
458 
459 	req->opc = opc;
460 	req->cb_fn = cb_fn;
461 	req->cb_arg = cb_arg;
462 
463 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
464 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
465 
466 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
467 	qpair->num_outstanding_reqs++;
468 
469 	return 0;
470 }
471 
472 static struct ut_nvme_req *
473 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
474 {
475 	struct ut_nvme_req *req;
476 
477 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
478 		if (req->cb_arg == cb_arg) {
479 			break;
480 		}
481 	}
482 
483 	return req;
484 }
485 
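/* Allocate a bdev_io with the driver-private nvme_bdev_io context appended,
 * matching the layout the bdev layer would normally provide.
 */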
486 static struct spdk_bdev_io *
487 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
488 		 struct spdk_io_channel *ch)
489 {
490 	struct spdk_bdev_io *bdev_io;
491 
492 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
493 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
494 	bdev_io->type = type;
495 	bdev_io->bdev = &nbdev->disk;
496 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
497 
498 	return bdev_io;
499 }
500 
501 static void
502 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
503 {
504 	bdev_io->u.bdev.iovs = &bdev_io->iov;
505 	bdev_io->u.bdev.iovcnt = 1;
506 
507 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
508 	bdev_io->iov.iov_len = 4096;
509 }
510 
511 static void
512 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
513 {
514 	if (ctrlr->is_failed) {
515 		free(ctrlr);
516 		return;
517 	}
518 
519 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
520 	if (probe_ctx->cb_ctx) {
521 		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
522 	}
523 
524 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
525 
526 	if (probe_ctx->attach_cb) {
527 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
528 	}
529 }
530 
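/* Complete probing: move every fake ctrlr whose trid matches the probe context
 * from the init list to the attached list and invoke the attach callback.
 */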
531 int
532 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
533 {
534 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
535 
536 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
537 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
538 			continue;
539 		}
540 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
541 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
542 	}
543 
544 	free(probe_ctx);
545 
546 	return 0;
547 }
548 
549 struct spdk_nvme_probe_ctx *
550 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
551 			const struct spdk_nvme_ctrlr_opts *opts,
552 			spdk_nvme_attach_cb attach_cb)
553 {
554 	struct spdk_nvme_probe_ctx *probe_ctx;
555 
556 	if (trid == NULL) {
557 		return NULL;
558 	}
559 
560 	probe_ctx = calloc(1, sizeof(*probe_ctx));
561 	if (probe_ctx == NULL) {
562 		return NULL;
563 	}
564 
565 	probe_ctx->trid = *trid;
566 	probe_ctx->cb_ctx = (void *)opts;
567 	probe_ctx->attach_cb = attach_cb;
568 
569 	return probe_ctx;
570 }
571 
572 int
573 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
574 {
575 	if (ctrlr->attached) {
576 		ut_detach_ctrlr(ctrlr);
577 	}
578 
579 	return 0;
580 }
581 
582 int
583 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
584 {
585 	SPDK_CU_ASSERT_FATAL(ctx != NULL);
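	/* Fake the detach context by storing the ctrlr pointer itself;
	 * spdk_nvme_detach_poll_async() casts it back to finish the detach.
	 */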
586 	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;
587 
588 	return 0;
589 }
590 
591 int
592 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
593 {
594 	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
595 }
596 
597 void
598 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
599 {
600 	memset(opts, 0, opts_size);
601 
602 	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
603 		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
604 }
605 
606 const struct spdk_nvme_ctrlr_data *
607 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
608 {
609 	return &ctrlr->cdata;
610 }
611 
612 uint32_t
613 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
614 {
615 	return ctrlr->num_ns;
616 }
617 
618 struct spdk_nvme_ns *
619 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
620 {
621 	if (nsid < 1 || nsid > ctrlr->num_ns) {
622 		return NULL;
623 	}
624 
625 	return &ctrlr->ns[nsid - 1];
626 }
627 
628 bool
629 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
630 {
631 	if (nsid < 1 || nsid > ctrlr->num_ns) {
632 		return false;
633 	}
634 
635 	return ctrlr->ns[nsid - 1].is_active;
636 }
637 
638 union spdk_nvme_csts_register
639 	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
640 {
641 	union spdk_nvme_csts_register csts;
642 
643 	csts.raw = 0;
644 
645 	return csts;
646 }
647 
648 union spdk_nvme_vs_register
649 	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
650 {
651 	union spdk_nvme_vs_register vs;
652 
653 	vs.raw = 0;
654 
655 	return vs;
656 }
657 
658 struct spdk_nvme_qpair *
659 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
660 			       const struct spdk_nvme_io_qpair_opts *user_opts,
661 			       size_t opts_size)
662 {
663 	struct spdk_nvme_qpair *qpair;
664 
665 	qpair = calloc(1, sizeof(*qpair));
666 	if (qpair == NULL) {
667 		return NULL;
668 	}
669 
670 	qpair->ctrlr = ctrlr;
671 	TAILQ_INIT(&qpair->outstanding_reqs);
672 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
673 
674 	return qpair;
675 }
676 
677 int
678 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
679 				 struct spdk_nvme_qpair *qpair)
680 {
681 	if (qpair->is_connected) {
682 		return -EISCONN;
683 	}
684 
685 	qpair->is_connected = true;
686 
687 	return 0;
688 }
689 
690 int
691 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
692 {
693 	struct spdk_nvme_ctrlr *ctrlr;
694 
695 	ctrlr = qpair->ctrlr;
696 
697 	if (ctrlr->is_failed) {
698 		return -ENXIO;
699 	}
700 	qpair->is_connected = true;
701 
702 	return 0;
703 }
704 
705 void
706 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
707 {
708 	qpair->is_connected = false;
709 }
710 
711 int
712 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
713 {
714 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
715 
716 	qpair->is_connected = false;
717 
718 	if (qpair->poll_group != NULL) {
719 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
720 	}
721 
722 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
723 
724 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
725 
726 	free(qpair);
727 
728 	return 0;
729 }
730 
731 int
732 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
733 {
734 	if (ctrlr->fail_reset) {
735 		ctrlr->is_failed = true;
736 		return -EIO;
737 	}
738 
739 	return 0;
740 }
741 
742 void
743 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
744 {
745 }
746 
747 int
748 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
749 {
750 	ctrlr->is_failed = false;
751 
752 	return 0;
753 }
754 
755 void
756 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
757 {
758 	ctrlr->is_failed = true;
759 }
760 
761 bool
762 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
763 {
764 	return ctrlr->is_failed;
765 }
766 
767 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
768 				 sizeof(uint32_t))
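
/* Build a fake ANA log page: a header followed by one group descriptor per
 * active namespace, where each group holds exactly one NSID (hence the extra
 * uint32_t in UT_ANA_DESC_SIZE).
 */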
769 static void
770 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
771 {
772 	struct spdk_nvme_ana_page ana_hdr;
773 	char _ana_desc[UT_ANA_DESC_SIZE];
774 	struct spdk_nvme_ana_group_descriptor *ana_desc;
775 	struct spdk_nvme_ns *ns;
776 	uint32_t i;
777 
778 	memset(&ana_hdr, 0, sizeof(ana_hdr));
779 	ana_hdr.num_ana_group_desc = ctrlr->num_ns;
780 
781 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
782 	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));
783 
784 	buf += sizeof(ana_hdr);
785 	length -= sizeof(ana_hdr);
786 
787 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
788 
789 	for (i = 0; i < ctrlr->num_ns; i++) {
790 		ns = &ctrlr->ns[i];
791 
792 		if (!ns->is_active) {
793 			continue;
794 		}
795 
796 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
797 
798 		ana_desc->ana_group_id = ns->id;
799 		ana_desc->num_of_nsid = 1;
800 		ana_desc->ana_state = ns->ana_state;
801 		ana_desc->nsid[0] = ns->id;
802 
803 		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
804 		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);
805 
806 		buf += UT_ANA_DESC_SIZE;
807 		length -= UT_ANA_DESC_SIZE;
808 	}
809 }
810 
811 int
812 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
813 				 uint8_t log_page, uint32_t nsid,
814 				 void *payload, uint32_t payload_size,
815 				 uint64_t offset,
816 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
817 {
818 	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
819 		SPDK_CU_ASSERT_FATAL(offset == 0);
820 		ut_create_ana_log_page(ctrlr, payload, payload_size);
821 	}
822 
823 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
824 				      cb_fn, cb_arg);
825 }
826 
827 int
828 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
829 			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
830 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
831 {
832 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
833 }
834 
835 int
836 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
837 			      void *cmd_cb_arg,
838 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
839 {
840 	struct ut_nvme_req *req = NULL, *abort_req;
841 
842 	if (qpair == NULL) {
843 		qpair = &ctrlr->adminq;
844 	}
845 
846 	abort_req = calloc(1, sizeof(*abort_req));
847 	if (abort_req == NULL) {
848 		return -ENOMEM;
849 	}
850 
851 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
852 		if (req->cb_arg == cmd_cb_arg) {
853 			break;
854 		}
855 	}
856 
857 	if (req == NULL) {
858 		free(abort_req);
859 		return -ENOENT;
860 	}
861 
862 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
863 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
864 
865 	abort_req->opc = SPDK_NVME_OPC_ABORT;
866 	abort_req->cb_fn = cb_fn;
867 	abort_req->cb_arg = cb_arg;
868 
869 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
870 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
871 	abort_req->cpl.cdw0 = 0;
872 
873 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
874 	ctrlr->adminq.num_outstanding_reqs++;
875 
876 	return 0;
877 }
878 
879 int32_t
880 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
881 {
882 	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
883 }
884 
885 uint32_t
886 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
887 {
888 	return ns->id;
889 }
890 
891 struct spdk_nvme_ctrlr *
892 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
893 {
894 	return ns->ctrlr;
895 }
896 
897 static inline struct spdk_nvme_ns_data *
898 _nvme_ns_get_data(struct spdk_nvme_ns *ns)
899 {
900 	return &ns->ctrlr->nsdata[ns->id - 1];
901 }
902 
903 const struct spdk_nvme_ns_data *
904 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
905 {
906 	return _nvme_ns_get_data(ns);
907 }
908 
909 uint64_t
910 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
911 {
912 	return _nvme_ns_get_data(ns)->nsze;
913 }
914 
915 const struct spdk_uuid *
916 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
917 {
918 	return &ns->uuid;
919 }
920 
921 int
922 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
923 			      void *metadata, uint64_t lba, uint32_t lba_count,
924 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
925 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
926 {
927 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
928 }
929 
930 int
931 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
932 			       void *buffer, void *metadata, uint64_t lba,
933 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
934 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
935 {
936 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
937 }
938 
939 int
940 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
941 			       uint64_t lba, uint32_t lba_count,
942 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
943 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
944 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
945 			       uint16_t apptag_mask, uint16_t apptag)
946 {
947 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
948 }
949 
950 int
951 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
952 				uint64_t lba, uint32_t lba_count,
953 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
954 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
955 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
956 				uint16_t apptag_mask, uint16_t apptag)
957 {
958 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
959 }
960 
961 static bool g_ut_readv_ext_called;
962 int
963 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
964 			   uint64_t lba, uint32_t lba_count,
965 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
966 			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
967 			   spdk_nvme_req_next_sge_cb next_sge_fn,
968 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
969 {
970 	g_ut_readv_ext_called = true;
971 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
972 }
973 
974 static bool g_ut_writev_ext_called;
975 int
976 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
977 			    uint64_t lba, uint32_t lba_count,
978 			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
979 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
980 			    spdk_nvme_req_next_sge_cb next_sge_fn,
981 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
982 {
983 	g_ut_writev_ext_called = true;
984 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
985 }
986 
987 int
988 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
989 				  uint64_t lba, uint32_t lba_count,
990 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
991 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
992 				  spdk_nvme_req_next_sge_cb next_sge_fn,
993 				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
994 {
995 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
996 }
997 
998 int
999 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1000 				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1001 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1002 {
1003 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
1004 }
1005 
1006 int
1007 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1008 			      uint64_t lba, uint32_t lba_count,
1009 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1010 			      uint32_t io_flags)
1011 {
1012 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
1013 }
1014 
1015 struct spdk_nvme_poll_group *
1016 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
1017 {
1018 	struct spdk_nvme_poll_group *group;
1019 
1020 	group = calloc(1, sizeof(*group));
1021 	if (group == NULL) {
1022 		return NULL;
1023 	}
1024 
1025 	group->ctx = ctx;
1026 	if (table != NULL) {
1027 		group->accel_fn_table = *table;
1028 	}
1029 	TAILQ_INIT(&group->qpairs);
1030 
1031 	return group;
1032 }
1033 
1034 int
1035 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
1036 {
1037 	if (!TAILQ_EMPTY(&group->qpairs)) {
1038 		return -EBUSY;
1039 	}
1040 
1041 	free(group);
1042 
1043 	return 0;
1044 }
1045 
1046 int32_t
1047 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1048 				    uint32_t max_completions)
1049 {
1050 	struct ut_nvme_req *req, *tmp;
1051 	uint32_t num_completions = 0;
1052 
1053 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
1054 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
1055 		qpair->num_outstanding_reqs--;
1056 
1057 		req->cb_fn(req->cb_arg, &req->cpl);
1058 
1059 		free(req);
1060 		num_completions++;
1061 	}
1062 
1063 	return num_completions;
1064 }
1065 
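/* Two passes, roughly mirroring the real poll group: first drain completions on
 * connected qpairs, then report disconnected qpairs via disconnected_qpair_cb.
 */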
1066 int64_t
1067 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1068 		uint32_t completions_per_qpair,
1069 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1070 {
1071 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1072 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1073 
1074 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1075 
1076 	if (disconnected_qpair_cb == NULL) {
1077 		return -EINVAL;
1078 	}
1079 
1080 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1081 		if (qpair->is_connected) {
1082 			local_completions = spdk_nvme_qpair_process_completions(qpair,
1083 					    completions_per_qpair);
1084 			if (local_completions < 0 && error_reason == 0) {
1085 				error_reason = local_completions;
1086 			} else {
1087 				num_completions += local_completions;
1088 				assert(num_completions >= 0);
1089 			}
1090 		}
1091 	}
1092 
1093 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
1094 		if (!qpair->is_connected) {
1095 			disconnected_qpair_cb(qpair, group->ctx);
1096 		}
1097 	}
1098 
1099 	return error_reason ? error_reason : num_completions;
1100 }
1101 
1102 int
1103 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1104 			 struct spdk_nvme_qpair *qpair)
1105 {
1106 	CU_ASSERT(!qpair->is_connected);
1107 
1108 	qpair->poll_group = group;
1109 	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);
1110 
1111 	return 0;
1112 }
1113 
1114 int
1115 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1116 			    struct spdk_nvme_qpair *qpair)
1117 {
1118 	CU_ASSERT(!qpair->is_connected);
1119 
1120 	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);
1121 
1122 	return 0;
1123 }
1124 
1125 int
1126 spdk_bdev_register(struct spdk_bdev *bdev)
1127 {
1128 	return g_ut_register_bdev_status;
1129 }
1130 
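/* Destruct synchronously; if destruct() does not defer (rc <= 0), invoke the
 * unregister callback immediately with its return code.
 */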
1131 void
1132 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1133 {
1134 	int rc;
1135 
1136 	rc = bdev->fn_table->destruct(bdev->ctxt);
1137 	if (rc <= 0 && cb_fn != NULL) {
1138 		cb_fn(cb_arg, rc);
1139 	}
1140 }
1141 
1142 int
1143 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
1144 {
1145 	bdev->blockcnt = size;
1146 
1147 	return 0;
1148 }
1149 
1150 struct spdk_io_channel *
1151 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
1152 {
1153 	return (struct spdk_io_channel *)bdev_io->internal.ch;
1154 }
1155 
1156 void
1157 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
1158 {
1159 	bdev_io->internal.status = status;
1160 	bdev_io->internal.in_submit_request = false;
1161 }
1162 
1163 void
1164 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1165 {
1166 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1167 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1168 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1169 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1170 	} else {
1171 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1172 	}
1173 
1174 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1175 	bdev_io->internal.error.nvme.sct = sct;
1176 	bdev_io->internal.error.nvme.sc = sc;
1177 
1178 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1179 }
1180 
1181 void
1182 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1183 {
1184 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1185 
1186 	ut_bdev_io_set_buf(bdev_io);
1187 
1188 	cb(ch, bdev_io, true);
1189 }
1190 
1191 static void
1192 test_create_ctrlr(void)
1193 {
1194 	struct spdk_nvme_transport_id trid = {};
1195 	struct spdk_nvme_ctrlr ctrlr = {};
1196 	int rc;
1197 
1198 	ut_init_trid(&trid);
1199 
1200 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1201 	CU_ASSERT(rc == 0);
1202 
1203 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1204 
1205 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1206 	CU_ASSERT(rc == 0);
1207 
1208 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1209 
1210 	poll_threads();
1211 	spdk_delay_us(1000);
1212 	poll_threads();
1213 
1214 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1215 }
1216 
1217 static void
1218 test_reset_ctrlr(void)
1219 {
1220 	struct spdk_nvme_transport_id trid = {};
1221 	struct spdk_nvme_ctrlr ctrlr = {};
1222 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1223 	struct nvme_path_id *curr_trid;
1224 	struct spdk_io_channel *ch1, *ch2;
1225 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1226 	int rc;
1227 
1228 	ut_init_trid(&trid);
1229 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1230 
1231 	set_thread(0);
1232 
1233 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1234 	CU_ASSERT(rc == 0);
1235 
1236 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1237 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1238 
1239 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1240 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1241 
1242 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1243 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1244 
1245 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
1246 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1247 
1248 	set_thread(1);
1249 
1250 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1251 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1252 
1253 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
1254 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1255 
1256 	/* Reset starts from thread 1. */
1257 	set_thread(1);
1258 
1259 	/* Case 1: ctrlr is already being destructed. */
1260 	nvme_ctrlr->destruct = true;
1261 
1262 	rc = bdev_nvme_reset(nvme_ctrlr);
1263 	CU_ASSERT(rc == -ENXIO);
1264 
1265 	/* Case 2: reset is in progress. */
1266 	nvme_ctrlr->destruct = false;
1267 	nvme_ctrlr->resetting = true;
1268 
1269 	rc = bdev_nvme_reset(nvme_ctrlr);
1270 	CU_ASSERT(rc == -EBUSY);
1271 
1272 	/* Case 3: reset completes successfully. */
1273 	nvme_ctrlr->resetting = false;
1274 	curr_trid->is_failed = true;
1275 	ctrlr.is_failed = true;
1276 
1277 	rc = bdev_nvme_reset(nvme_ctrlr);
1278 	CU_ASSERT(rc == 0);
1279 	CU_ASSERT(nvme_ctrlr->resetting == true);
1280 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1281 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1282 
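	/* Step through the reset sequence: the qpair of each channel is deleted
	 * first, then the ctrlr is disconnected and reconnected, and finally the
	 * qpairs are recreated one channel at a time.
	 */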
1283 	poll_thread_times(0, 3);
1284 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
1285 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1286 
1287 	poll_thread_times(1, 1);
1288 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
1289 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
1290 	CU_ASSERT(ctrlr.is_failed == true);
1291 
1292 	poll_thread_times(0, 1);
1293 	CU_ASSERT(ctrlr.is_failed == false);
1294 
1295 	poll_thread_times(0, 1);
1296 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1297 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
1298 
1299 	poll_thread_times(1, 1);
1300 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1301 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1302 	CU_ASSERT(nvme_ctrlr->resetting == true);
1303 	CU_ASSERT(curr_trid->is_failed == true);
1304 
1305 	poll_thread_times(0, 2);
1306 	CU_ASSERT(nvme_ctrlr->resetting == true);
1307 	poll_thread_times(1, 1);
1308 	CU_ASSERT(nvme_ctrlr->resetting == true);
1309 	poll_thread_times(0, 1);
1310 	CU_ASSERT(nvme_ctrlr->resetting == false);
1311 	CU_ASSERT(curr_trid->is_failed == false);
1312 
1313 	spdk_put_io_channel(ch2);
1314 
1315 	set_thread(0);
1316 
1317 	spdk_put_io_channel(ch1);
1318 
1319 	poll_threads();
1320 
1321 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1322 	CU_ASSERT(rc == 0);
1323 
1324 	poll_threads();
1325 	spdk_delay_us(1000);
1326 	poll_threads();
1327 
1328 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1329 }
1330 
1331 static void
1332 test_race_between_reset_and_destruct_ctrlr(void)
1333 {
1334 	struct spdk_nvme_transport_id trid = {};
1335 	struct spdk_nvme_ctrlr ctrlr = {};
1336 	struct nvme_ctrlr *nvme_ctrlr;
1337 	struct spdk_io_channel *ch1, *ch2;
1338 	int rc;
1339 
1340 	ut_init_trid(&trid);
1341 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1342 
1343 	set_thread(0);
1344 
1345 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1346 	CU_ASSERT(rc == 0);
1347 
1348 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1349 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1350 
1351 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1352 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1353 
1354 	set_thread(1);
1355 
1356 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1357 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1358 
1359 	/* Reset starts from thread 1. */
1360 	set_thread(1);
1361 
1362 	rc = bdev_nvme_reset(nvme_ctrlr);
1363 	CU_ASSERT(rc == 0);
1364 	CU_ASSERT(nvme_ctrlr->resetting == true);
1365 
1366 	/* Try to destruct the ctrlr while it is being reset; the destruct will be deferred. */
1367 	set_thread(0);
1368 
1369 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1370 	CU_ASSERT(rc == 0);
1371 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1372 	CU_ASSERT(nvme_ctrlr->destruct == true);
1373 	CU_ASSERT(nvme_ctrlr->resetting == true);
1374 
1375 	poll_threads();
1376 
1377 	/* Reset completed, but the ctrlr is not destructed yet. */
1378 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1379 	CU_ASSERT(nvme_ctrlr->destruct == true);
1380 	CU_ASSERT(nvme_ctrlr->resetting == false);
1381 
1382 	/* New reset request is rejected. */
1383 	rc = bdev_nvme_reset(nvme_ctrlr);
1384 	CU_ASSERT(rc == -ENXIO);
1385 
1386 	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
1387 	 * However, there are still two channels, so the destruct is not completed yet.
1388 	 */
1389 	poll_threads();
1390 
1391 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1392 
1393 	set_thread(0);
1394 
1395 	spdk_put_io_channel(ch1);
1396 
1397 	set_thread(1);
1398 
1399 	spdk_put_io_channel(ch2);
1400 
1401 	poll_threads();
1402 	spdk_delay_us(1000);
1403 	poll_threads();
1404 
1405 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1406 }
1407 
1408 static void
1409 test_failover_ctrlr(void)
1410 {
1411 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
1412 	struct spdk_nvme_ctrlr ctrlr = {};
1413 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1414 	struct nvme_path_id *curr_trid, *next_trid;
1415 	struct spdk_io_channel *ch1, *ch2;
1416 	int rc;
1417 
1418 	ut_init_trid(&trid1);
1419 	ut_init_trid2(&trid2);
1420 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1421 
1422 	set_thread(0);
1423 
1424 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1425 	CU_ASSERT(rc == 0);
1426 
1427 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1428 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1429 
1430 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1431 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1432 
1433 	set_thread(1);
1434 
1435 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1436 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1437 
1438 	/* First, test one trid case. */
1439 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1440 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1441 
1442 	/* Failover starts from thread 1. */
1443 	set_thread(1);
1444 
1445 	/* Case 1: ctrlr is already being destructed. */
1446 	nvme_ctrlr->destruct = true;
1447 
1448 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1449 	CU_ASSERT(rc == -ENXIO);
1450 	CU_ASSERT(curr_trid->is_failed == false);
1451 
1452 	/* Case 2: reset is in progress. */
1453 	nvme_ctrlr->destruct = false;
1454 	nvme_ctrlr->resetting = true;
1455 
1456 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1457 	CU_ASSERT(rc == -EBUSY);
1458 
1459 	/* Case 3: reset completes successfully. */
1460 	nvme_ctrlr->resetting = false;
1461 
1462 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1463 	CU_ASSERT(rc == 0);
1464 
1465 	CU_ASSERT(nvme_ctrlr->resetting == true);
1466 	CU_ASSERT(curr_trid->is_failed == true);
1467 
1468 	poll_threads();
1469 
1470 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1471 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1472 
1473 	CU_ASSERT(nvme_ctrlr->resetting == false);
1474 	CU_ASSERT(curr_trid->is_failed == false);
1475 
1476 	set_thread(0);
1477 
1478 	/* Second, test two trids case. */
1479 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1480 	CU_ASSERT(rc == 0);
1481 
1482 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1483 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1484 	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
1485 	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
1486 
1487 	/* Failover starts from thread 1. */
1488 	set_thread(1);
1489 
1490 	/* Case 4: reset is in progress. */
1491 	nvme_ctrlr->resetting = true;
1492 
1493 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1494 	CU_ASSERT(rc == -EBUSY);
1495 
1496 	/* Case 5: failover completes successfully. */
1497 	nvme_ctrlr->resetting = false;
1498 
1499 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1500 	CU_ASSERT(rc == 0);
1501 
1502 	CU_ASSERT(nvme_ctrlr->resetting == true);
1503 
1504 	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1505 	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
1506 	CU_ASSERT(next_trid != curr_trid);
1507 	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
1508 	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
1509 
1510 	poll_threads();
1511 
1512 	CU_ASSERT(nvme_ctrlr->resetting == false);
1513 
1514 	spdk_put_io_channel(ch2);
1515 
1516 	set_thread(0);
1517 
1518 	spdk_put_io_channel(ch1);
1519 
1520 	poll_threads();
1521 
1522 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1523 	CU_ASSERT(rc == 0);
1524 
1525 	poll_threads();
1526 	spdk_delay_us(1000);
1527 	poll_threads();
1528 
1529 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1530 }
1531 
1532 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1533  *
1534  * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
1535  * trid1 was disconnected, and resetting the ctrlr failed repeatedly before the failover
1536  * from trid1 to trid2 started. While processing the failed reset, trid3 was added.
1537  * trid1 should have remained active, i.e., the head of the list, until the failover
1538  * completed. However, trid3 was inserted at the head of the list by mistake.
1539  *
1540  * I/O qpairs have a smaller polling period than the admin qpair. When a connection is
1541  * lost, an I/O qpair may detect the error earlier than the admin qpair. An I/O qpair
1542  * error triggers a ctrlr reset, whereas an admin qpair error triggers a ctrlr failover.
1543  * Hence the reset may be executed repeatedly before the failover starts, making the bug real.
1544  *
1545  * The following test verifies the fix.
1546  */
1547 static void
1548 test_race_between_failover_and_add_secondary_trid(void)
1549 {
1550 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1551 	struct spdk_nvme_ctrlr ctrlr = {};
1552 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1553 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1554 	struct spdk_io_channel *ch1, *ch2;
1555 	int rc;
1556 
1557 	ut_init_trid(&trid1);
1558 	ut_init_trid2(&trid2);
1559 	ut_init_trid3(&trid3);
1560 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1561 
1562 	set_thread(0);
1563 
1564 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1565 	CU_ASSERT(rc == 0);
1566 
1567 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1568 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1569 
1570 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1571 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1572 
1573 	set_thread(1);
1574 
1575 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1576 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1577 
1578 	set_thread(0);
1579 
1580 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1581 	CU_ASSERT(rc == 0);
1582 
1583 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1584 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1585 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1586 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1587 	path_id2 = TAILQ_NEXT(path_id1, link);
1588 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1589 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1590 
1591 	ctrlr.fail_reset = true;
1592 
1593 	rc = bdev_nvme_reset(nvme_ctrlr);
1594 	CU_ASSERT(rc == 0);
1595 
1596 	poll_threads();
1597 
1598 	CU_ASSERT(path_id1->is_failed == true);
1599 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1600 
1601 	rc = bdev_nvme_reset(nvme_ctrlr);
1602 	CU_ASSERT(rc == 0);
1603 
1604 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1605 	CU_ASSERT(rc == 0);
1606 
1607 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1608 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1609 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1610 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1611 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1612 	path_id3 = TAILQ_NEXT(path_id2, link);
1613 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1614 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1615 
1616 	poll_threads();
1617 
1618 	spdk_put_io_channel(ch1);
1619 
1620 	set_thread(1);
1621 
1622 	spdk_put_io_channel(ch2);
1623 
1624 	poll_threads();
1625 
1626 	set_thread(0);
1627 
1628 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1629 	CU_ASSERT(rc == 0);
1630 
1631 	poll_threads();
1632 	spdk_delay_us(1000);
1633 	poll_threads();
1634 
1635 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1636 }
1637 
1638 static void
1639 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1640 {
1641 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1642 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1643 }
1644 
1645 static void
1646 test_pending_reset(void)
1647 {
1648 	struct spdk_nvme_transport_id trid = {};
1649 	struct spdk_nvme_ctrlr *ctrlr;
1650 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1651 	const int STRING_SIZE = 32;
1652 	const char *attached_names[STRING_SIZE];
1653 	struct nvme_bdev *bdev;
1654 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1655 	struct spdk_io_channel *ch1, *ch2;
1656 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1657 	struct nvme_io_path *io_path1, *io_path2;
1658 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1659 	int rc;
1660 
1661 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1662 	ut_init_trid(&trid);
1663 
1664 	set_thread(0);
1665 
1666 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1667 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1668 
1669 	g_ut_attach_ctrlr_status = 0;
1670 	g_ut_attach_bdev_count = 1;
1671 
1672 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1673 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1674 	CU_ASSERT(rc == 0);
1675 
1676 	spdk_delay_us(1000);
1677 	poll_threads();
1678 
1679 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1680 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1681 
1682 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1683 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1684 
1685 	ch1 = spdk_get_io_channel(bdev);
1686 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1687 
1688 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1689 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1690 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1691 	ctrlr_ch1 = io_path1->ctrlr_ch;
1692 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1693 
1694 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1695 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1696 
1697 	set_thread(1);
1698 
1699 	ch2 = spdk_get_io_channel(bdev);
1700 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1701 
1702 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1703 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1704 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1705 	ctrlr_ch2 = io_path2->ctrlr_ch;
1706 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1707 
1708 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1709 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1710 
1711 	/* The first reset request is submitted on thread 1, and the second reset request
1712 	 * is submitted on thread 0 while processing the first request.
1713 	 */
1714 	bdev_nvme_submit_request(ch2, first_bdev_io);
1715 	CU_ASSERT(nvme_ctrlr->resetting == true);
1716 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1717 
1718 	set_thread(0);
1719 
1720 	bdev_nvme_submit_request(ch1, second_bdev_io);
1721 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1722 
1723 	poll_threads();
1724 
1725 	CU_ASSERT(nvme_ctrlr->resetting == false);
1726 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1727 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1728 
1729 	/* The first reset request is submitted on thread 1, and the second reset request
1730 	 * is submitted on thread 0 while processing the first request.
1731 	 *
1732 	 * The difference from the above scenario is that resetting the controller fails
1733 	 * while processing the first request. Hence both reset requests should fail.
1734 	 */
1735 	set_thread(1);
1736 
1737 	bdev_nvme_submit_request(ch2, first_bdev_io);
1738 	CU_ASSERT(nvme_ctrlr->resetting == true);
1739 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1740 
1741 	set_thread(0);
1742 
1743 	bdev_nvme_submit_request(ch1, second_bdev_io);
1744 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1745 
1746 	ctrlr->fail_reset = true;
1747 
1748 	poll_threads();
1749 
1750 	CU_ASSERT(nvme_ctrlr->resetting == false);
1751 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1752 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1753 
1754 	spdk_put_io_channel(ch1);
1755 
1756 	set_thread(1);
1757 
1758 	spdk_put_io_channel(ch2);
1759 
1760 	poll_threads();
1761 
1762 	set_thread(0);
1763 
1764 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1765 	CU_ASSERT(rc == 0);
1766 
1767 	poll_threads();
1768 	spdk_delay_us(1000);
1769 	poll_threads();
1770 
1771 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1772 
1773 	free(first_bdev_io);
1774 	free(second_bdev_io);
1775 }
1776 
1777 static void
1778 test_attach_ctrlr(void)
1779 {
1780 	struct spdk_nvme_transport_id trid = {};
1781 	struct spdk_nvme_ctrlr *ctrlr;
1782 	struct nvme_ctrlr *nvme_ctrlr;
1783 	const int STRING_SIZE = 32;
1784 	const char *attached_names[STRING_SIZE];
1785 	struct nvme_bdev *nbdev;
1786 	int rc;
1787 
1788 	set_thread(0);
1789 
1790 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1791 	ut_init_trid(&trid);
1792 
1793 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
1794 	 * by probe polling.
1795 	 */
1796 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1797 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1798 
1799 	ctrlr->is_failed = true;
1800 	g_ut_attach_ctrlr_status = -EIO;
1801 	g_ut_attach_bdev_count = 0;
1802 
1803 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1804 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1805 	CU_ASSERT(rc == 0);
1806 
1807 	spdk_delay_us(1000);
1808 	poll_threads();
1809 
1810 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1811 
1812 	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created. */
1813 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1814 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1815 
1816 	g_ut_attach_ctrlr_status = 0;
1817 
1818 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1819 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1820 	CU_ASSERT(rc == 0);
1821 
1822 	spdk_delay_us(1000);
1823 	poll_threads();
1824 
1825 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1826 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1827 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1828 
1829 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1830 	CU_ASSERT(rc == 0);
1831 
1832 	poll_threads();
1833 	spdk_delay_us(1000);
1834 	poll_threads();
1835 
1836 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1837 
1838 	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
1839 	 * one nvme_bdev are created.
1840 	 */
1841 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1842 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1843 
1844 	g_ut_attach_bdev_count = 1;
1845 
1846 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1847 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1848 	CU_ASSERT(rc == 0);
1849 
1850 	spdk_delay_us(1000);
1851 	poll_threads();
1852 
1853 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1854 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1855 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1856 
1857 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
1858 	attached_names[0] = NULL;
1859 
1860 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1861 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
1862 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
1863 
1864 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1865 	CU_ASSERT(rc == 0);
1866 
1867 	poll_threads();
1868 	spdk_delay_us(1000);
1869 	poll_threads();
1870 
1871 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1872 
1873 	/* The ctrlr has one namespace but an nvme_ctrlr with no namespace is
1874 	 * created because creating the nvme_bdev fails.
1875 	 */
1876 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1877 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1878 
1879 	g_ut_register_bdev_status = -EINVAL;
1880 	g_ut_attach_bdev_count = 0;
1881 
1882 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1883 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1884 	CU_ASSERT(rc == 0);
1885 
1886 	spdk_delay_us(1000);
1887 	poll_threads();
1888 
1889 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1890 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1891 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1892 
1893 	CU_ASSERT(attached_names[0] == NULL);
1894 
1895 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1896 	CU_ASSERT(rc == 0);
1897 
1898 	poll_threads();
1899 	spdk_delay_us(1000);
1900 	poll_threads();
1901 
1902 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1903 
1904 	g_ut_register_bdev_status = 0;
1905 }
1906 
1907 static void
1908 test_aer_cb(void)
1909 {
1910 	struct spdk_nvme_transport_id trid = {};
1911 	struct spdk_nvme_ctrlr *ctrlr;
1912 	struct nvme_ctrlr *nvme_ctrlr;
1913 	struct nvme_bdev *bdev;
1914 	const int STRING_SIZE = 32;
1915 	const char *attached_names[STRING_SIZE];
1916 	union spdk_nvme_async_event_completion event = {};
1917 	struct spdk_nvme_cpl cpl = {};
1918 	int rc;
1919 
1920 	set_thread(0);
1921 
1922 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1923 	ut_init_trid(&trid);
1924 
1925 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and
1926 	 * 4th namespaces are populated.
1927 	 */
1928 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
1929 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1930 
1931 	ctrlr->ns[0].is_active = false;
1932 
1933 	g_ut_attach_ctrlr_status = 0;
1934 	g_ut_attach_bdev_count = 3;
1935 
1936 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1937 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1938 	CU_ASSERT(rc == 0);
1939 
1940 	spdk_delay_us(1000);
1941 	poll_threads();
1942 
1943 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1944 	poll_threads();
1945 
1946 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1947 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1948 
1949 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
1950 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1951 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
1952 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1953 
1954 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
1955 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1956 	CU_ASSERT(bdev->disk.blockcnt == 1024);
1957 
1958 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
1959 	 * change the size of the 4th namespace.
1960 	 */
1961 	ctrlr->ns[0].is_active = true;
1962 	ctrlr->ns[2].is_active = false;
1963 	ctrlr->nsdata[3].nsze = 2048;
1964 
1965 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1966 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
1967 	cpl.cdw0 = event.raw;
1968 
1969 	aer_cb(nvme_ctrlr, &cpl);
1970 
1971 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
1972 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1973 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
1974 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1975 	CU_ASSERT(bdev->disk.blockcnt == 2048);
1976 
1977 	/* Change ANA state of active namespaces. */
1978 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
1979 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
1980 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
1981 
1982 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1983 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
1984 	cpl.cdw0 = event.raw;
1985 
1986 	aer_cb(nvme_ctrlr, &cpl);
1987 
1988 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1989 	poll_threads();
1990 
1991 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
1992 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
1993 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
1994 
1995 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1996 	CU_ASSERT(rc == 0);
1997 
1998 	poll_threads();
1999 	spdk_delay_us(1000);
2000 	poll_threads();
2001 
2002 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2003 }
2004 
2005 static void
2006 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2007 			enum spdk_bdev_io_type io_type)
2008 {
2009 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2010 	struct nvme_io_path *io_path;
2011 	struct spdk_nvme_qpair *qpair;
2012 
2013 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2014 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2015 	qpair = io_path->ctrlr_ch->qpair;
2016 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2017 
2018 	bdev_io->type = io_type;
2019 	bdev_io->internal.in_submit_request = true;
2020 
2021 	bdev_nvme_submit_request(ch, bdev_io);
2022 
2023 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2024 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2025 
2026 	poll_threads();
2027 
2028 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2029 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2030 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2031 }
2032 
2033 static void
2034 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2035 		   enum spdk_bdev_io_type io_type)
2036 {
2037 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2038 	struct nvme_io_path *io_path;
2039 	struct spdk_nvme_qpair *qpair;
2040 
2041 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2042 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2043 	qpair = io_path->ctrlr_ch->qpair;
2044 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2045 
2046 	bdev_io->type = io_type;
2047 	bdev_io->internal.in_submit_request = true;
2048 
2049 	bdev_nvme_submit_request(ch, bdev_io);
2050 
2051 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2052 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2053 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2054 }
2055 
2056 static void
2057 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2058 {
2059 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2060 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2061 	struct ut_nvme_req *req;
2062 	struct nvme_io_path *io_path;
2063 	struct spdk_nvme_qpair *qpair;
2064 
2065 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2066 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2067 	qpair = io_path->ctrlr_ch->qpair;
2068 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2069 
2070 	/* Compare-and-write is the only fused command for now. */
2071 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2072 	bdev_io->internal.in_submit_request = true;
2073 
2074 	bdev_nvme_submit_request(ch, bdev_io);
2075 
2076 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2077 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2078 	CU_ASSERT(bio->first_fused_submitted == true);
2079 
2080 	/* First outstanding request is compare operation. */
2081 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2082 	SPDK_CU_ASSERT_FATAL(req != NULL);
2083 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
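	/* Set cdw0 so that the completion callback can identify this completion as
	 * the compare operation of the fused pair.
	 */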
2084 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2085 
2086 	poll_threads();
2087 
2088 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2089 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2090 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2091 }
2092 
2093 static void
2094 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2095 			 struct spdk_nvme_ctrlr *ctrlr)
2096 {
2097 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2098 	bdev_io->internal.in_submit_request = true;
2099 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2100 
2101 	bdev_nvme_submit_request(ch, bdev_io);
2102 
2103 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2104 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2105 
2106 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
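	/* The admin command is completed by the adminq poller on thread 1, on which
	 * the ctrlr was created.
	 */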
2107 	poll_thread_times(1, 1);
2108 
2109 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2110 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2111 
2112 	poll_thread_times(0, 1);
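	/* The completion is then delivered to thread 0, which submitted the command. */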
2113 
2114 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2115 }
2116 
2117 static void
2118 test_submit_nvme_cmd(void)
2119 {
2120 	struct spdk_nvme_transport_id trid = {};
2121 	struct spdk_nvme_ctrlr *ctrlr;
2122 	struct nvme_ctrlr *nvme_ctrlr;
2123 	const int STRING_SIZE = 32;
2124 	const char *attached_names[STRING_SIZE];
2125 	struct nvme_bdev *bdev;
2126 	struct spdk_bdev_io *bdev_io;
2127 	struct spdk_io_channel *ch;
2128 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2129 	int rc;
2130 
2131 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2132 	ut_init_trid(&trid);
2133 
2134 	set_thread(1);
2135 
2136 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2137 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2138 
2139 	g_ut_attach_ctrlr_status = 0;
2140 	g_ut_attach_bdev_count = 1;
2141 
2142 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2143 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2144 	CU_ASSERT(rc == 0);
2145 
2146 	spdk_delay_us(1000);
2147 	poll_threads();
2148 
2149 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2150 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2151 
2152 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2153 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2154 
2155 	set_thread(0);
2156 
2157 	ch = spdk_get_io_channel(bdev);
2158 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2159 
2160 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2161 
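	/* A read submitted without iovs should go through the buffer allocation
	 * path first and still succeed.
	 */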
2162 	bdev_io->u.bdev.iovs = NULL;
2163 
2164 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2165 
2166 	ut_bdev_io_set_buf(bdev_io);
2167 
2168 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2169 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2170 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2171 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2172 
2173 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2174 
2175 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2176 
2177 	/* Verify that the ext NVMe API is called if bdev_io ext_opts is set. */
2178 	bdev_io->internal.ext_opts = &ext_io_opts;
2179 	g_ut_readv_ext_called = false;
2180 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2181 	CU_ASSERT(g_ut_readv_ext_called == true);
2182 	g_ut_readv_ext_called = false;
2183 
2184 	g_ut_writev_ext_called = false;
2185 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2186 	CU_ASSERT(g_ut_writev_ext_called == true);
2187 	g_ut_writev_ext_called = false;
2188 	bdev_io->internal.ext_opts = NULL;
2189 
2190 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2191 
2192 	free(bdev_io);
2193 
2194 	spdk_put_io_channel(ch);
2195 
2196 	poll_threads();
2197 
2198 	set_thread(1);
2199 
2200 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2201 	CU_ASSERT(rc == 0);
2202 
2203 	poll_threads();
2204 	spdk_delay_us(1000);
2205 	poll_threads();
2206 
2207 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2208 }
2209 
2210 static void
2211 test_add_remove_trid(void)
2212 {
2213 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2214 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2215 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2216 	const int STRING_SIZE = 32;
2217 	const char *attached_names[STRING_SIZE];
2218 	struct nvme_path_id *ctrid;
2219 	int rc;
2220 
2221 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2222 	ut_init_trid(&path1.trid);
2223 	ut_init_trid2(&path2.trid);
2224 	ut_init_trid3(&path3.trid);
2225 
2226 	set_thread(0);
2227 
2228 	g_ut_attach_ctrlr_status = 0;
2229 	g_ut_attach_bdev_count = 0;
2230 
2231 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2232 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2233 
2234 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2235 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2236 	CU_ASSERT(rc == 0);
2237 
2238 	spdk_delay_us(1000);
2239 	poll_threads();
2240 
2241 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2242 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2243 
2244 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2245 
2246 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2247 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2248 
2249 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2250 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2251 	CU_ASSERT(rc == 0);
2252 
2253 	spdk_delay_us(1000);
2254 	poll_threads();
2255 
2256 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2257 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2258 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2259 			break;
2260 		}
2261 	}
2262 	CU_ASSERT(ctrid != NULL);
2263 
2264 	/* trid3 is not in the registered list. */
2265 	rc = bdev_nvme_delete("nvme0", &path3);
2266 	CU_ASSERT(rc == -ENXIO);
2267 
2268 	/* trid2 is not used, and is simply removed. */
2269 	rc = bdev_nvme_delete("nvme0", &path2);
2270 	CU_ASSERT(rc == 0);
2271 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2272 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2273 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2274 	}
2275 
2276 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2277 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2278 
2279 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
2280 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2281 	CU_ASSERT(rc == 0);
2282 
2283 	spdk_delay_us(1000);
2284 	poll_threads();
2285 
2286 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2287 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2288 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2289 			break;
2290 		}
2291 	}
2292 	CU_ASSERT(ctrid != NULL);
2293 
2294 	/* path1 is currently used and path3 is an alternative path.
2295 	 * If we remove path1, the active path changes to path3.
2296 	 */
2297 	rc = bdev_nvme_delete("nvme0", &path1);
2298 	CU_ASSERT(rc == 0);
2299 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2300 	CU_ASSERT(nvme_ctrlr->resetting == true);
2301 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2302 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2303 	}
2304 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2305 
2306 	poll_threads();
2307 
2308 	CU_ASSERT(nvme_ctrlr->resetting == false);
2309 
2310 	/* path3 is the current and only path. If we remove path3, the corresponding
2311 	 * nvme_ctrlr is removed.
2312 	 */
2313 	rc = bdev_nvme_delete("nvme0", &path3);
2314 	CU_ASSERT(rc == 0);
2315 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2316 
2317 	poll_threads();
2318 	spdk_delay_us(1000);
2319 	poll_threads();
2320 
2321 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2322 
2323 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2324 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2325 
2326 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2327 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2328 	CU_ASSERT(rc == 0);
2329 
2330 	spdk_delay_us(1000);
2331 	poll_threads();
2332 
2333 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2334 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2335 
2336 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2337 
2338 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2339 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2340 
2341 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2342 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2343 	CU_ASSERT(rc == 0);
2344 
2345 	spdk_delay_us(1000);
2346 	poll_threads();
2347 
2348 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2349 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2350 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2351 			break;
2352 		}
2353 	}
2354 	CU_ASSERT(ctrid != NULL);
2355 
2356 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2357 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2358 	CU_ASSERT(rc == 0);
2359 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2360 
2361 	poll_threads();
2362 	spdk_delay_us(1000);
2363 	poll_threads();
2364 
2365 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2366 }
2367 
2368 static void
2369 test_abort(void)
2370 {
2371 	struct spdk_nvme_transport_id trid = {};
2372 	struct spdk_nvme_ctrlr *ctrlr;
2373 	struct nvme_ctrlr *nvme_ctrlr;
2374 	const int STRING_SIZE = 32;
2375 	const char *attached_names[STRING_SIZE];
2376 	struct nvme_bdev *bdev;
2377 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2378 	struct spdk_io_channel *ch1, *ch2;
2379 	struct nvme_bdev_channel *nbdev_ch1;
2380 	struct nvme_io_path *io_path1;
2381 	struct nvme_ctrlr_channel *ctrlr_ch1;
2382 	int rc;
2383 
2384 	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be
2385 	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
2386 	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
2387 	 */
2388 
2389 	ut_init_trid(&trid);
2390 
2391 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2392 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2393 
2394 	g_ut_attach_ctrlr_status = 0;
2395 	g_ut_attach_bdev_count = 1;
2396 
2397 	set_thread(1);
2398 
2399 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2400 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
2401 	CU_ASSERT(rc == 0);
2402 
2403 	spdk_delay_us(1000);
2404 	poll_threads();
2405 
2406 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2407 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2408 
2409 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2410 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2411 
2412 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2413 	ut_bdev_io_set_buf(write_io);
2414 
2415 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2416 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2417 
2418 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2419 
2420 	set_thread(0);
2421 
2422 	ch1 = spdk_get_io_channel(bdev);
2423 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2424 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2425 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2426 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2427 	ctrlr_ch1 = io_path1->ctrlr_ch;
2428 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2429 
2430 	set_thread(1);
2431 
2432 	ch2 = spdk_get_io_channel(bdev);
2433 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2434 
2435 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2436 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2437 
2438 	/* Aborting the already completed request should fail. */
2439 	write_io->internal.in_submit_request = true;
2440 	bdev_nvme_submit_request(ch1, write_io);
2441 	poll_threads();
2442 
2443 	CU_ASSERT(write_io->internal.in_submit_request == false);
2444 
2445 	abort_io->u.abort.bio_to_abort = write_io;
2446 	abort_io->internal.in_submit_request = true;
2447 
2448 	bdev_nvme_submit_request(ch1, abort_io);
2449 
2450 	poll_threads();
2451 
2452 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2453 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2454 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2455 
2456 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2457 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2458 
2459 	admin_io->internal.in_submit_request = true;
2460 	bdev_nvme_submit_request(ch1, admin_io);
2461 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2462 	poll_threads();
2463 
2464 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2465 
2466 	abort_io->u.abort.bio_to_abort = admin_io;
2467 	abort_io->internal.in_submit_request = true;
2468 
2469 	bdev_nvme_submit_request(ch2, abort_io);
2470 
2471 	poll_threads();
2472 
2473 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2474 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2475 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2476 
2477 	/* Aborting the write request should succeed. */
2478 	write_io->internal.in_submit_request = true;
2479 	bdev_nvme_submit_request(ch1, write_io);
2480 
2481 	CU_ASSERT(write_io->internal.in_submit_request == true);
2482 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2483 
2484 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2485 	abort_io->u.abort.bio_to_abort = write_io;
2486 	abort_io->internal.in_submit_request = true;
2487 
2488 	bdev_nvme_submit_request(ch1, abort_io);
2489 
2490 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2491 	poll_threads();
2492 
2493 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2494 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2495 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2496 	CU_ASSERT(write_io->internal.in_submit_request == false);
2497 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2498 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2499 
2500 	/* Aborting the admin request should succeed. */
2501 	admin_io->internal.in_submit_request = true;
2502 	bdev_nvme_submit_request(ch1, admin_io);
2503 
2504 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2505 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2506 
2507 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2508 	abort_io->u.abort.bio_to_abort = admin_io;
2509 	abort_io->internal.in_submit_request = true;
2510 
2511 	bdev_nvme_submit_request(ch2, abort_io);
2512 
2513 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2514 	poll_threads();
2515 
2516 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2517 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2518 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2519 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2520 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2521 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2522 
2523 	set_thread(0);
2524 
2525 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2526 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2527 	 * while the nvme_ctrlr is resetting.
2528 	 */
2529 	ctrlr_ch1->qpair->is_connected = false;
2530 
2531 	poll_thread_times(0, 3);
2532 
2533 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2534 	CU_ASSERT(nvme_ctrlr->resetting == true);
2535 
2536 	write_io->internal.in_submit_request = true;
2537 
2538 	bdev_nvme_submit_request(ch1, write_io);
2539 
2540 	CU_ASSERT(write_io->internal.in_submit_request == true);
2541 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2542 
2543 	/* Aborting the queued write request should succeed immediately. */
2544 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2545 	abort_io->u.abort.bio_to_abort = write_io;
2546 	abort_io->internal.in_submit_request = true;
2547 
2548 	bdev_nvme_submit_request(ch1, abort_io);
2549 
2550 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2551 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2552 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2553 	CU_ASSERT(write_io->internal.in_submit_request == false);
2554 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2555 
2556 	spdk_put_io_channel(ch1);
2557 
2558 	set_thread(1);
2559 
2560 	spdk_put_io_channel(ch2);
2561 
2562 	poll_threads();
2563 
2564 	free(write_io);
2565 	free(admin_io);
2566 	free(abort_io);
2567 
2568 	set_thread(1);
2569 
2570 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2571 	CU_ASSERT(rc == 0);
2572 
2573 	poll_threads();
2574 	spdk_delay_us(1000);
2575 	poll_threads();
2576 
2577 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2578 }
2579 
2580 static void
2581 test_get_io_qpair(void)
2582 {
2583 	struct spdk_nvme_transport_id trid = {};
2584 	struct spdk_nvme_ctrlr ctrlr = {};
2585 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2586 	struct spdk_io_channel *ch;
2587 	struct nvme_ctrlr_channel *ctrlr_ch;
2588 	struct spdk_nvme_qpair *qpair;
2589 	int rc;
2590 
2591 	ut_init_trid(&trid);
2592 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2593 
2594 	set_thread(0);
2595 
2596 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2597 	CU_ASSERT(rc == 0);
2598 
2599 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2600 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2601 
2602 	ch = spdk_get_io_channel(nvme_ctrlr);
2603 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2604 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2605 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2606 
2607 	qpair = bdev_nvme_get_io_qpair(ch);
2608 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2609 
2610 	spdk_put_io_channel(ch);
2611 
2612 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2613 	CU_ASSERT(rc == 0);
2614 
2615 	poll_threads();
2616 	spdk_delay_us(1000);
2617 	poll_threads();
2618 
2619 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2620 }
2621 
2622 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2623  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2624  * This test case guards against regressions in that scenario. spdk_bdev_unregister()
2625  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2626  */
2627 static void
2628 test_bdev_unregister(void)
2629 {
2630 	struct spdk_nvme_transport_id trid = {};
2631 	struct spdk_nvme_ctrlr *ctrlr;
2632 	struct nvme_ctrlr *nvme_ctrlr;
2633 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2634 	const int STRING_SIZE = 32;
2635 	const char *attached_names[STRING_SIZE];
2636 	struct nvme_bdev *bdev1, *bdev2;
2637 	int rc;
2638 
2639 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2640 	ut_init_trid(&trid);
2641 
2642 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2643 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2644 
2645 	g_ut_attach_ctrlr_status = 0;
2646 	g_ut_attach_bdev_count = 2;
2647 
2648 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2649 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2650 	CU_ASSERT(rc == 0);
2651 
2652 	spdk_delay_us(1000);
2653 	poll_threads();
2654 
2655 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2656 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2657 
2658 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2659 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2660 
2661 	bdev1 = nvme_ns1->bdev;
2662 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2663 
2664 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2665 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2666 
2667 	bdev2 = nvme_ns2->bdev;
2668 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2669 
2670 	bdev_nvme_destruct(&bdev1->disk);
2671 	bdev_nvme_destruct(&bdev2->disk);
2672 
2673 	poll_threads();
2674 
2675 	CU_ASSERT(nvme_ns1->bdev == NULL);
2676 	CU_ASSERT(nvme_ns2->bdev == NULL);
2677 
2678 	nvme_ctrlr->destruct = true;
2679 	_nvme_ctrlr_destruct(nvme_ctrlr);
2680 
2681 	poll_threads();
2682 	spdk_delay_us(1000);
2683 	poll_threads();
2684 
2685 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2686 }
2687 
2688 static void
2689 test_compare_ns(void)
2690 {
2691 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2692 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2693 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2694 
2695 	/* No IDs are defined. */
2696 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2697 
2698 	/* Only EUI64s are defined and do not match. */
2699 	nsdata1.eui64 = 0xABCDEF0123456789;
2700 	nsdata2.eui64 = 0xBBCDEF0123456789;
2701 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2702 
2703 	/* Only EUI64s are defined and match. */
2704 	nsdata2.eui64 = 0xABCDEF0123456789;
2705 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2706 
2707 	/* Only NGUIDs are defined and do not match. */
2708 	nsdata1.eui64 = 0x0;
2709 	nsdata2.eui64 = 0x0;
2710 	nsdata1.nguid[0] = 0x12;
2711 	nsdata2.nguid[0] = 0x10;
2712 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2713 
2714 	/* Only NGUIDs are defined and match. */
2715 	nsdata2.nguid[0] = 0x12;
2716 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2717 
2718 	/* Only UUIDs are defined and do not match. */
2719 	nsdata1.nguid[0] = 0x0;
2720 	nsdata2.nguid[0] = 0x0;
2721 	ns1.uuid.u.raw[0] = 0xAA;
2722 	ns2.uuid.u.raw[0] = 0xAB;
2723 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2724 
2725 	/* Only UUIDs are defined and match. */
2726 	ns1.uuid.u.raw[0] = 0xAB;
2727 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2728 
2729 	/* All of EUI64, NGUID, and UUID are defined and match. */
2730 	nsdata1.eui64 = 0x123456789ABCDEF;
2731 	nsdata2.eui64 = 0x123456789ABCDEF;
2732 	nsdata1.nguid[15] = 0x34;
2733 	nsdata2.nguid[15] = 0x34;
2734 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2735 }
2736 
2737 static void
2738 test_init_ana_log_page(void)
2739 {
2740 	struct spdk_nvme_transport_id trid = {};
2741 	struct spdk_nvme_ctrlr *ctrlr;
2742 	struct nvme_ctrlr *nvme_ctrlr;
2743 	const int STRING_SIZE = 32;
2744 	const char *attached_names[STRING_SIZE];
2745 	int rc;
2746 
2747 	set_thread(0);
2748 
2749 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2750 	ut_init_trid(&trid);
2751 
2752 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2753 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2754 
2755 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2756 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2757 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2758 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2759 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2760 
2761 	g_ut_attach_ctrlr_status = 0;
2762 	g_ut_attach_bdev_count = 5;
2763 
2764 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2765 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2766 	CU_ASSERT(rc == 0);
2767 
2768 	spdk_delay_us(1000);
2769 	poll_threads();
2770 
2771 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2772 	poll_threads();
2773 
2774 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2775 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2776 
2777 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2778 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2779 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2780 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2781 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2782 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2783 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2784 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2785 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2786 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2787 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2788 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2789 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2790 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2791 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2792 
2793 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2794 	CU_ASSERT(rc == 0);
2795 
2796 	poll_threads();
2797 	spdk_delay_us(1000);
2798 	poll_threads();
2799 
2800 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2801 }
2802 
2803 static void
2804 init_accel(void)
2805 {
2806 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2807 				sizeof(int), "accel_p");
2808 }
2809 
2810 static void
2811 fini_accel(void)
2812 {
2813 	spdk_io_device_unregister(g_accel_p, NULL);
2814 }
2815 
2816 static void
2817 test_get_memory_domains(void)
2818 {
2819 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2820 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2821 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2822 	struct spdk_memory_domain *domains[2] = {};
2823 	int rc = 0;
2824 
2825 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2826 
2827 	/* nvme controller doesn't have memory domains */
2828 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2829 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2830 	CU_ASSERT(rc == 0);
2831 
2832 	/* nvme controller has a memory domain */
2833 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2834 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2835 	CU_ASSERT(rc == 1);
2836 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2837 }
2838 
2839 static void
2840 test_reconnect_qpair(void)
2841 {
2842 	struct spdk_nvme_transport_id trid = {};
2843 	struct spdk_nvme_ctrlr *ctrlr;
2844 	struct nvme_ctrlr *nvme_ctrlr;
2845 	const int STRING_SIZE = 32;
2846 	const char *attached_names[STRING_SIZE];
2847 	struct nvme_bdev *bdev;
2848 	struct spdk_io_channel *ch1, *ch2;
2849 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
2850 	struct nvme_io_path *io_path1, *io_path2;
2851 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
2852 	int rc;
2853 
2854 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2855 	ut_init_trid(&trid);
2856 
2857 	set_thread(0);
2858 
2859 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2860 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2861 
2862 	g_ut_attach_ctrlr_status = 0;
2863 	g_ut_attach_bdev_count = 1;
2864 
2865 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2866 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2867 	CU_ASSERT(rc == 0);
2868 
2869 	spdk_delay_us(1000);
2870 	poll_threads();
2871 
2872 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2873 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2874 
2875 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2876 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2877 
2878 	ch1 = spdk_get_io_channel(bdev);
2879 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2880 
2881 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2882 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2883 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2884 	ctrlr_ch1 = io_path1->ctrlr_ch;
2885 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2886 
2887 	set_thread(1);
2888 
2889 	ch2 = spdk_get_io_channel(bdev);
2890 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2891 
2892 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
2893 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
2894 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
2895 	ctrlr_ch2 = io_path2->ctrlr_ch;
2896 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
2897 
2898 	/* If a qpair is disconnected, it is freed and then reconnected by
2899 	 * resetting the corresponding nvme_ctrlr.
2900 	 */
2901 	ctrlr_ch2->qpair->is_connected = false;
2902 	ctrlr->is_failed = true;
2903 
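	/* Polling thread 1 frees the disconnected qpair and starts resetting the
	 * nvme_ctrlr.
	 */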
2904 	poll_thread_times(1, 1);
2905 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2906 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2907 	CU_ASSERT(nvme_ctrlr->resetting == true);
2908 
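	/* The qpair of the other io_channel is freed as the reset proceeds. */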
2909 	poll_thread_times(0, 2);
2910 	poll_thread_times(1, 1);
2911 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2912 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2913 	CU_ASSERT(ctrlr->is_failed == true);
2914 
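	/* The ctrlr itself is reset and leaves the failed state. */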
2915 	poll_thread_times(0, 1);
2916 	CU_ASSERT(ctrlr->is_failed == false);
2917 
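	/* New qpairs are created for both io_channels. */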
2918 	poll_thread_times(0, 1);
2919 	poll_thread_times(1, 1);
2920 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2921 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
2922 	CU_ASSERT(nvme_ctrlr->resetting == true);
2923 
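	/* The reset completes. */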
2924 	poll_thread_times(0, 2);
2925 	poll_thread_times(1, 1);
2926 	poll_thread_times(0, 1);
2927 	CU_ASSERT(nvme_ctrlr->resetting == false);
2928 
2929 	poll_threads();
2930 
2931 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
2932 	 * fails, the qpair is just freed.
2933 	 */
2934 	ctrlr_ch2->qpair->is_connected = false;
2935 	ctrlr->is_failed = true;
2936 	ctrlr->fail_reset = true;
2937 
2938 	poll_thread_times(1, 1);
2939 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2940 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2941 	CU_ASSERT(nvme_ctrlr->resetting == true);
2942 
2943 	poll_thread_times(0, 2);
2944 	poll_thread_times(1, 1);
2945 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2946 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2947 	CU_ASSERT(ctrlr->is_failed == true);
2948 
2949 	poll_thread_times(0, 2);
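	/* The reset fails, and hence the qpairs are not recreated. */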
2950 	poll_thread_times(1, 1);
2951 	poll_thread_times(0, 1);
2952 	CU_ASSERT(ctrlr->is_failed == true);
2953 	CU_ASSERT(nvme_ctrlr->resetting == false);
2954 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2955 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2956 
2957 	poll_threads();
2958 
2959 	spdk_put_io_channel(ch2);
2960 
2961 	set_thread(0);
2962 
2963 	spdk_put_io_channel(ch1);
2964 
2965 	poll_threads();
2966 
2967 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2968 	CU_ASSERT(rc == 0);
2969 
2970 	poll_threads();
2971 	spdk_delay_us(1000);
2972 	poll_threads();
2973 
2974 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2975 }
2976 
2977 static void
2978 test_create_bdev_ctrlr(void)
2979 {
2980 	struct nvme_path_id path1 = {}, path2 = {};
2981 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
2982 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
2983 	const int STRING_SIZE = 32;
2984 	const char *attached_names[STRING_SIZE];
2985 	int rc;
2986 
2987 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2988 	ut_init_trid(&path1.trid);
2989 	ut_init_trid2(&path2.trid);
2990 
2991 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2992 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2993 
2994 	g_ut_attach_ctrlr_status = 0;
2995 	g_ut_attach_bdev_count = 0;
2996 
2997 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2998 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
2999 	CU_ASSERT(rc == 0);
3000 
3001 	spdk_delay_us(1000);
3002 	poll_threads();
3003 
3004 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3005 	poll_threads();
3006 
3007 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3008 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3009 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3010 
3011 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3012 	g_ut_attach_ctrlr_status = -EINVAL;
3013 
3014 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3015 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3016 
3017 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3018 
3019 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3020 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3021 	CU_ASSERT(rc == 0);
3022 
3023 	spdk_delay_us(1000);
3024 	poll_threads();
3025 
3026 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3027 	poll_threads();
3028 
3029 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3030 
3031 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3032 	g_ut_attach_ctrlr_status = 0;
3033 
3034 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3035 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3036 
3037 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3038 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3039 	CU_ASSERT(rc == 0);
3040 
3041 	spdk_delay_us(1000);
3042 	poll_threads();
3043 
3044 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3045 	poll_threads();
3046 
3047 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3048 
3049 	/* Delete two ctrlrs at once. */
3050 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3051 	CU_ASSERT(rc == 0);
3052 
3053 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3054 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3055 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3056 
3057 	poll_threads();
3058 	spdk_delay_us(1000);
3059 	poll_threads();
3060 
3061 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3062 
3063 	/* Add two ctrlrs and delete them one by one. */
3064 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3065 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3066 
3067 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3068 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3069 
3070 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3071 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3072 	CU_ASSERT(rc == 0);
3073 
3074 	spdk_delay_us(1000);
3075 	poll_threads();
3076 
3077 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3078 	poll_threads();
3079 
3080 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3081 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3082 	CU_ASSERT(rc == 0);
3083 
3084 	spdk_delay_us(1000);
3085 	poll_threads();
3086 
3087 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3088 	poll_threads();
3089 
3090 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3091 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3092 
3093 	rc = bdev_nvme_delete("nvme0", &path1);
3094 	CU_ASSERT(rc == 0);
3095 
3096 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3097 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3098 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3099 
3100 	poll_threads();
3101 	spdk_delay_us(1000);
3102 	poll_threads();
3103 
3104 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3105 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3106 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3107 
3108 	rc = bdev_nvme_delete("nvme0", &path2);
3109 	CU_ASSERT(rc == 0);
3110 
3111 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3112 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3113 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3114 
3115 	poll_threads();
3116 	spdk_delay_us(1000);
3117 	poll_threads();
3118 
3119 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3120 }
3121 
3122 static struct nvme_ns *
3123 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3124 {
3125 	struct nvme_ns *nvme_ns;
3126 
3127 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3128 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3129 			return nvme_ns;
3130 		}
3131 	}
3132 
3133 	return NULL;
3134 }
3135 
3136 static void
3137 test_add_multi_ns_to_bdev(void)
3138 {
3139 	struct nvme_path_id path1 = {}, path2 = {};
3140 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3141 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3142 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3143 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3144 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3145 	const int STRING_SIZE = 32;
3146 	const char *attached_names[STRING_SIZE];
3147 	int rc;
3148 
3149 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3150 	ut_init_trid(&path1.trid);
3151 	ut_init_trid2(&path2.trid);
3152 
3153 	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */
3154 
3155 	/* Attach the 1st ctrlr whose max number of namespaces is 5 and whose 1st, 3rd,
3156 	 * and 4th namespaces are populated.
3157 	 */
3158 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3159 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3160 
3161 	ctrlr1->ns[1].is_active = false;
3162 	ctrlr1->ns[4].is_active = false;
3163 	memset(&ctrlr1->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3164 	memset(&ctrlr1->ns[2].uuid, 0x3, sizeof(struct spdk_uuid));
3165 	memset(&ctrlr1->ns[3].uuid, 0x4, sizeof(struct spdk_uuid));
3166 
3167 	g_ut_attach_ctrlr_status = 0;
3168 	g_ut_attach_bdev_count = 3;
3169 
3170 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3171 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3172 	CU_ASSERT(rc == 0);
3173 
3174 	spdk_delay_us(1000);
3175 	poll_threads();
3176 
3177 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3178 	poll_threads();
3179 
3180 	/* Attach the 2nd ctrlr whose max number of namespaces is 5 and whose 1st, 2nd,
3181 	 * and 4th namespaces are populated. The uuid of the 4th namespace is different,
3182 	 * and hence adding the 4th namespace to the existing bdev should fail.
3183 	 */
3184 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3185 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3186 
3187 	ctrlr2->ns[2].is_active = false;
3188 	ctrlr2->ns[4].is_active = false;
3189 	memset(&ctrlr2->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3190 	memset(&ctrlr2->ns[1].uuid, 0x2, sizeof(struct spdk_uuid));
3191 	memset(&ctrlr2->ns[3].uuid, 0x44, sizeof(struct spdk_uuid));
3192 
3193 	g_ut_attach_ctrlr_status = 0;
3194 	g_ut_attach_bdev_count = 2;
3195 
3196 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3197 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3198 	CU_ASSERT(rc == 0);
3199 
3200 	spdk_delay_us(1000);
3201 	poll_threads();
3202 
3203 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3204 	poll_threads();
3205 
3206 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3207 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3208 
3209 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3210 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3211 
3212 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3213 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3214 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3215 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3216 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3217 
3218 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3219 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3220 
3221 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3222 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3223 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3224 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3225 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3226 
3227 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3228 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3229 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3230 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3231 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3232 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3233 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3234 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3235 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3236 
3237 	CU_ASSERT(bdev1->ref == 2);
3238 	CU_ASSERT(bdev2->ref == 1);
3239 	CU_ASSERT(bdev3->ref == 1);
3240 	CU_ASSERT(bdev4->ref == 1);
3241 
3242 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3243 	rc = bdev_nvme_delete("nvme0", &path1);
3244 	CU_ASSERT(rc == 0);
3245 
3246 	poll_threads();
3247 	spdk_delay_us(1000);
3248 	poll_threads();
3249 
3250 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3251 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3252 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3253 
3254 	rc = bdev_nvme_delete("nvme0", &path2);
3255 	CU_ASSERT(rc == 0);
3256 
3257 	poll_threads();
3258 	spdk_delay_us(1000);
3259 	poll_threads();
3260 
3261 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3262 
3263 	/* Test if an nvme_bdev whose namespace is shared between two ctrlrs
3264 	 * can be deleted when the bdev subsystem shuts down.
3265 	 */
3266 	g_ut_attach_bdev_count = 1;
3267 
3268 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3269 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3270 
3271 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3272 
3273 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3274 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3275 	CU_ASSERT(rc == 0);
3276 
3277 	spdk_delay_us(1000);
3278 	poll_threads();
3279 
3280 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3281 	poll_threads();
3282 
3283 	ut_init_trid2(&path2.trid);
3284 
3285 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3286 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3287 
3288 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3289 
3290 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3291 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3292 	CU_ASSERT(rc == 0);
3293 
3294 	spdk_delay_us(1000);
3295 	poll_threads();
3296 
3297 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3298 	poll_threads();
3299 
3300 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3301 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3302 
3303 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3304 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3305 
3306 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3307 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3308 
3309 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3310 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3311 
3312 	/* Check if an nvme_bdev has two nvme_ns. */
3313 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3314 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3315 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3316 
3317 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3318 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3319 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3320 
3321 	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down. */
3322 	bdev_nvme_destruct(&bdev1->disk);
3323 
3324 	poll_threads();
3325 
3326 	CU_ASSERT(nvme_ns1->bdev == NULL);
3327 	CU_ASSERT(nvme_ns2->bdev == NULL);
3328 
3329 	nvme_ctrlr1->destruct = true;
3330 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3331 
3332 	poll_threads();
3333 	spdk_delay_us(1000);
3334 	poll_threads();
3335 
3336 	nvme_ctrlr2->destruct = true;
3337 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3338 
3339 	poll_threads();
3340 	spdk_delay_us(1000);
3341 	poll_threads();
3342 
3343 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3344 }
3345 
3346 static void
3347 test_add_multi_io_paths_to_nbdev_ch(void)
3348 {
3349 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3350 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3351 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3352 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3353 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3354 	const int STRING_SIZE = 32;
3355 	const char *attached_names[STRING_SIZE];
3356 	struct nvme_bdev *bdev;
3357 	struct spdk_io_channel *ch;
3358 	struct nvme_bdev_channel *nbdev_ch;
3359 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3360 	int rc;
3361 
3362 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3363 	ut_init_trid(&path1.trid);
3364 	ut_init_trid2(&path2.trid);
3365 	ut_init_trid3(&path3.trid);
3366 	g_ut_attach_ctrlr_status = 0;
3367 	g_ut_attach_bdev_count = 1;
3368 
3369 	set_thread(1);
3370 
3371 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3372 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3373 
3374 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3375 
3376 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3377 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3378 	CU_ASSERT(rc == 0);
3379 
3380 	spdk_delay_us(1000);
3381 	poll_threads();
3382 
3383 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3384 	poll_threads();
3385 
3386 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3387 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3388 
3389 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3390 
3391 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3392 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3393 	CU_ASSERT(rc == 0);
3394 
3395 	spdk_delay_us(1000);
3396 	poll_threads();
3397 
3398 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3399 	poll_threads();
3400 
3401 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3402 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3403 
3404 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3405 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3406 
3407 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3408 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3409 
3410 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3411 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3412 
3413 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3414 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3415 
3416 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3417 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3418 
3419 	set_thread(0);
3420 
3421 	ch = spdk_get_io_channel(bdev);
3422 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3423 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3424 
3425 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3426 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3427 
3428 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3429 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3430 
3431 	set_thread(1);
3432 
3433 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3434 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3435 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3436 
3437 	memset(&ctrlr3->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3438 
3439 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
3440 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3441 	CU_ASSERT(rc == 0);
3442 
3443 	spdk_delay_us(1000);
3444 	poll_threads();
3445 
3446 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3447 	poll_threads();
3448 
3449 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3450 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3451 
3452 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3453 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3454 
3455 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3456 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3457 
3458 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3459 	rc = bdev_nvme_delete("nvme0", &path2);
3460 	CU_ASSERT(rc == 0);
3461 
3462 	poll_threads();
3463 	spdk_delay_us(1000);
3464 	poll_threads();
3465 
3466 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3467 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3468 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3469 
3470 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3471 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3472 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3473 
3474 	set_thread(0);
3475 
3476 	spdk_put_io_channel(ch);
3477 
3478 	poll_threads();
3479 
3480 	set_thread(1);
3481 
3482 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3483 	CU_ASSERT(rc == 0);
3484 
3485 	poll_threads();
3486 	spdk_delay_us(1000);
3487 	poll_threads();
3488 
3489 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3490 }
3491 
3492 static void
3493 test_admin_path(void)
3494 {
3495 	struct nvme_path_id path1 = {}, path2 = {};
3496 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3497 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3498 	const int STRING_SIZE = 32;
3499 	const char *attached_names[STRING_SIZE];
3500 	struct nvme_bdev *bdev;
3501 	struct spdk_io_channel *ch;
3502 	struct spdk_bdev_io *bdev_io;
3503 	int rc;
3504 
3505 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3506 	ut_init_trid(&path1.trid);
3507 	ut_init_trid2(&path2.trid);
3508 	g_ut_attach_ctrlr_status = 0;
3509 	g_ut_attach_bdev_count = 1;
3510 
3511 	set_thread(0);
3512 
3513 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3514 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3515 
3516 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3517 
3518 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3519 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3520 	CU_ASSERT(rc == 0);
3521 
3522 	spdk_delay_us(1000);
3523 	poll_threads();
3524 
3525 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3526 	poll_threads();
3527 
3528 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3529 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3530 
3531 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3532 
3533 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3534 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3535 	CU_ASSERT(rc == 0);
3536 
3537 	spdk_delay_us(1000);
3538 	poll_threads();
3539 
3540 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3541 	poll_threads();
3542 
3543 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3544 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3545 
3546 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3547 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3548 
3549 	ch = spdk_get_io_channel(bdev);
3550 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3551 
3552 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3553 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
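	/* Admin passthrough is routed at the nvme_bdev_ctrlr level: it may be
	 * submitted to any ctrlr that is not failed, independent of the
	 * per-channel I/O paths.
	 */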
3554 
3555 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3556 	 * submitted to ctrlr2.
3557 	 */
3558 	ctrlr1->is_failed = true;
3559 	bdev_io->internal.in_submit_request = true;
3560 
3561 	bdev_nvme_submit_request(ch, bdev_io);
3562 
3563 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3564 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3565 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3566 
3567 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3568 	poll_threads();
3569 
3570 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3571 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3572 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3573 
3574 	/* Both ctrlr1 and ctrlr2 are failed. The admin command should fail to be submitted. */
3575 	ctrlr2->is_failed = true;
3576 	bdev_io->internal.in_submit_request = true;
3577 
3578 	bdev_nvme_submit_request(ch, bdev_io);
3579 
3580 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3581 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3582 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3583 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3584 
3585 	free(bdev_io);
3586 
3587 	spdk_put_io_channel(ch);
3588 
3589 	poll_threads();
3590 
3591 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3592 	CU_ASSERT(rc == 0);
3593 
3594 	poll_threads();
3595 	spdk_delay_us(1000);
3596 	poll_threads();
3597 
3598 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3599 }
3600 
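/* Helper to find the io_path in a bdev channel that goes through the given
 * nvme_ctrlr. Each io_path's ctrlr_ch is a per-thread context of its
 * nvme_ctrlr io_device, so mapping the channel back to its io_device
 * identifies the owning nvme_ctrlr.
 */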
3601 static struct nvme_io_path *
3602 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3603 			struct nvme_ctrlr *nvme_ctrlr)
3604 {
3605 	struct nvme_io_path *io_path;
3606 	struct nvme_ctrlr *_nvme_ctrlr;
3607 
3608 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3609 		_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
3610 		if (_nvme_ctrlr == nvme_ctrlr) {
3611 			return io_path;
3612 		}
3613 	}
3614 
3615 	return NULL;
3616 }
3617 
3618 static void
3619 test_reset_bdev_ctrlr(void)
3620 {
3621 	struct nvme_path_id path1 = {}, path2 = {};
3622 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3623 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3624 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3625 	struct nvme_path_id *curr_path1, *curr_path2;
3626 	const int STRING_SIZE = 32;
3627 	const char *attached_names[STRING_SIZE];
3628 	struct nvme_bdev *bdev;
3629 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3630 	struct nvme_bdev_io *first_bio;
3631 	struct spdk_io_channel *ch1, *ch2;
3632 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3633 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3634 	int rc;
3635 
3636 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3637 	ut_init_trid(&path1.trid);
3638 	ut_init_trid2(&path2.trid);
3639 	g_ut_attach_ctrlr_status = 0;
3640 	g_ut_attach_bdev_count = 1;
3641 
3642 	set_thread(0);
3643 
3644 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3645 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3646 
3647 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3648 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3649 	CU_ASSERT(rc == 0);
3650 
3651 	spdk_delay_us(1000);
3652 	poll_threads();
3653 
3654 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3655 	poll_threads();
3656 
3657 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3658 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3659 
3660 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3661 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3662 	CU_ASSERT(rc == 0);
3663 
3664 	spdk_delay_us(1000);
3665 	poll_threads();
3666 
3667 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3668 	poll_threads();
3669 
3670 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3671 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3672 
3673 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3674 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3675 
3676 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3677 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3678 
3679 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3680 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3681 
3682 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3683 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3684 
3685 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3686 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3687 
3688 	set_thread(0);
3689 
3690 	ch1 = spdk_get_io_channel(bdev);
3691 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3692 
3693 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3694 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3695 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3696 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3697 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3698 
3699 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3700 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3701 
3702 	set_thread(1);
3703 
3704 	ch2 = spdk_get_io_channel(bdev);
3705 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3706 
3707 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3708 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3709 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3710 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3711 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3712 
3713 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3714 
3715 	/* The first reset request from bdev_io is submitted on thread 0.
3716 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
3717 	 *
3718 	 * A few extra polls are necessary after resetting ctrlr1 to check
3719 	 * pending reset requests for ctrlr1.
3720 	 */
3721 	ctrlr1->is_failed = true;
3722 	curr_path1->is_failed = true;
3723 	ctrlr2->is_failed = true;
3724 	curr_path2->is_failed = true;
3725 
3726 	set_thread(0);
3727 
3728 	bdev_nvme_submit_request(ch1, first_bdev_io);
3729 	CU_ASSERT(first_bio->io_path == io_path11);
3730 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3731 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3732 
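	/* Stepping the pollers one at a time walks the reset sequence: the qpair
	 * on each thread is deleted, the ctrlr is reconnected, and then the
	 * qpairs are recreated on each thread before the reset completes.
	 */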
3733 	poll_thread_times(0, 2);
3734 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3735 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3736 
3737 	poll_thread_times(1, 1);
3738 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3739 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3740 	CU_ASSERT(ctrlr1->is_failed == true);
3741 
3742 	poll_thread_times(0, 1);
3743 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3744 	CU_ASSERT(ctrlr1->is_failed == false);
3745 	CU_ASSERT(curr_path1->is_failed == true);
3746 
3747 	poll_thread_times(0, 1);
3748 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3749 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3750 
3751 	poll_thread_times(1, 1);
3752 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3753 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3754 
3755 	poll_thread_times(0, 2);
3756 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3757 	poll_thread_times(1, 1);
3758 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3759 	poll_thread_times(0, 2);
3760 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3761 	CU_ASSERT(curr_path1->is_failed == false);
3762 	CU_ASSERT(first_bio->io_path == io_path12);
3763 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3764 
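	/* The same delete/reconnect/recreate sequence now repeats for ctrlr2. */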
3765 	poll_thread_times(0, 2);
3766 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3767 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3768 
3769 	poll_thread_times(1, 1);
3770 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3771 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3772 	CU_ASSERT(ctrlr2->is_failed == true);
3773 
3774 	poll_thread_times(0, 2);
3775 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3776 	CU_ASSERT(ctrlr2->is_failed == false);
3777 	CU_ASSERT(curr_path2->is_failed == true);
3778 
3779 	poll_thread_times(0, 1);
3780 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3781 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3782 
3783 	poll_thread_times(1, 2);
3784 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3785 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3786 
3787 	poll_thread_times(0, 2);
3788 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3789 	poll_thread_times(1, 1);
3790 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3791 	poll_thread_times(0, 2);
3792 	CU_ASSERT(first_bio->io_path == NULL);
3793 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3794 	CU_ASSERT(curr_path2->is_failed == false);
3795 
3796 	poll_threads();
3797 
3798 	/* There is a race between two reset requests from bdev_io.
3799 	 *
3800 	 * The first reset request is submitted on thread 0, and the second reset
3801 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
3802 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
3803 	 * both reset requests go to ctrlr2. The first arrives earlier than the second,
3804 	 * so the second is pending on ctrlr2 again. After the first completes resetting
3805 	 * ctrlr2, both complete successfully.
3806 	 */
3807 	ctrlr1->is_failed = true;
3808 	curr_path1->is_failed = true;
3809 	ctrlr2->is_failed = true;
3810 	curr_path2->is_failed = true;
3811 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3812 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3813 
3814 	set_thread(0);
3815 
3816 	bdev_nvme_submit_request(ch1, first_bdev_io);
3817 
3818 	set_thread(1);
3819 
3820 	bdev_nvme_submit_request(ch2, second_bdev_io);
3821 
3822 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3823 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3824 	CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);
3825 
3826 	poll_threads();
3827 
3828 	CU_ASSERT(ctrlr1->is_failed == false);
3829 	CU_ASSERT(curr_path1->is_failed == false);
3830 	CU_ASSERT(ctrlr2->is_failed == false);
3831 	CU_ASSERT(curr_path2->is_failed == false);
3832 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3833 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3834 
3835 	set_thread(0);
3836 
3837 	spdk_put_io_channel(ch1);
3838 
3839 	set_thread(1);
3840 
3841 	spdk_put_io_channel(ch2);
3842 
3843 	poll_threads();
3844 
3845 	set_thread(0);
3846 
3847 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3848 	CU_ASSERT(rc == 0);
3849 
3850 	poll_threads();
3851 	spdk_delay_us(1000);
3852 	poll_threads();
3853 
3854 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3855 
3856 	free(first_bdev_io);
3857 	free(second_bdev_io);
3858 }
3859 
3860 static void
3861 test_find_io_path(void)
3862 {
3863 	struct nvme_bdev_channel nbdev_ch = {
3864 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
3865 	};
3866 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
3867 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
3868 	struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
3869 	struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };
3870 
3871 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
3872 
3873 	/* Test if an io_path whose ANA state is not accessible is excluded. */
3874 
3875 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3876 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3877 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3878 
3879 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3880 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3881 
3882 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3883 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3884 
3885 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3886 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3887 
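	/* bdev_nvme_find_io_path() caches its choice in current_io_path, so clear
	 * the cache before each case to force a fresh lookup.
	 */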
3888 	nbdev_ch.current_io_path = NULL;
3889 
3890 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3891 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3892 
3893 	nbdev_ch.current_io_path = NULL;
3894 
3895 	/* Test if an io_path whose qpair is resetting is excluded. */
3896 
3897 	ctrlr_ch1.qpair = NULL;
3898 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3899 
3900 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
3901 
3902 	/* Test that an ANA optimized state is prioritized and that, otherwise,
3903 	 * the first found ANA non-optimized state is used.
3904 	 */
3905 
3906 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3907 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3908 	ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
3909 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3910 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
3911 
3912 	nbdev_ch.current_io_path = NULL;
3913 
3914 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3915 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3916 
3917 	nbdev_ch.current_io_path = NULL;
3918 }
3919 
3920 static void
3921 test_retry_io_if_ana_state_is_updating(void)
3922 {
3923 	struct nvme_path_id path = {};
3924 	struct spdk_nvme_ctrlr *ctrlr;
3925 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3926 	struct nvme_ctrlr *nvme_ctrlr;
3927 	const int STRING_SIZE = 32;
3928 	const char *attached_names[STRING_SIZE];
3929 	struct nvme_bdev *bdev;
3930 	struct nvme_ns *nvme_ns;
3931 	struct spdk_bdev_io *bdev_io1;
3932 	struct spdk_io_channel *ch;
3933 	struct nvme_bdev_channel *nbdev_ch;
3934 	struct nvme_io_path *io_path;
3935 	struct nvme_ctrlr_channel *ctrlr_ch;
3936 	int rc;
3937 
3938 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3939 	ut_init_trid(&path.trid);
3940 
3941 	set_thread(0);
3942 
3943 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
3944 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3945 
3946 	g_ut_attach_ctrlr_status = 0;
3947 	g_ut_attach_bdev_count = 1;
3948 
3949 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
3950 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
3951 	CU_ASSERT(rc == 0);
3952 
3953 	spdk_delay_us(1000);
3954 	poll_threads();
3955 
3956 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3957 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3958 
3959 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
3960 	CU_ASSERT(nvme_ctrlr != NULL);
3961 
3962 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3963 	CU_ASSERT(bdev != NULL);
3964 
3965 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
3966 	CU_ASSERT(nvme_ns != NULL);
3967 
3968 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
3969 	ut_bdev_io_set_buf(bdev_io1);
3970 
3971 	ch = spdk_get_io_channel(bdev);
3972 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3973 
3974 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3975 
3976 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
3977 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
3978 
3979 	ctrlr_ch = io_path->ctrlr_ch;
3980 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
3981 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
3982 
3983 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
3984 
3985 	/* If qpair is connected, I/O should succeed. */
3986 	bdev_io1->internal.in_submit_request = true;
3987 
3988 	bdev_nvme_submit_request(ch, bdev_io1);
3989 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3990 
3991 	poll_threads();
3992 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
3993 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3994 
3995 	/* If ANA state of namespace is inaccessible, I/O should be queued. */
3996 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3997 	nbdev_ch->current_io_path = NULL;
3998 
3999 	bdev_io1->internal.in_submit_request = true;
4000 
4001 	bdev_nvme_submit_request(ch, bdev_io1);
4002 
4003 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4004 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4005 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4006 
4007 	/* ANA state became accessible while I/O was queued. */
4008 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4009 
4010 	spdk_delay_us(1000000);
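	/* Advance the clock by a second so that the retry poller fires and
	 * resubmits the queued I/O.
	 */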
4011 
4012 	poll_thread_times(0, 1);
4013 
4014 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4015 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4016 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4017 
4018 	poll_threads();
4019 
4020 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4021 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4022 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4023 
4024 	free(bdev_io1);
4025 
4026 	spdk_put_io_channel(ch);
4027 
4028 	poll_threads();
4029 
4030 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4031 	CU_ASSERT(rc == 0);
4032 
4033 	poll_threads();
4034 	spdk_delay_us(1000);
4035 	poll_threads();
4036 
4037 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4038 }
4039 
4040 static void
4041 test_retry_io_for_io_path_error(void)
4042 {
4043 	struct nvme_path_id path1 = {}, path2 = {};
4044 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4045 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4046 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4047 	const int STRING_SIZE = 32;
4048 	const char *attached_names[STRING_SIZE];
4049 	struct nvme_bdev *bdev;
4050 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4051 	struct spdk_bdev_io *bdev_io;
4052 	struct nvme_bdev_io *bio;
4053 	struct spdk_io_channel *ch;
4054 	struct nvme_bdev_channel *nbdev_ch;
4055 	struct nvme_io_path *io_path1, *io_path2;
4056 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
4057 	struct ut_nvme_req *req;
4058 	int rc;
4059 
4060 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4061 	ut_init_trid(&path1.trid);
4062 	ut_init_trid2(&path2.trid);
4063 
4064 	g_opts.bdev_retry_count = 1;
4065 
4066 	set_thread(0);
4067 
4068 	g_ut_attach_ctrlr_status = 0;
4069 	g_ut_attach_bdev_count = 1;
4070 
4071 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4072 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4073 
4074 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4075 
4076 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
4077 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
4078 	CU_ASSERT(rc == 0);
4079 
4080 	spdk_delay_us(1000);
4081 	poll_threads();
4082 
4083 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4084 	poll_threads();
4085 
4086 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4087 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4088 
4089 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4090 	CU_ASSERT(nvme_ctrlr1 != NULL);
4091 
4092 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4093 	CU_ASSERT(bdev != NULL);
4094 
4095 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4096 	CU_ASSERT(nvme_ns1 != NULL);
4097 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4098 
4099 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4100 	ut_bdev_io_set_buf(bdev_io);
4101 
4102 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4103 
4104 	ch = spdk_get_io_channel(bdev);
4105 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4106 
4107 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4108 
4109 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4110 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4111 
4112 	ctrlr_ch1 = io_path1->ctrlr_ch;
4113 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
4114 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1->qpair != NULL);
4115 
4116 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4117 
4118 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4119 	bdev_io->internal.in_submit_request = true;
4120 
4121 	bdev_nvme_submit_request(ch, bdev_io);
4122 
4123 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4124 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4125 
4126 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4127 	SPDK_CU_ASSERT_FATAL(req != NULL);
4128 
4129 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4130 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4131 	req->cpl.status.dnr = 1;
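	/* The DNR (do not retry) bit marks the failure as permanent for this
	 * command, so the bdev layer must not retry it.
	 */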
4132 
4133 	poll_thread_times(0, 1);
4134 
4135 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4136 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4137 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4138 
4139 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4140 	bdev_io->internal.in_submit_request = true;
4141 
4142 	bdev_nvme_submit_request(ch, bdev_io);
4143 
4144 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4145 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4146 
4147 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4148 	SPDK_CU_ASSERT_FATAL(req != NULL);
4149 
4150 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4151 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4152 
4153 	poll_thread_times(0, 1);
4154 
4155 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4156 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4157 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4158 
4159 	poll_threads();
4160 
4161 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4162 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4163 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4164 
4165 	/* Add io_path2 dynamically, and create a multipath configuration. */
4166 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4167 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4168 
4169 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4170 
4171 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
4172 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
4173 	CU_ASSERT(rc == 0);
4174 
4175 	spdk_delay_us(1000);
4176 	poll_threads();
4177 
4178 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4179 	poll_threads();
4180 
4181 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4182 	CU_ASSERT(nvme_ctrlr2 != NULL);
4183 
4184 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4185 	CU_ASSERT(nvme_ns2 != NULL);
4186 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4187 
4188 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4189 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4190 
4191 	ctrlr_ch2 = io_path2->ctrlr_ch;
4192 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
4193 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2->qpair != NULL);
4194 
4195 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4196 	 * and deleted. Hence the I/O is aborted. But io_path2 is available,
4197 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4198 	 */
4199 	bdev_io->internal.in_submit_request = true;
4200 
4201 	bdev_nvme_submit_request(ch, bdev_io);
4202 
4203 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4204 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4205 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4206 
4207 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4208 	SPDK_CU_ASSERT_FATAL(req != NULL);
4209 
4210 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4211 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4212 
4213 	poll_thread_times(0, 1);
4214 
4215 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4216 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4217 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4218 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4219 
4220 	bdev_nvme_destroy_qpair(ctrlr_ch1);
4221 
4222 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
4223 
4224 	poll_threads();
4225 
4226 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4227 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4228 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4229 
4230 	free(bdev_io);
4231 
4232 	spdk_put_io_channel(ch);
4233 
4234 	poll_threads();
4235 
4236 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4237 	CU_ASSERT(rc == 0);
4238 
4239 	poll_threads();
4240 	spdk_delay_us(1000);
4241 	poll_threads();
4242 
4243 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4244 
4245 	g_opts.bdev_retry_count = 0;
4246 }
4247 
4248 static void
4249 test_retry_io_count(void)
4250 {
4251 	struct nvme_path_id path = {};
4252 	struct spdk_nvme_ctrlr *ctrlr;
4253 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4254 	struct nvme_ctrlr *nvme_ctrlr;
4255 	const int STRING_SIZE = 32;
4256 	const char *attached_names[STRING_SIZE];
4257 	struct nvme_bdev *bdev;
4258 	struct nvme_ns *nvme_ns;
4259 	struct spdk_bdev_io *bdev_io;
4260 	struct nvme_bdev_io *bio;
4261 	struct spdk_io_channel *ch;
4262 	struct nvme_bdev_channel *nbdev_ch;
4263 	struct nvme_io_path *io_path;
4264 	struct nvme_ctrlr_channel *ctrlr_ch;
4265 	struct ut_nvme_req *req;
4266 	int rc;
4267 
4268 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4269 	ut_init_trid(&path.trid);
4270 
4271 	set_thread(0);
4272 
4273 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4274 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4275 
4276 	g_ut_attach_ctrlr_status = 0;
4277 	g_ut_attach_bdev_count = 1;
4278 
4279 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4280 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
4281 	CU_ASSERT(rc == 0);
4282 
4283 	spdk_delay_us(1000);
4284 	poll_threads();
4285 
4286 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4287 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4288 
4289 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4290 	CU_ASSERT(nvme_ctrlr != NULL);
4291 
4292 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4293 	CU_ASSERT(bdev != NULL);
4294 
4295 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4296 	CU_ASSERT(nvme_ns != NULL);
4297 
4298 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4299 	ut_bdev_io_set_buf(bdev_io);
4300 
4301 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4302 
4303 	ch = spdk_get_io_channel(bdev);
4304 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4305 
4306 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4307 
4308 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4309 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4310 
4311 	ctrlr_ch = io_path->ctrlr_ch;
4312 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4313 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4314 
4315 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4316 
4317 	/* If I/O is aborted by request, it should not be retried. */
4318 	g_opts.bdev_retry_count = 1;
4319 
4320 	bdev_io->internal.in_submit_request = true;
4321 
4322 	bdev_nvme_submit_request(ch, bdev_io);
4323 
4324 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4325 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4326 
4327 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4328 	SPDK_CU_ASSERT_FATAL(req != NULL);
4329 
4330 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4331 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4332 
4333 	poll_thread_times(0, 1);
4334 
4335 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4336 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4337 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4338 
4339 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4340 	 * the failed I/O should not be retried.
4341 	 */
4342 	g_opts.bdev_retry_count = 4;
4343 
4344 	bdev_io->internal.in_submit_request = true;
4345 
4346 	bdev_nvme_submit_request(ch, bdev_io);
4347 
4348 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4349 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4350 
4351 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4352 	SPDK_CU_ASSERT_FATAL(req != NULL);
4353 
4354 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4355 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4356 	bio->retry_count = 4;
4357 
4358 	poll_thread_times(0, 1);
4359 
4360 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4361 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4362 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4363 
4364 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4365 	g_opts.bdev_retry_count = -1;
4366 
4367 	bdev_io->internal.in_submit_request = true;
4368 
4369 	bdev_nvme_submit_request(ch, bdev_io);
4370 
4371 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4372 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4373 
4374 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4375 	SPDK_CU_ASSERT_FATAL(req != NULL);
4376 
4377 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4378 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4379 	bio->retry_count = 4;
4380 
4381 	poll_thread_times(0, 1);
4382 
4383 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4384 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4385 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4386 
4387 	poll_threads();
4388 
4389 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4390 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4391 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4392 
4393 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4394 	 * the failed I/O should be retried.
4395 	 */
4396 	g_opts.bdev_retry_count = 4;
4397 
4398 	bdev_io->internal.in_submit_request = true;
4399 
4400 	bdev_nvme_submit_request(ch, bdev_io);
4401 
4402 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4403 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4404 
4405 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4406 	SPDK_CU_ASSERT_FATAL(req != NULL);
4407 
4408 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4409 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4410 	bio->retry_count = 3;
4411 
4412 	poll_thread_times(0, 1);
4413 
4414 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4415 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4416 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4417 
4418 	poll_threads();
4419 
4420 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4421 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4422 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4423 
4424 	free(bdev_io);
4425 
4426 	spdk_put_io_channel(ch);
4427 
4428 	poll_threads();
4429 
4430 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4431 	CU_ASSERT(rc == 0);
4432 
4433 	poll_threads();
4434 	spdk_delay_us(1000);
4435 	poll_threads();
4436 
4437 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4438 
4439 	g_opts.bdev_retry_count = 0;
4440 }
4441 
4442 static void
4443 test_concurrent_read_ana_log_page(void)
4444 {
4445 	struct spdk_nvme_transport_id trid = {};
4446 	struct spdk_nvme_ctrlr *ctrlr;
4447 	struct nvme_ctrlr *nvme_ctrlr;
4448 	const int STRING_SIZE = 32;
4449 	const char *attached_names[STRING_SIZE];
4450 	int rc;
4451 
4452 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4453 	ut_init_trid(&trid);
4454 
4455 	set_thread(0);
4456 
4457 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4458 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4459 
4460 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4461 
4462 	g_ut_attach_ctrlr_status = 0;
4463 	g_ut_attach_bdev_count = 1;
4464 
4465 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
4466 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
4467 	CU_ASSERT(rc == 0);
4468 
4469 	spdk_delay_us(1000);
4470 	poll_threads();
4471 
4472 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4473 	poll_threads();
4474 
4475 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4476 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4477 
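	/* ana_log_page_updating serializes readers: while one read is in flight,
	 * further read requests are rejected rather than queued.
	 */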
4478 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4479 
4480 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4481 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4482 
4483 	/* A subsequent read request should be rejected. */
4484 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4485 
4486 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4487 
4488 	set_thread(1);
4489 
4490 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4491 
4492 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4493 
4494 	/* A reset request issued while reading the ANA log page should not be rejected. */
4495 	rc = bdev_nvme_reset(nvme_ctrlr);
4496 	CU_ASSERT(rc == 0);
4497 
4498 	poll_threads();
4499 
4500 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4501 	poll_threads();
4502 
4503 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4504 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4505 
4506 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4507 	rc = bdev_nvme_reset(nvme_ctrlr);
4508 	CU_ASSERT(rc == 0);
4509 
4510 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4511 
4512 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4513 
4514 	set_thread(0);
4515 
4516 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4517 	CU_ASSERT(rc == 0);
4518 
4519 	poll_threads();
4520 	spdk_delay_us(1000);
4521 	poll_threads();
4522 
4523 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4524 }
4525 
4526 static void
4527 test_retry_io_for_ana_error(void)
4528 {
4529 	struct nvme_path_id path = {};
4530 	struct spdk_nvme_ctrlr *ctrlr;
4531 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4532 	struct nvme_ctrlr *nvme_ctrlr;
4533 	const int STRING_SIZE = 32;
4534 	const char *attached_names[STRING_SIZE];
4535 	struct nvme_bdev *bdev;
4536 	struct nvme_ns *nvme_ns;
4537 	struct spdk_bdev_io *bdev_io;
4538 	struct nvme_bdev_io *bio;
4539 	struct spdk_io_channel *ch;
4540 	struct nvme_bdev_channel *nbdev_ch;
4541 	struct nvme_io_path *io_path;
4542 	struct nvme_ctrlr_channel *ctrlr_ch;
4543 	struct ut_nvme_req *req;
4544 	uint64_t now;
4545 	int rc;
4546 
4547 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4548 	ut_init_trid(&path.trid);
4549 
4550 	g_opts.bdev_retry_count = 1;
4551 
4552 	set_thread(0);
4553 
4554 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4555 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4556 
4557 	g_ut_attach_ctrlr_status = 0;
4558 	g_ut_attach_bdev_count = 1;
4559 
4560 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4561 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
4562 	CU_ASSERT(rc == 0);
4563 
4564 	spdk_delay_us(1000);
4565 	poll_threads();
4566 
4567 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4568 	poll_threads();
4569 
4570 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4571 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4572 
4573 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4574 	CU_ASSERT(nvme_ctrlr != NULL);
4575 
4576 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4577 	CU_ASSERT(bdev != NULL);
4578 
4579 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4580 	CU_ASSERT(nvme_ns != NULL);
4581 
4582 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4583 	ut_bdev_io_set_buf(bdev_io);
4584 
4585 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4586 
4587 	ch = spdk_get_io_channel(bdev);
4588 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4589 
4590 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4591 
4592 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4593 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4594 
4595 	ctrlr_ch = io_path->ctrlr_ch;
4596 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4597 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4598 
4599 	now = spdk_get_ticks();
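	/* Record the current tick count so that the retry deadlines set by the
	 * driver can be checked against it below.
	 */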
4600 
4601 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4602 
4603 	/* If I/O gets an ANA error, it should be queued, the corresponding
4604 	 * namespace should be frozen, and its ANA state should be updated.
4605 	 */
4606 	bdev_io->internal.in_submit_request = true;
4607 
4608 	bdev_nvme_submit_request(ch, bdev_io);
4609 
4610 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4611 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4612 
4613 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4614 	SPDK_CU_ASSERT_FATAL(req != NULL);
4615 
4616 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4617 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4618 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4619 
4620 	poll_thread_times(0, 1);
4621 
4622 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4623 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4624 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4625 	/* I/O should be retried immediately. */
4626 	CU_ASSERT(bio->retry_ticks == now);
4627 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4628 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4629 
4630 	poll_threads();
4631 
4632 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4633 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4634 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4635 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4636 	/* I/O should be retried after a second if no I/O path was found but
4637 	 * one may become available.
4638 	 */
4639 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4640 
4641 	/* The namespace should be unfrozen after its ANA state update completes. */
4642 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4643 	poll_threads();
4644 
4645 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4646 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4647 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4648 
4649 	/* Retrying the queued I/O should succeed. */
4650 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4651 	poll_threads();
4652 
4653 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4654 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4655 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4656 
4657 	free(bdev_io);
4658 
4659 	spdk_put_io_channel(ch);
4660 
4661 	poll_threads();
4662 
4663 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4664 	CU_ASSERT(rc == 0);
4665 
4666 	poll_threads();
4667 	spdk_delay_us(1000);
4668 	poll_threads();
4669 
4670 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4671 
4672 	g_opts.bdev_retry_count = 0;
4673 }
4674 
4675 static void
4676 test_retry_admin_passthru_for_path_error(void)
4677 {
4678 	struct nvme_path_id path1 = {}, path2 = {};
4679 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4680 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4681 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4682 	const int STRING_SIZE = 32;
4683 	const char *attached_names[STRING_SIZE];
4684 	struct nvme_bdev *bdev;
4685 	struct spdk_bdev_io *admin_io;
4686 	struct spdk_io_channel *ch;
4687 	struct ut_nvme_req *req;
4688 	int rc;
4689 
4690 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4691 	ut_init_trid(&path1.trid);
4692 	ut_init_trid2(&path2.trid);
4693 
4694 	g_opts.bdev_retry_count = 1;
4695 
4696 	set_thread(0);
4697 
4698 	g_ut_attach_ctrlr_status = 0;
4699 	g_ut_attach_bdev_count = 1;
4700 
4701 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4702 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4703 
4704 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4705 
4706 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
4707 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
4708 	CU_ASSERT(rc == 0);
4709 
4710 	spdk_delay_us(1000);
4711 	poll_threads();
4712 
4713 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4714 	poll_threads();
4715 
4716 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4717 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4718 
4719 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4720 	CU_ASSERT(nvme_ctrlr1 != NULL);
4721 
4722 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4723 	CU_ASSERT(bdev != NULL);
4724 
4725 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4726 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4727 
4728 	ch = spdk_get_io_channel(bdev);
4729 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4730 
4731 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4732 
4733 	/* Admin passthrough got a path error, but it should not be retried if DNR is set. */
4734 	admin_io->internal.in_submit_request = true;
4735 
4736 	bdev_nvme_submit_request(ch, admin_io);
4737 
4738 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4739 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4740 
4741 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4742 	SPDK_CU_ASSERT_FATAL(req != NULL);
4743 
4744 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4745 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4746 	req->cpl.status.dnr = 1;
4747 
4748 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4749 	poll_thread_times(0, 2);
4750 
4751 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4752 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4753 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4754 
4755 	/* Admin passthrough got a path error, but it should succeed after retry. */
4756 	admin_io->internal.in_submit_request = true;
4757 
4758 	bdev_nvme_submit_request(ch, admin_io);
4759 
4760 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4761 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4762 
4763 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4764 	SPDK_CU_ASSERT_FATAL(req != NULL);
4765 
4766 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4767 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4768 
4769 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4770 	poll_thread_times(0, 2);
4771 
4772 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4773 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4774 
4775 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4776 	poll_threads();
4777 
4778 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4779 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4780 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4781 
4782 	/* Add ctrlr2 dynamically, and create a multipath configuration. */
4783 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4784 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4785 
4786 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4787 
4788 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
4789 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
4790 	CU_ASSERT(rc == 0);
4791 
4792 	spdk_delay_us(1000);
4793 	poll_threads();
4794 
4795 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4796 	poll_threads();
4797 
4798 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4799 	CU_ASSERT(nvme_ctrlr2 != NULL);
4800 
4801 	/* Admin passthrough was submitted to ctrlr1, but ctrlr1 was failed.
4802 	 * Hence the admin passthrough was aborted. But ctrlr2 is available.
4803 	 * So after a retry, the admin passthrough is submitted to ctrlr2 and
4804 	 * should succeed.
4805 	 */
4806 	admin_io->internal.in_submit_request = true;
4807 
4808 	bdev_nvme_submit_request(ch, admin_io);
4809 
4810 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4811 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4812 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4813 
4814 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4815 	SPDK_CU_ASSERT_FATAL(req != NULL);
4816 
4817 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4818 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4819 	ctrlr1->is_failed = true;
4820 
4821 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4822 	poll_thread_times(0, 2);
4823 
4824 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4825 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4826 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4827 
4828 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4829 	poll_threads();
4830 
4831 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4832 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4833 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4834 
4835 	free(admin_io);
4836 
4837 	spdk_put_io_channel(ch);
4838 
4839 	poll_threads();
4840 
4841 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4842 	CU_ASSERT(rc == 0);
4843 
4844 	poll_threads();
4845 	spdk_delay_us(1000);
4846 	poll_threads();
4847 
4848 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4849 
4850 	g_opts.bdev_retry_count = 0;
4851 }
4852 
4853 static void
4854 test_retry_admin_passthru_by_count(void)
4855 {
4856 	struct nvme_path_id path = {};
4857 	struct spdk_nvme_ctrlr *ctrlr;
4858 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4859 	struct nvme_ctrlr *nvme_ctrlr;
4860 	const int STRING_SIZE = 32;
4861 	const char *attached_names[STRING_SIZE];
4862 	struct nvme_bdev *bdev;
4863 	struct spdk_bdev_io *admin_io;
4864 	struct nvme_bdev_io *admin_bio;
4865 	struct spdk_io_channel *ch;
4866 	struct ut_nvme_req *req;
4867 	int rc;
4868 
4869 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4870 	ut_init_trid(&path.trid);
4871 
4872 	set_thread(0);
4873 
4874 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4875 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4876 
4877 	g_ut_attach_ctrlr_status = 0;
4878 	g_ut_attach_bdev_count = 1;
4879 
4880 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4881 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
4882 	CU_ASSERT(rc == 0);
4883 
4884 	spdk_delay_us(1000);
4885 	poll_threads();
4886 
4887 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4888 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4889 
4890 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4891 	CU_ASSERT(nvme_ctrlr != NULL);
4892 
4893 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4894 	CU_ASSERT(bdev != NULL);
4895 
4896 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4897 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4898 
4899 	admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx;
4900 
4901 	ch = spdk_get_io_channel(bdev);
4902 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4903 
4904 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4905 
4906 	/* If admin passthrough is aborted by request, it should not be retried. */
4907 	g_opts.bdev_retry_count = 1;
4908 
4909 	admin_io->internal.in_submit_request = true;
4910 
4911 	bdev_nvme_submit_request(ch, admin_io);
4912 
4913 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4914 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4915 
4916 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
4917 	SPDK_CU_ASSERT_FATAL(req != NULL);
4918 
4919 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4920 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4921 
4922 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4923 	poll_thread_times(0, 2);
4924 
4925 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4926 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4927 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4928 
4929 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4930 	 * the failed admin passthrough should not be retried.
4931 	 */
4932 	g_opts.bdev_retry_count = 4;
4933 
4934 	admin_io->internal.in_submit_request = true;
4935 
4936 	bdev_nvme_submit_request(ch, admin_io);
4937 
4938 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4939 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4940 
4941 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
4942 	SPDK_CU_ASSERT_FATAL(req != NULL);
4943 
4944 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
4945 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4946 	admin_bio->retry_count = 4;
4947 
4948 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4949 	poll_thread_times(0, 2);
4950 
4951 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4952 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4953 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4954 
4955 	free(admin_io);
4956 
4957 	spdk_put_io_channel(ch);
4958 
4959 	poll_threads();
4960 
4961 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4962 	CU_ASSERT(rc == 0);
4963 
4964 	poll_threads();
4965 	spdk_delay_us(1000);
4966 	poll_threads();
4967 
4968 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4969 
4970 	g_opts.bdev_retry_count = 0;
4971 }
4972 
4973 static void
4974 test_check_multipath_params(void)
4975 {
4976 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
4977 	 * 3rd parameter is fast_io_fail_timeout_sec.
4978 	 */
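	/* As the cases below suggest: ctrlr_loss_timeout_sec must be >= -1 (-1
	 * means infinite); reconnect_delay_sec must be non-zero exactly when
	 * ctrlr_loss_timeout_sec is non-zero and must not exceed it; and
	 * fast_io_fail_timeout_sec, when non-zero, must lie between
	 * reconnect_delay_sec and ctrlr_loss_timeout_sec.
	 */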
4979 	CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false);
4980 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false);
4981 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false);
4982 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false);
4983 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false);
4984 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true);
4985 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true);
4986 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true);
4987 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true);
4988 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true);
4989 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false);
4990 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false);
4991 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false);
4992 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false);
4993 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true);
4994 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true);
4995 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true);
4996 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
4997 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true);
4998 }
4999 
5000 static void
5001 test_retry_io_if_ctrlr_is_resetting(void)
5002 {
5003 	struct nvme_path_id path = {};
5004 	struct spdk_nvme_ctrlr *ctrlr;
5005 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5006 	struct nvme_ctrlr *nvme_ctrlr;
5007 	const int STRING_SIZE = 32;
5008 	const char *attached_names[STRING_SIZE];
5009 	struct nvme_bdev *bdev;
5010 	struct nvme_ns *nvme_ns;
5011 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5012 	struct spdk_io_channel *ch;
5013 	struct nvme_bdev_channel *nbdev_ch;
5014 	struct nvme_io_path *io_path;
5015 	struct nvme_ctrlr_channel *ctrlr_ch;
5016 	int rc;
5017 
5018 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5019 	ut_init_trid(&path.trid);
5020 
5021 	set_thread(0);
5022 
5023 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5024 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5025 
5026 	g_ut_attach_ctrlr_status = 0;
5027 	g_ut_attach_bdev_count = 1;
5028 
5029 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5030 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
5031 	CU_ASSERT(rc == 0);
5032 
5033 	spdk_delay_us(1000);
5034 	poll_threads();
5035 
5036 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5037 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5038 
5039 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5040 	CU_ASSERT(nvme_ctrlr != NULL);
5041 
5042 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5043 	CU_ASSERT(bdev != NULL);
5044 
5045 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5046 	CU_ASSERT(nvme_ns != NULL);
5047 
5048 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5049 	ut_bdev_io_set_buf(bdev_io1);
5050 
5051 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5052 	ut_bdev_io_set_buf(bdev_io2);
5053 
5054 	ch = spdk_get_io_channel(bdev);
5055 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5056 
5057 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5058 
5059 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5060 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5061 
5062 	ctrlr_ch = io_path->ctrlr_ch;
5063 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5064 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
5065 
5066 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5067 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5068 
5069 	/* If qpair is connected, I/O should succeed. */
5070 	bdev_io1->internal.in_submit_request = true;
5071 
5072 	bdev_nvme_submit_request(ch, bdev_io1);
5073 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5074 
5075 	poll_threads();
5076 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5077 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5078 
5079 	/* If the qpair is disconnected, it is freed and then recreated after
5080 	 * the corresponding nvme_ctrlr is reset. I/O submitted while the
5081 	 * nvme_ctrlr is resetting should be queued.
5082 	 */
5083 	ctrlr_ch->qpair->is_connected = false;
5084 	ctrlr->is_failed = true;
5085 
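	/* A few polls detect the disconnected qpair, free it, and start resetting
	 * the nvme_ctrlr; the reconnect step has already cleared is_failed.
	 */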
5086 	poll_thread_times(0, 4);
5087 
5088 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5089 	CU_ASSERT(nvme_ctrlr->resetting == true);
5090 	CU_ASSERT(ctrlr->is_failed == false);
5091 
5092 	bdev_io1->internal.in_submit_request = true;
5093 
5094 	bdev_nvme_submit_request(ch, bdev_io1);
5095 
5096 	spdk_delay_us(1);
5097 
5098 	bdev_io2->internal.in_submit_request = true;
5099 
5100 	bdev_nvme_submit_request(ch, bdev_io2);
5101 
5102 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5103 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5104 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5105 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5106 
5107 	poll_threads();
5108 
5109 	CU_ASSERT(ctrlr_ch->qpair != NULL);
5110 	CU_ASSERT(nvme_ctrlr->resetting == false);
5111 
5112 	spdk_delay_us(999999);
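	/* The two I/Os were queued 1 us apart, so after 999999 us only bdev_io1
	 * has reached its one-second retry deadline; bdev_io2 remains queued.
	 */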
5113 
5114 	poll_thread_times(0, 1);
5115 
5116 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
5117 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5118 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5119 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5120 
5121 	poll_threads();
5122 
5123 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
5124 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5125 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5126 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5127 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5128 
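	/* One more microsecond reaches bdev_io2's retry deadline, so the next
	 * poll should resubmit it as well.
	 */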
5129 	spdk_delay_us(1);
5130 
5131 	poll_thread_times(0, 1);
5132 
5133 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
5134 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5135 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5136 
5137 	poll_threads();
5138 
5139 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
5140 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5141 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5142 
5143 	free(bdev_io1);
5144 	free(bdev_io2);
5145 
5146 	spdk_put_io_channel(ch);
5147 
5148 	poll_threads();
5149 
5150 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5151 	CU_ASSERT(rc == 0);
5152 
5153 	poll_threads();
5154 	spdk_delay_us(1000);
5155 	poll_threads();
5156 
5157 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5158 }
5159 
5160 static void
5161 test_retry_admin_passthru_if_ctrlr_is_resetting(void)
5162 {
5163 	struct nvme_path_id path = {};
5164 	struct spdk_nvme_ctrlr *ctrlr;
5165 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5166 	struct nvme_ctrlr *nvme_ctrlr;
5167 	const int STRING_SIZE = 32;
5168 	const char *attached_names[STRING_SIZE];
5169 	struct nvme_bdev *bdev;
5170 	struct spdk_bdev_io *admin_io;
5171 	struct spdk_io_channel *ch;
5172 	struct nvme_bdev_channel *nbdev_ch;
5173 	int rc;
5174 
5175 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5176 	ut_init_trid(&path.trid);
5177 
5178 	g_opts.bdev_retry_count = 1;
5179 
5180 	set_thread(0);
5181 
5182 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5183 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5184 
5185 	g_ut_attach_ctrlr_status = 0;
5186 	g_ut_attach_bdev_count = 1;
5187 
5188 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5189 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
5190 	CU_ASSERT(rc == 0);
5191 
5192 	spdk_delay_us(1000);
5193 	poll_threads();
5194 
5195 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5196 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5197 
5198 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5199 	CU_ASSERT(nvme_ctrlr != NULL);
5200 
5201 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5202 	CU_ASSERT(bdev != NULL);
5203 
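	/* GET_FEATURES is used only as a representative admin command; any
	 * passthrough opcode routed to the adminq should exercise the same
	 * queue-and-retry path.
	 */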
5204 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5205 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5206 
5207 	ch = spdk_get_io_channel(bdev);
5208 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5209 
5210 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5211 
5212 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5213 
5214 	/* If ctrlr is available, admin passthrough should succeed. */
5215 	admin_io->internal.in_submit_request = true;
5216 
5217 	bdev_nvme_submit_request(ch, admin_io);
5218 
5219 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5220 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5221 
5222 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5223 	poll_threads();
5224 
5225 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5226 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5227 
5228 	/* If ctrlr is resetting, an admin passthrough request submitted
5229 	 * during the reset should be queued.
5230 	 */
5231 	bdev_nvme_reset(nvme_ctrlr);
5232 
5233 	poll_thread_times(0, 1);
5234 
5235 	admin_io->internal.in_submit_request = true;
5236 
5237 	bdev_nvme_submit_request(ch, admin_io);
5238 
5239 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5240 	CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5241 
5242 	poll_threads();
5243 
5244 	CU_ASSERT(nvme_ctrlr->resetting == false);
5245 
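	/* The reset has completed, but the queued admin request should be
	 * retried only after its retry delay, presumably one second, elapses.
	 */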
5246 	spdk_delay_us(1000000);
5247 	poll_thread_times(0, 1);
5248 
5249 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5250 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5251 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5252 
5253 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5254 	poll_threads();
5255 
5256 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5257 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5258 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5259 
5260 	free(admin_io);
5261 
5262 	spdk_put_io_channel(ch);
5263 
5264 	poll_threads();
5265 
5266 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5267 	CU_ASSERT(rc == 0);
5268 
5269 	poll_threads();
5270 	spdk_delay_us(1000);
5271 	poll_threads();
5272 
5273 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5274 
5275 	g_opts.bdev_retry_count = 0;
5276 }
5277 
5278 static void
5279 test_reconnect_ctrlr(void)
5280 {
5281 	struct spdk_nvme_transport_id trid = {};
5282 	struct spdk_nvme_ctrlr ctrlr = {};
5283 	struct nvme_ctrlr *nvme_ctrlr;
5284 	struct spdk_io_channel *ch1, *ch2;
5285 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5286 	int rc;
5287 
5288 	ut_init_trid(&trid);
5289 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5290 
5291 	set_thread(0);
5292 
5293 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5294 	CU_ASSERT(rc == 0);
5295 
5296 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5297 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5298 
5299 	nvme_ctrlr->ctrlr_loss_timeout_sec = 2;
5300 	nvme_ctrlr->reconnect_delay_sec = 1;
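	/* With a 1 second reconnect delay and a 2 second ctrlr loss timeout,
	 * the ctrlr should survive one failed delayed reconnect after a failed
	 * reset; the following failed attempt should hit the loss timeout and
	 * trigger deletion. The rest of this test walks through exactly that.
	 */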
5301 
5302 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5303 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5304 
5305 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5306 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5307 
5308 	set_thread(1);
5309 
5310 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5311 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5312 
5313 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5314 
5315 	/* Reset starts from thread 1. */
5316 	set_thread(1);
5317 
5318 	/* The reset should fail and a reconnect timer should be registered. */
5319 	ctrlr.fail_reset = true;
5320 	ctrlr.is_failed = true;
5321 
5322 	rc = bdev_nvme_reset(nvme_ctrlr);
5323 	CU_ASSERT(rc == 0);
5324 	CU_ASSERT(nvme_ctrlr->resetting == true);
5325 	CU_ASSERT(ctrlr.is_failed == true);
5326 
5327 	poll_threads();
5328 
5329 	CU_ASSERT(nvme_ctrlr->resetting == false);
5330 	CU_ASSERT(ctrlr.is_failed == false);
5331 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5332 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5333 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5334 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5335 
5336 	/* Then a reconnect retry should succeed. */
5337 	ctrlr.fail_reset = false;
5338 
5339 	spdk_delay_us(SPDK_SEC_TO_USEC);
5340 	poll_thread_times(0, 1);
5341 
5342 	CU_ASSERT(nvme_ctrlr->resetting == true);
5343 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5344 
5345 	poll_threads();
5346 
5347 	CU_ASSERT(nvme_ctrlr->resetting == false);
5348 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5349 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
5350 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5351 
5352 	/* The reset should fail again and another reconnect timer should be registered. */
5353 	ctrlr.fail_reset = true;
5354 	ctrlr.is_failed = true;
5355 
5356 	rc = bdev_nvme_reset(nvme_ctrlr);
5357 	CU_ASSERT(rc == 0);
5358 	CU_ASSERT(nvme_ctrlr->resetting == true);
5359 	CU_ASSERT(ctrlr.is_failed == true);
5360 
5361 	poll_threads();
5362 
5363 	CU_ASSERT(nvme_ctrlr->resetting == false);
5364 	CU_ASSERT(ctrlr.is_failed == false);
5365 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5366 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5367 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5368 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5369 
5370 	/* Then a reconnect retry should still fail. */
5371 	spdk_delay_us(SPDK_SEC_TO_USEC);
5372 	poll_thread_times(0, 1);
5373 
5374 	CU_ASSERT(nvme_ctrlr->resetting == true);
5375 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5376 
5377 	poll_threads();
5378 
5379 	CU_ASSERT(nvme_ctrlr->resetting == false);
5380 	CU_ASSERT(ctrlr.is_failed == false);
5381 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5382 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5383 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5384 
5385 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5386 	spdk_delay_us(SPDK_SEC_TO_USEC);
5387 	poll_threads();
5388 
5389 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5390 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5391 	CU_ASSERT(nvme_ctrlr->destruct == true);
5392 
5393 	spdk_put_io_channel(ch2);
5394 
5395 	set_thread(0);
5396 
5397 	spdk_put_io_channel(ch1);
5398 
5399 	poll_threads();
5400 	spdk_delay_us(1000);
5401 	poll_threads();
5402 
5403 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5404 }
5405 
5406 static struct nvme_path_id *
5407 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5408 		       const struct spdk_nvme_transport_id *trid)
5409 {
5410 	struct nvme_path_id *p;
5411 
5412 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5413 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5414 			break;
5415 		}
5416 	}
5417 
5418 	return p;
5419 }
5420 
5421 static void
5422 test_retry_failover_ctrlr(void)
5423 {
5424 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5425 	struct spdk_nvme_ctrlr ctrlr = {};
5426 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5427 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5428 	struct spdk_io_channel *ch;
5429 	struct nvme_ctrlr_channel *ctrlr_ch;
5430 	int rc;
5431 
5432 	ut_init_trid(&trid1);
5433 	ut_init_trid2(&trid2);
5434 	ut_init_trid3(&trid3);
5435 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5436 
5437 	set_thread(0);
5438 
5439 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5440 	CU_ASSERT(rc == 0);
5441 
5442 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5443 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5444 
5445 	nvme_ctrlr->ctrlr_loss_timeout_sec = -1;
5446 	nvme_ctrlr->reconnect_delay_sec = 1;
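	/* A ctrlr_loss_timeout_sec of -1 means the ctrlr is never given up on;
	 * failed reconnects only reschedule the reconnect timer, which lets
	 * this test exercise trid failover while a reconnect is pending.
	 */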
5447 
5448 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5449 	CU_ASSERT(rc == 0);
5450 
5451 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5452 	CU_ASSERT(rc == 0);
5453 
5454 	ch = spdk_get_io_channel(nvme_ctrlr);
5455 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5456 
5457 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5458 
5459 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5460 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5461 	CU_ASSERT(path_id1->is_failed == false);
5462 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5463 
5464 	/* If a reset fails and a reconnect is scheduled, the active path_id is switched from trid1 to trid2. */
5465 	ctrlr.fail_reset = true;
5466 	ctrlr.is_failed = true;
5467 
5468 	rc = bdev_nvme_reset(nvme_ctrlr);
5469 	CU_ASSERT(rc == 0);
5470 
5471 	poll_threads();
5472 
5473 	CU_ASSERT(nvme_ctrlr->resetting == false);
5474 	CU_ASSERT(ctrlr.is_failed == false);
5475 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5476 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5477 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5478 
5479 	CU_ASSERT(path_id1->is_failed == true);
5480 
5481 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5482 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5483 	CU_ASSERT(path_id2->is_failed == false);
5484 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5485 
5486 	/* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is
5487 	 * switched to trid3 but reset is not started.
5488 	 */
5489 	rc = bdev_nvme_failover(nvme_ctrlr, true);
5490 	CU_ASSERT(rc == 0);
5491 
5492 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5493 
5494 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5495 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5496 	CU_ASSERT(path_id3->is_failed == false);
5497 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5498 
5499 	CU_ASSERT(nvme_ctrlr->resetting == false);
5500 
5501 	/* If the reconnect succeeds, trid3 should remain the active path_id. */
5502 	ctrlr.fail_reset = false;
5503 
5504 	spdk_delay_us(SPDK_SEC_TO_USEC);
5505 	poll_thread_times(0, 1);
5506 
5507 	CU_ASSERT(nvme_ctrlr->resetting == true);
5508 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5509 
5510 	poll_threads();
5511 
5512 	CU_ASSERT(path_id3->is_failed == false);
5513 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5514 	CU_ASSERT(nvme_ctrlr->resetting == false);
5515 	CU_ASSERT(ctrlr_ch->qpair != NULL);
5516 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5517 
5518 	spdk_put_io_channel(ch);
5519 
5520 	poll_threads();
5521 
5522 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5523 	CU_ASSERT(rc == 0);
5524 
5525 	poll_threads();
5526 	spdk_delay_us(1000);
5527 	poll_threads();
5528 
5529 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5530 }
5531 
5532 static void
5533 test_fail_path(void)
5534 {
5535 	struct nvme_path_id path = {};
5536 	struct spdk_nvme_ctrlr *ctrlr;
5537 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5538 	struct nvme_ctrlr *nvme_ctrlr;
5539 	const int STRING_SIZE = 32;
5540 	const char *attached_names[STRING_SIZE];
5541 	struct nvme_bdev *bdev;
5542 	struct nvme_ns *nvme_ns;
5543 	struct spdk_bdev_io *bdev_io;
5544 	struct spdk_io_channel *ch;
5545 	struct nvme_bdev_channel *nbdev_ch;
5546 	struct nvme_io_path *io_path;
5547 	struct nvme_ctrlr_channel *ctrlr_ch;
5548 	int rc;
5549 
5550 	/* The test scenario is the following.
5551 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5552 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5553 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5554 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5555 	 *   comes first. The queued I/O is failed.
5556 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5557 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5558 	 */
5559 
5560 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5561 	ut_init_trid(&path.trid);
5562 
5563 	set_thread(0);
5564 
5565 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5566 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5567 
5568 	g_ut_attach_ctrlr_status = 0;
5569 	g_ut_attach_bdev_count = 1;
5570 
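	/* The trailing 4, 1, 2 below are assumed to be ctrlr_loss_timeout_sec,
	 * reconnect_delay_sec, and fast_io_fail_timeout_sec, matching the
	 * scenario described above.
	 */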
5571 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5572 			      attach_ctrlr_done, NULL, NULL, false, 4, 1, 2);
5573 	CU_ASSERT(rc == 0);
5574 
5575 	spdk_delay_us(1000);
5576 	poll_threads();
5577 
5578 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5579 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5580 
5581 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5582 	CU_ASSERT(nvme_ctrlr != NULL);
5583 
5584 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5585 	CU_ASSERT(bdev != NULL);
5586 
5587 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5588 	CU_ASSERT(nvme_ns != NULL);
5589 
5590 	ch = spdk_get_io_channel(bdev);
5591 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5592 
5593 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5594 
5595 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5596 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5597 
5598 	ctrlr_ch = io_path->ctrlr_ch;
5599 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5600 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
5601 
5602 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5603 	ut_bdev_io_set_buf(bdev_io);
5604 
5606 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5607 	ctrlr->fail_reset = true;
5608 	ctrlr->is_failed = true;
5609 
5610 	rc = bdev_nvme_reset(nvme_ctrlr);
5611 	CU_ASSERT(rc == 0);
5612 	CU_ASSERT(nvme_ctrlr->resetting == true);
5613 	CU_ASSERT(ctrlr->is_failed == true);
5614 
5615 	poll_threads();
5616 
5617 	CU_ASSERT(nvme_ctrlr->resetting == false);
5618 	CU_ASSERT(ctrlr->is_failed == false);
5619 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5620 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5621 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5622 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5623 
5624 	/* I/O should be queued. */
5625 	bdev_io->internal.in_submit_request = true;
5626 
5627 	bdev_nvme_submit_request(ch, bdev_io);
5628 
5629 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5630 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5631 
5632 	/* After a second, the I/O should still be queued and the ctrlr should
5633 	 * still be recovering.
5634 	 */
5635 	spdk_delay_us(SPDK_SEC_TO_USEC);
5636 	poll_threads();
5637 
5638 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5639 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5640 
5641 	CU_ASSERT(nvme_ctrlr->resetting == false);
5642 	CU_ASSERT(ctrlr->is_failed == false);
5643 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5644 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5645 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5646 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5647 
5648 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5649 	spdk_delay_us(SPDK_SEC_TO_USEC);
5650 	poll_threads();
5651 
5652 	CU_ASSERT(nvme_ctrlr->resetting == false);
5653 	CU_ASSERT(ctrlr->is_failed == false);
5654 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5655 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5656 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5657 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5658 
5659 	/* Then, within a second, the pending I/O should be failed. */
5660 	spdk_delay_us(SPDK_SEC_TO_USEC);
5661 	poll_threads();
5662 
5663 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5664 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5665 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5666 
5667 	/* Another I/O submission should be failed immediately. */
5668 	bdev_io->internal.in_submit_request = true;
5669 
5670 	bdev_nvme_submit_request(ch, bdev_io);
5671 
5672 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5673 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5674 
5675 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the
5676 	 * ctrlr should be deleted.
5677 	 */
5678 	spdk_delay_us(SPDK_SEC_TO_USEC);
5679 	poll_threads();
5680 
5681 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5682 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5683 	CU_ASSERT(nvme_ctrlr->destruct == true);
5684 
5685 	spdk_put_io_channel(ch);
5686 
5687 	poll_threads();
5688 	spdk_delay_us(1000);
5689 	poll_threads();
5690 
5691 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5692 
5693 	free(bdev_io);
5694 }
5695 
5696 int
5697 main(int argc, const char **argv)
5698 {
5699 	CU_pSuite	suite = NULL;
5700 	unsigned int	num_failures;
5701 
5702 	CU_set_error_action(CUEA_ABORT);
5703 	CU_initialize_registry();
5704 
5705 	suite = CU_add_suite("nvme", NULL, NULL);
5706 
5707 	CU_ADD_TEST(suite, test_create_ctrlr);
5708 	CU_ADD_TEST(suite, test_reset_ctrlr);
5709 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
5710 	CU_ADD_TEST(suite, test_failover_ctrlr);
5711 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
5712 	CU_ADD_TEST(suite, test_pending_reset);
5713 	CU_ADD_TEST(suite, test_attach_ctrlr);
5714 	CU_ADD_TEST(suite, test_aer_cb);
5715 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
5716 	CU_ADD_TEST(suite, test_add_remove_trid);
5717 	CU_ADD_TEST(suite, test_abort);
5718 	CU_ADD_TEST(suite, test_get_io_qpair);
5719 	CU_ADD_TEST(suite, test_bdev_unregister);
5720 	CU_ADD_TEST(suite, test_compare_ns);
5721 	CU_ADD_TEST(suite, test_init_ana_log_page);
5722 	CU_ADD_TEST(suite, test_get_memory_domains);
5723 	CU_ADD_TEST(suite, test_reconnect_qpair);
5724 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
5725 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
5726 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
5727 	CU_ADD_TEST(suite, test_admin_path);
5728 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
5729 	CU_ADD_TEST(suite, test_find_io_path);
5730 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
5731 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
5732 	CU_ADD_TEST(suite, test_retry_io_count);
5733 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
5734 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
5735 	CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error);
5736 	CU_ADD_TEST(suite, test_retry_admin_passthru_by_count);
5737 	CU_ADD_TEST(suite, test_check_multipath_params);
5738 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
5739 	CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting);
5740 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
5741 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
5742 	CU_ADD_TEST(suite, test_fail_path);
5743 
5744 	CU_basic_set_mode(CU_BRM_VERBOSE);
5745 
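	/* Three ut threads are allocated because several tests above create
	 * I/O channels and run pollers on more than one thread.
	 */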
5746 	allocate_threads(3);
5747 	set_thread(0);
5748 	bdev_nvme_library_init();
5749 	init_accel();
5750 
5751 	CU_basic_run_tests();
5752 
5753 	set_thread(0);
5754 	bdev_nvme_library_fini();
5755 	fini_accel();
5756 	free_threads();
5757 
5758 	num_failures = CU_get_number_of_failures();
5759 	CU_cleanup_registry();
5760 
5761 	return num_failures;
5762 }
5763