/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

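/*
 * The stubs and mock objects below stand in for the NVMe driver and accel
 * framework APIs that bdev_nvme.c calls, so the module under test (included
 * directly above) can be compiled in and exercised in isolation.
 */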
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				       struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return 0;
}

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

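/*
 * Mocked driver objects. A request submitted to a mocked qpair is queued on
 * outstanding_reqs and completed later by spdk_nvme_qpair_process_completions().
 */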
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_failed;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
	bool				in_completion_context;
	uint64_t			num_qpairs_to_delete;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

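/* Global test state: ctrlrs waiting to be probed, ctrlrs already attached, and
 * knobs controlling the status reported by the attach and register paths.
 */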
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

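/* Create a mocked ctrlr and queue it on g_ut_init_ctrlrs. It is moved to
 * g_ut_attached_ctrlrs when spdk_nvme_probe_poll_async() finds a probe
 * context whose trid matches.
 */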
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

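/* Queue a fake request on the qpair. The completion defaults to success and
 * is delivered when the qpair's completions are processed.
 */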
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

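/* Attach every queued ctrlr whose trid matches the probe context and invoke
 * the attach callback, mimicking asynchronous probe completion.
 */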
int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

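/* Each qpair tracks its current list head explicitly so that the mocked poll
 * group can assert correct connected/disconnected state transitions.
 */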
static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_failed = false;
	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

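/* Freeing a qpair from within a completion context is deferred until the
 * context unwinds, mirroring the deferred deletion done by the real driver.
 */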
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	if (qpair->poll_group && qpair->poll_group->in_completion_context) {
		qpair->poll_group->num_qpairs_to_delete++;
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

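/* Build an ANA log page into the caller's buffer: a header followed by one
 * group descriptor per active namespace, each listing a single NSID.
 */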
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

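/* Mark the outstanding request whose cb_arg matches cmd_cb_arg as aborted and
 * queue an abort completion on the admin qpair.
 */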
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

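/* Process completions for all connected qpairs, then free any qpairs whose
 * deletion was deferred while the group was in its completion context.
 */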
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	group->in_completion_context = true;

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_failed) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	group->in_completion_context = false;

	if (group->num_qpairs_to_delete > 0) {
		TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
			if (qpair->delete_after_completion_context) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				CU_ASSERT(group->num_qpairs_to_delete > 0);
				group->num_qpairs_to_delete--;
			}
		}

		TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
			if (qpair->delete_after_completion_context) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				CU_ASSERT(group->num_qpairs_to_delete > 0);
				group->num_qpairs_to_delete--;
			}
		}

		CU_ASSERT(group->num_qpairs_to_delete == 0);
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);
	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

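/* Verify that nvme_ctrlr creation and deletion work. Deletion is asynchronous,
 * so the ctrlr is only fully destructed after the threads are polled.
 */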
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

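/* Step through bdev_nvme_reset() across two threads: the qpair on each thread
 * is deleted first, the ctrlr is then disconnected and reconnected, and the
 * qpairs are recreated before the reset completes.
 */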
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; the destruct is deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* The reset completed but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* A new reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, two channels are still open, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was disconnected, and resetting the ctrlr failed repeatedly before failover
 * from trid1 to trid2 started. While processing the failed reset, trid3 was added.
 * trid1 should have stayed active, i.e., the head of the list, until the failover
 * completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a shorter polling period than the admin qpair. When a connection
 * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers a ctrlr reset, while an admin qpair error triggers a ctrlr
 * failover. Hence a reset may be executed repeatedly before the failover is
 * executed, which makes this bug real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->is_failed == true);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

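/* Verify that a reset submitted while another reset is in progress is queued
 * on the channel's pending_resets list and completes or fails together with
 * the first reset.
 */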
1734 static void
1735 test_pending_reset(void)
1736 {
1737 	struct spdk_nvme_transport_id trid = {};
1738 	struct spdk_nvme_ctrlr *ctrlr;
1739 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1740 	const int STRING_SIZE = 32;
1741 	const char *attached_names[STRING_SIZE];
1742 	struct nvme_bdev *bdev;
1743 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1744 	struct spdk_io_channel *ch1, *ch2;
1745 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1746 	struct nvme_io_path *io_path1, *io_path2;
1747 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1748 	int rc;
1749 
1750 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1751 	ut_init_trid(&trid);
1752 
1753 	set_thread(0);
1754 
1755 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1756 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1757 
1758 	g_ut_attach_ctrlr_status = 0;
1759 	g_ut_attach_bdev_count = 1;
1760 
1761 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1762 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1763 	CU_ASSERT(rc == 0);
1764 
1765 	spdk_delay_us(1000);
1766 	poll_threads();
1767 
1768 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1769 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1770 
1771 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1772 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1773 
1774 	ch1 = spdk_get_io_channel(bdev);
1775 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1776 
1777 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1778 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1779 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1780 	ctrlr_ch1 = io_path1->ctrlr_ch;
1781 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1782 
1783 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1784 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1785 
1786 	set_thread(1);
1787 
1788 	ch2 = spdk_get_io_channel(bdev);
1789 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1790 
1791 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1792 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1793 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1794 	ctrlr_ch2 = io_path2->ctrlr_ch;
1795 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1796 
1797 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1798 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1799 
1800 	/* The first reset request is submitted on thread 1, and the second reset request
1801 	 * is submitted on thread 0 while processing the first request.
1802 	 */
1803 	bdev_nvme_submit_request(ch2, first_bdev_io);
1804 	CU_ASSERT(nvme_ctrlr->resetting == true);
1805 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1806 
1807 	set_thread(0);
1808 
1809 	bdev_nvme_submit_request(ch1, second_bdev_io);
1810 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1811 
1812 	poll_threads();
1813 
1814 	CU_ASSERT(nvme_ctrlr->resetting == false);
1815 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1816 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1817 
1818 	/* The first reset request is submitted on thread 1, and the second reset request
1819 	 * is submitted on thread 0 while processing the first request.
1820 	 *
1821 	 * The difference from the above scenario is that resetting the controller
1822 	 * fails while processing the first request. Hence both reset requests should fail.
1823 	 */
1824 	set_thread(1);
1825 
1826 	bdev_nvme_submit_request(ch2, first_bdev_io);
1827 	CU_ASSERT(nvme_ctrlr->resetting == true);
1828 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1829 
1830 	set_thread(0);
1831 
1832 	bdev_nvme_submit_request(ch1, second_bdev_io);
1833 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1834 
1835 	ctrlr->fail_reset = true;
1836 
1837 	poll_threads();
1838 
1839 	CU_ASSERT(nvme_ctrlr->resetting == false);
1840 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1841 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1842 
1843 	spdk_put_io_channel(ch1);
1844 
1845 	set_thread(1);
1846 
1847 	spdk_put_io_channel(ch2);
1848 
1849 	poll_threads();
1850 
1851 	set_thread(0);
1852 
1853 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1854 	CU_ASSERT(rc == 0);
1855 
1856 	poll_threads();
1857 	spdk_delay_us(1000);
1858 	poll_threads();
1859 
1860 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1861 
1862 	free(first_bdev_io);
1863 	free(second_bdev_io);
1864 }
1865 
1866 static void
1867 test_attach_ctrlr(void)
1868 {
1869 	struct spdk_nvme_transport_id trid = {};
1870 	struct spdk_nvme_ctrlr *ctrlr;
1871 	struct nvme_ctrlr *nvme_ctrlr;
1872 	const int STRING_SIZE = 32;
1873 	const char *attached_names[STRING_SIZE];
1874 	struct nvme_bdev *nbdev;
1875 	int rc;
1876 
1877 	set_thread(0);
1878 
1879 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1880 	ut_init_trid(&trid);
1881 
1882 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is
1883 	 * removed by probe polling.
1884 	 */
1885 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1886 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1887 
1888 	ctrlr->is_failed = true;
1889 	g_ut_attach_ctrlr_status = -EIO;
1890 	g_ut_attach_bdev_count = 0;
1891 
1892 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1893 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1894 	CU_ASSERT(rc == 0);
1895 
1896 	spdk_delay_us(1000);
1897 	poll_threads();
1898 
1899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1900 
1901 	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created. */
1902 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1903 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1904 
1905 	g_ut_attach_ctrlr_status = 0;
1906 
1907 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1908 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1909 	CU_ASSERT(rc == 0);
1910 
1911 	spdk_delay_us(1000);
1912 	poll_threads();
1913 
1914 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1915 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1916 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1917 
1918 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1919 	CU_ASSERT(rc == 0);
1920 
1921 	poll_threads();
1922 	spdk_delay_us(1000);
1923 	poll_threads();
1924 
1925 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1926 
1927 	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
1928 	 * one nvme_bdev are created.
1929 	 */
1930 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1931 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1932 
1933 	g_ut_attach_bdev_count = 1;
1934 
1935 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1936 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1937 	CU_ASSERT(rc == 0);
1938 
1939 	spdk_delay_us(1000);
1940 	poll_threads();
1941 
1942 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1943 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1944 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1945 
1946 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
1947 	attached_names[0] = NULL;
1948 
1949 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1950 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
1951 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
1952 
1953 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1954 	CU_ASSERT(rc == 0);
1955 
1956 	poll_threads();
1957 	spdk_delay_us(1000);
1958 	poll_threads();
1959 
1960 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1961 
1962 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
1963 	 * created because creating the nvme_bdev failed.
1964 	 */
1965 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1966 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1967 
1968 	g_ut_register_bdev_status = -EINVAL;
1969 	g_ut_attach_bdev_count = 0;
1970 
1971 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1972 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
1973 	CU_ASSERT(rc == 0);
1974 
1975 	spdk_delay_us(1000);
1976 	poll_threads();
1977 
1978 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1979 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1980 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1981 
1982 	CU_ASSERT(attached_names[0] == NULL);
1983 
1984 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1985 	CU_ASSERT(rc == 0);
1986 
1987 	poll_threads();
1988 	spdk_delay_us(1000);
1989 	poll_threads();
1990 
1991 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1992 
1993 	g_ut_register_bdev_status = 0;
1994 }
1995 
1996 static void
1997 test_aer_cb(void)
1998 {
1999 	struct spdk_nvme_transport_id trid = {};
2000 	struct spdk_nvme_ctrlr *ctrlr;
2001 	struct nvme_ctrlr *nvme_ctrlr;
2002 	struct nvme_bdev *bdev;
2003 	const int STRING_SIZE = 32;
2004 	const char *attached_names[STRING_SIZE];
2005 	union spdk_nvme_async_event_completion event = {};
2006 	struct spdk_nvme_cpl cpl = {};
2007 	int rc;
2008 
2009 	set_thread(0);
2010 
2011 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2012 	ut_init_trid(&trid);
2013 
2014 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2015 	 * and 4th namespaces are populated.
2016 	 */
2017 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2018 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2019 
2020 	ctrlr->ns[0].is_active = false;
2021 
2022 	g_ut_attach_ctrlr_status = 0;
2023 	g_ut_attach_bdev_count = 3;
2024 
2025 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2026 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2027 	CU_ASSERT(rc == 0);
2028 
2029 	spdk_delay_us(1000);
2030 	poll_threads();
2031 
2032 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2033 	poll_threads();
2034 
2035 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2036 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2037 
2038 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2039 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2040 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2041 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2042 
2043 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2044 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2045 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2046 
2047 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
2048 	 * and change the size of the 4th namespace.
2049 	 */
2050 	ctrlr->ns[0].is_active = true;
2051 	ctrlr->ns[2].is_active = false;
2052 	ctrlr->nsdata[3].nsze = 2048;
2053 
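	/* Craft a Namespace Attribute Changed AER completion by hand and feed it
	 * directly to aer_cb() to trigger namespace re-population.
	 */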
2054 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2055 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2056 	cpl.cdw0 = event.raw;
2057 
2058 	aer_cb(nvme_ctrlr, &cpl);
2059 
2060 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2061 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2062 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2063 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2064 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2065 
2066 	/* Change ANA state of active namespaces. */
2067 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2068 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2069 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2070 
2071 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2072 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2073 	cpl.cdw0 = event.raw;
2074 
2075 	aer_cb(nvme_ctrlr, &cpl);
2076 
2077 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2078 	poll_threads();
2079 
2080 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2081 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2082 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2083 
2084 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2085 	CU_ASSERT(rc == 0);
2086 
2087 	poll_threads();
2088 	spdk_delay_us(1000);
2089 	poll_threads();
2090 
2091 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2092 }
2093 
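/* Helper: submit a single I/O of the given type and verify that it stays
 * outstanding on the qpair until the pollers run, after which it completes
 * with success.
 */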
2094 static void
2095 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2096 			enum spdk_bdev_io_type io_type)
2097 {
2098 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2099 	struct nvme_io_path *io_path;
2100 	struct spdk_nvme_qpair *qpair;
2101 
2102 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2103 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2104 	qpair = io_path->ctrlr_ch->qpair;
2105 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2106 
2107 	bdev_io->type = io_type;
2108 	bdev_io->internal.in_submit_request = true;
2109 
2110 	bdev_nvme_submit_request(ch, bdev_io);
2111 
2112 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2113 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2114 
2115 	poll_threads();
2116 
2117 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2118 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2119 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2120 }
2121 
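/* Helper: verify that the given I/O type is completed inline as a no-op,
 * without ever being queued on the qpair.
 */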
2122 static void
2123 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2124 		   enum spdk_bdev_io_type io_type)
2125 {
2126 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2127 	struct nvme_io_path *io_path;
2128 	struct spdk_nvme_qpair *qpair;
2129 
2130 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2131 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2132 	qpair = io_path->ctrlr_ch->qpair;
2133 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2134 
2135 	bdev_io->type = io_type;
2136 	bdev_io->internal.in_submit_request = true;
2137 
2138 	bdev_nvme_submit_request(ch, bdev_io);
2139 
2140 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2141 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2142 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2143 }
2144 
2145 static void
2146 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2147 {
2148 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2149 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2150 	struct ut_nvme_req *req;
2151 	struct nvme_io_path *io_path;
2152 	struct spdk_nvme_qpair *qpair;
2153 
2154 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2155 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2156 	qpair = io_path->ctrlr_ch->qpair;
2157 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2158 
2159 	/* Compare-and-write is the only fused command supported for now. */
2160 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2161 	bdev_io->internal.in_submit_request = true;
2162 
2163 	bdev_nvme_submit_request(ch, bdev_io);
2164 
2165 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2166 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2167 	CU_ASSERT(bio->first_fused_submitted == true);
2168 
2169 	/* First outstanding request is compare operation. */
2170 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2171 	SPDK_CU_ASSERT_FATAL(req != NULL);
2172 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2173 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2174 
2175 	poll_threads();
2176 
2177 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2178 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2179 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2180 }
2181 
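/* Helper: an admin command is processed by the admin queue poller on the
 * thread that created the ctrlr (thread 1 here), and its completion is then
 * delivered back to the submitting thread (thread 0).
 */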
2182 static void
2183 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2184 			 struct spdk_nvme_ctrlr *ctrlr)
2185 {
2186 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2187 	bdev_io->internal.in_submit_request = true;
2188 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2189 
2190 	bdev_nvme_submit_request(ch, bdev_io);
2191 
2192 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2193 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2194 
2195 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2196 	poll_thread_times(1, 1);
2197 
2198 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2199 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2200 
2201 	poll_thread_times(0, 1);
2202 
2203 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2204 }
2205 
2206 static void
2207 test_submit_nvme_cmd(void)
2208 {
2209 	struct spdk_nvme_transport_id trid = {};
2210 	struct spdk_nvme_ctrlr *ctrlr;
2211 	struct nvme_ctrlr *nvme_ctrlr;
2212 	const int STRING_SIZE = 32;
2213 	const char *attached_names[STRING_SIZE];
2214 	struct nvme_bdev *bdev;
2215 	struct spdk_bdev_io *bdev_io;
2216 	struct spdk_io_channel *ch;
2217 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2218 	int rc;
2219 
2220 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2221 	ut_init_trid(&trid);
2222 
2223 	set_thread(1);
2224 
2225 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2226 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2227 
2228 	g_ut_attach_ctrlr_status = 0;
2229 	g_ut_attach_bdev_count = 1;
2230 
2231 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2232 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2233 	CU_ASSERT(rc == 0);
2234 
2235 	spdk_delay_us(1000);
2236 	poll_threads();
2237 
2238 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2239 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2240 
2241 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2242 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2243 
2244 	set_thread(0);
2245 
2246 	ch = spdk_get_io_channel(bdev);
2247 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2248 
2249 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2250 
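	/* Submit a read without iovs first; presumably this exercises the buffer
	 * allocation path before the request reaches the qpair.
	 */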
2251 	bdev_io->u.bdev.iovs = NULL;
2252 
2253 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2254 
2255 	ut_bdev_io_set_buf(bdev_io);
2256 
2257 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2258 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2259 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2260 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2261 
2262 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2263 
2264 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2265 
2266 	/* Verify that the ext NVMe API is called if bdev_io ext_opts is set. */
2267 	bdev_io->internal.ext_opts = &ext_io_opts;
2268 	g_ut_readv_ext_called = false;
2269 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2270 	CU_ASSERT(g_ut_readv_ext_called == true);
2271 	g_ut_readv_ext_called = false;
2272 
2273 	g_ut_writev_ext_called = false;
2274 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2275 	CU_ASSERT(g_ut_writev_ext_called == true);
2276 	g_ut_writev_ext_called = false;
2277 	bdev_io->internal.ext_opts = NULL;
2278 
2279 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2280 
2281 	free(bdev_io);
2282 
2283 	spdk_put_io_channel(ch);
2284 
2285 	poll_threads();
2286 
2287 	set_thread(1);
2288 
2289 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2290 	CU_ASSERT(rc == 0);
2291 
2292 	poll_threads();
2293 	spdk_delay_us(1000);
2294 	poll_threads();
2295 
2296 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2297 }
2298 
2299 static void
2300 test_add_remove_trid(void)
2301 {
2302 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2303 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2304 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2305 	const int STRING_SIZE = 32;
2306 	const char *attached_names[STRING_SIZE];
2307 	struct nvme_path_id *ctrid;
2308 	int rc;
2309 
2310 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2311 	ut_init_trid(&path1.trid);
2312 	ut_init_trid2(&path2.trid);
2313 	ut_init_trid3(&path3.trid);
2314 
2315 	set_thread(0);
2316 
2317 	g_ut_attach_ctrlr_status = 0;
2318 	g_ut_attach_bdev_count = 0;
2319 
2320 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2321 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2322 
2323 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2324 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2325 	CU_ASSERT(rc == 0);
2326 
2327 	spdk_delay_us(1000);
2328 	poll_threads();
2329 
2330 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2331 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2332 
2333 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2334 
2335 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2336 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2337 
2338 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2339 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2340 	CU_ASSERT(rc == 0);
2341 
2342 	spdk_delay_us(1000);
2343 	poll_threads();
2344 
2345 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
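	/* path2 should now be registered as an alternative trid. */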
2346 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2347 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2348 			break;
2349 		}
2350 	}
2351 	CU_ASSERT(ctrid != NULL);
2352 
2353 	/* trid3 is not in the registered list. */
2354 	rc = bdev_nvme_delete("nvme0", &path3);
2355 	CU_ASSERT(rc == -ENXIO);
2356 
2357 	/* trid2 is not in use, so it is simply removed. */
2358 	rc = bdev_nvme_delete("nvme0", &path2);
2359 	CU_ASSERT(rc == 0);
2360 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2361 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2362 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2363 	}
2364 
2365 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2366 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2367 
2368 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
2369 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2370 	CU_ASSERT(rc == 0);
2371 
2372 	spdk_delay_us(1000);
2373 	poll_threads();
2374 
2375 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2376 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2377 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2378 			break;
2379 		}
2380 	}
2381 	CU_ASSERT(ctrid != NULL);
2382 
2383 	/* path1 is currently used and path3 is an alternative path.
2384 	 * If we remove path1, the active path is changed to path3.
2385 	 */
2386 	rc = bdev_nvme_delete("nvme0", &path1);
2387 	CU_ASSERT(rc == 0);
2388 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2389 	CU_ASSERT(nvme_ctrlr->resetting == true);
2390 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2391 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2392 	}
2393 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2394 
2395 	poll_threads();
2396 
2397 	CU_ASSERT(nvme_ctrlr->resetting == false);
2398 
2399 	/* path3 is the current and only path. If we remove path3, the corresponding
2400 	 * nvme_ctrlr is removed.
2401 	 */
2402 	rc = bdev_nvme_delete("nvme0", &path3);
2403 	CU_ASSERT(rc == 0);
2404 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2405 
2406 	poll_threads();
2407 	spdk_delay_us(1000);
2408 	poll_threads();
2409 
2410 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2411 
2412 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2413 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2414 
2415 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2416 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2417 	CU_ASSERT(rc == 0);
2418 
2419 	spdk_delay_us(1000);
2420 	poll_threads();
2421 
2422 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2423 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2424 
2425 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2426 
2427 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2428 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2429 
2430 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2431 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2432 	CU_ASSERT(rc == 0);
2433 
2434 	spdk_delay_us(1000);
2435 	poll_threads();
2436 
2437 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2438 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2439 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2440 			break;
2441 		}
2442 	}
2443 	CU_ASSERT(ctrid != NULL);
2444 
2445 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2446 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2447 	CU_ASSERT(rc == 0);
2448 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2449 
2450 	poll_threads();
2451 	spdk_delay_us(1000);
2452 	poll_threads();
2453 
2454 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2455 }
2456 
2457 static void
2458 test_abort(void)
2459 {
2460 	struct spdk_nvme_transport_id trid = {};
2461 	struct spdk_nvme_ctrlr *ctrlr;
2462 	struct nvme_ctrlr *nvme_ctrlr;
2463 	const int STRING_SIZE = 32;
2464 	const char *attached_names[STRING_SIZE];
2465 	struct nvme_bdev *bdev;
2466 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2467 	struct spdk_io_channel *ch1, *ch2;
2468 	struct nvme_bdev_channel *nbdev_ch1;
2469 	struct nvme_io_path *io_path1;
2470 	struct nvme_ctrlr_channel *ctrlr_ch1;
2471 	int rc;
2472 
2473 	/* Create a ctrlr on thread 1 and submit I/O and admin requests to be aborted
2474 	 * on thread 0. The aborts for I/O requests are submitted on thread 0, and the
2475 	 * aborts for admin requests are submitted on thread 1. Both should succeed.
2476 	 */
2477 
2478 	ut_init_trid(&trid);
2479 
2480 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2481 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2482 
2483 	g_ut_attach_ctrlr_status = 0;
2484 	g_ut_attach_bdev_count = 1;
2485 
2486 	set_thread(1);
2487 
2488 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2489 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
2490 	CU_ASSERT(rc == 0);
2491 
2492 	spdk_delay_us(1000);
2493 	poll_threads();
2494 
2495 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2496 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2497 
2498 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2499 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2500 
2501 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2502 	ut_bdev_io_set_buf(write_io);
2503 
2504 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2505 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2506 
2507 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2508 
2509 	set_thread(0);
2510 
2511 	ch1 = spdk_get_io_channel(bdev);
2512 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2513 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2514 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2515 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2516 	ctrlr_ch1 = io_path1->ctrlr_ch;
2517 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2518 
2519 	set_thread(1);
2520 
2521 	ch2 = spdk_get_io_channel(bdev);
2522 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2523 
2524 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2525 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2526 
2527 	/* Aborting the already completed request should fail. */
2528 	write_io->internal.in_submit_request = true;
2529 	bdev_nvme_submit_request(ch1, write_io);
2530 	poll_threads();
2531 
2532 	CU_ASSERT(write_io->internal.in_submit_request == false);
2533 
2534 	abort_io->u.abort.bio_to_abort = write_io;
2535 	abort_io->internal.in_submit_request = true;
2536 
2537 	bdev_nvme_submit_request(ch1, abort_io);
2538 
2539 	poll_threads();
2540 
2541 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2542 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2543 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2544 
2545 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2546 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2547 
2548 	admin_io->internal.in_submit_request = true;
2549 	bdev_nvme_submit_request(ch1, admin_io);
2550 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2551 	poll_threads();
2552 
2553 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2554 
2555 	abort_io->u.abort.bio_to_abort = admin_io;
2556 	abort_io->internal.in_submit_request = true;
2557 
2558 	bdev_nvme_submit_request(ch2, abort_io);
2559 
2560 	poll_threads();
2561 
2562 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2563 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2564 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2565 
2566 	/* Aborting the write request should succeed. */
2567 	write_io->internal.in_submit_request = true;
2568 	bdev_nvme_submit_request(ch1, write_io);
2569 
2570 	CU_ASSERT(write_io->internal.in_submit_request == true);
2571 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2572 
2573 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2574 	abort_io->u.abort.bio_to_abort = write_io;
2575 	abort_io->internal.in_submit_request = true;
2576 
2577 	bdev_nvme_submit_request(ch1, abort_io);
2578 
2579 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2580 	poll_threads();
2581 
2582 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2583 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2584 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2585 	CU_ASSERT(write_io->internal.in_submit_request == false);
2586 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2587 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2588 
2589 	/* Aborting the admin request should succeed. */
2590 	admin_io->internal.in_submit_request = true;
2591 	bdev_nvme_submit_request(ch1, admin_io);
2592 
2593 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2594 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2595 
2596 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2597 	abort_io->u.abort.bio_to_abort = admin_io;
2598 	abort_io->internal.in_submit_request = true;
2599 
2600 	bdev_nvme_submit_request(ch2, abort_io);
2601 
2602 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2603 	poll_threads();
2604 
2605 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2606 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2607 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2608 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2609 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2610 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2611 
2612 	set_thread(0);
2613 
2614 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2615 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
2616 	 * resetting should be queued.
2617 	 */
2618 	ctrlr_ch1->qpair->is_failed = true;
2619 
2620 	poll_thread_times(0, 3);
2621 
2622 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2623 	CU_ASSERT(nvme_ctrlr->resetting == true);
2624 
2625 	write_io->internal.in_submit_request = true;
2626 
2627 	bdev_nvme_submit_request(ch1, write_io);
2628 
2629 	CU_ASSERT(write_io->internal.in_submit_request == true);
2630 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2631 
2632 	/* Aborting the queued write request should succeed immediately. */
2633 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2634 	abort_io->u.abort.bio_to_abort = write_io;
2635 	abort_io->internal.in_submit_request = true;
2636 
2637 	bdev_nvme_submit_request(ch1, abort_io);
2638 
2639 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2640 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2641 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2642 	CU_ASSERT(write_io->internal.in_submit_request == false);
2643 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2644 
2645 	spdk_put_io_channel(ch1);
2646 
2647 	set_thread(1);
2648 
2649 	spdk_put_io_channel(ch2);
2650 
2651 	poll_threads();
2652 
2653 	free(write_io);
2654 	free(admin_io);
2655 	free(abort_io);
2656 
2657 	set_thread(1);
2658 
2659 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2660 	CU_ASSERT(rc == 0);
2661 
2662 	poll_threads();
2663 	spdk_delay_us(1000);
2664 	poll_threads();
2665 
2666 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2667 }
2668 
2669 static void
2670 test_get_io_qpair(void)
2671 {
2672 	struct spdk_nvme_transport_id trid = {};
2673 	struct spdk_nvme_ctrlr ctrlr = {};
2674 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2675 	struct spdk_io_channel *ch;
2676 	struct nvme_ctrlr_channel *ctrlr_ch;
2677 	struct spdk_nvme_qpair *qpair;
2678 	int rc;
2679 
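	/* This test uses a fake ctrlr built on the stack rather than
	 * ut_attach_ctrlr(); in these stubs, only the active I/O qpair list
	 * needs initializing before creating a channel.
	 */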
2680 	ut_init_trid(&trid);
2681 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2682 
2683 	set_thread(0);
2684 
2685 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2686 	CU_ASSERT(rc == 0);
2687 
2688 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2689 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2690 
2691 	ch = spdk_get_io_channel(nvme_ctrlr);
2692 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2693 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2694 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2695 
2696 	qpair = bdev_nvme_get_io_qpair(ch);
2697 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2698 
2699 	spdk_put_io_channel(ch);
2700 
2701 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2702 	CU_ASSERT(rc == 0);
2703 
2704 	poll_threads();
2705 	spdk_delay_us(1000);
2706 	poll_threads();
2707 
2708 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2709 }
2710 
2711 /* Test a scenario where the bdev subsystem starts shutting down while NVMe
2712  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2713  * Add a test case to avoid regression for this scenario. spdk_bdev_unregister()
2714  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2715  */
2716 static void
2717 test_bdev_unregister(void)
2718 {
2719 	struct spdk_nvme_transport_id trid = {};
2720 	struct spdk_nvme_ctrlr *ctrlr;
2721 	struct nvme_ctrlr *nvme_ctrlr;
2722 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2723 	const int STRING_SIZE = 32;
2724 	const char *attached_names[STRING_SIZE];
2725 	struct nvme_bdev *bdev1, *bdev2;
2726 	int rc;
2727 
2728 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2729 	ut_init_trid(&trid);
2730 
2731 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2732 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2733 
2734 	g_ut_attach_ctrlr_status = 0;
2735 	g_ut_attach_bdev_count = 2;
2736 
2737 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2738 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2739 	CU_ASSERT(rc == 0);
2740 
2741 	spdk_delay_us(1000);
2742 	poll_threads();
2743 
2744 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2745 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2746 
2747 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2748 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2749 
2750 	bdev1 = nvme_ns1->bdev;
2751 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2752 
2753 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2754 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2755 
2756 	bdev2 = nvme_ns2->bdev;
2757 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2758 
2759 	bdev_nvme_destruct(&bdev1->disk);
2760 	bdev_nvme_destruct(&bdev2->disk);
2761 
2762 	poll_threads();
2763 
2764 	CU_ASSERT(nvme_ns1->bdev == NULL);
2765 	CU_ASSERT(nvme_ns2->bdev == NULL);
2766 
2767 	nvme_ctrlr->destruct = true;
2768 	_nvme_ctrlr_destruct(nvme_ctrlr);
2769 
2770 	poll_threads();
2771 	spdk_delay_us(1000);
2772 	poll_threads();
2773 
2774 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2775 }
2776 
2777 static void
2778 test_compare_ns(void)
2779 {
2780 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2781 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2782 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2783 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2784 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2785 
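	/* bdev_nvme_compare_ns() checks EUI64, NGUID, UUID, and CSI in turn;
	 * the cases below exercise each identifier.
	 */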
2786 	/* No IDs are defined. */
2787 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2788 
2789 	/* Only EUI64 are defined and not matched. */
2790 	nsdata1.eui64 = 0xABCDEF0123456789;
2791 	nsdata2.eui64 = 0xBBCDEF0123456789;
2792 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2793 
2794 	/* Only EUI64 are defined and matched. */
2795 	nsdata2.eui64 = 0xABCDEF0123456789;
2796 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2797 
2798 	/* Only NGUID are defined and not matched. */
2799 	nsdata1.eui64 = 0x0;
2800 	nsdata2.eui64 = 0x0;
2801 	nsdata1.nguid[0] = 0x12;
2802 	nsdata2.nguid[0] = 0x10;
2803 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2804 
2805 	/* Only NGUID are defined and matched. */
2806 	nsdata2.nguid[0] = 0x12;
2807 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2808 
2809 	/* Only UUID are defined and not matched. */
2810 	nsdata1.nguid[0] = 0x0;
2811 	nsdata2.nguid[0] = 0x0;
2812 	ns1.uuid = &uuid1;
2813 	ns2.uuid = &uuid2;
2814 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2815 
2816 	/* Only one UUID is defined. */
2817 	ns1.uuid = NULL;
2818 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2819 
2820 	/* Only UUID are defined and matched. */
2821 	ns1.uuid = &uuid2;
2822 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2823 
2824 	/* All EUI64, NGUID, and UUID are defined and matched. */
2825 	nsdata1.eui64 = 0x123456789ABCDEF;
2826 	nsdata2.eui64 = 0x123456789ABCDEF;
2827 	nsdata1.nguid[15] = 0x34;
2828 	nsdata2.nguid[15] = 0x34;
2829 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2830 
2831 	/* CSI are not matched. */
2832 	ns1.csi = SPDK_NVME_CSI_ZNS;
2833 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2834 }
2835 
2836 static void
2837 test_init_ana_log_page(void)
2838 {
2839 	struct spdk_nvme_transport_id trid = {};
2840 	struct spdk_nvme_ctrlr *ctrlr;
2841 	struct nvme_ctrlr *nvme_ctrlr;
2842 	const int STRING_SIZE = 32;
2843 	const char *attached_names[STRING_SIZE];
2844 	int rc;
2845 
2846 	set_thread(0);
2847 
2848 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2849 	ut_init_trid(&trid);
2850 
2851 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2852 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2853 
2854 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2855 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2856 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2857 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2858 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2859 
2860 	g_ut_attach_ctrlr_status = 0;
2861 	g_ut_attach_bdev_count = 5;
2862 
2863 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2864 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2865 	CU_ASSERT(rc == 0);
2866 
2867 	spdk_delay_us(1000);
2868 	poll_threads();
2869 
2870 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2871 	poll_threads();
2872 
2873 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2874 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2875 
2876 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2877 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2878 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2879 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2880 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2881 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2882 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2883 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2884 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2885 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2886 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2887 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2888 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2889 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2890 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2891 
2892 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2893 	CU_ASSERT(rc == 0);
2894 
2895 	poll_threads();
2896 	spdk_delay_us(1000);
2897 	poll_threads();
2898 
2899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2900 }
2901 
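/* Register a stand-in io_device for the accel framework so that code paths
 * which fetch an accel channel have something to attach to.
 */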
2902 static void
2903 init_accel(void)
2904 {
2905 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2906 				sizeof(int), "accel_p");
2907 }
2908 
2909 static void
2910 fini_accel(void)
2911 {
2912 	spdk_io_device_unregister(g_accel_p, NULL);
2913 }
2914 
2915 static void
2916 test_get_memory_domains(void)
2917 {
2918 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2919 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2920 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2921 	struct spdk_memory_domain *domains[2] = {};
2922 	int rc = 0;
2923 
2924 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2925 
2926 	/* nvme controller doesn't have memory domains */
2927 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2928 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2929 	CU_ASSERT(rc == 0)
2930 	CU_ASSERT(rc == 0);
2931 	/* nvme controller has a memory domain */
2932 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2933 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2934 	CU_ASSERT(rc == 1);
2935 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2936 }
2937 
2938 static void
2939 test_reconnect_qpair(void)
2940 {
2941 	struct spdk_nvme_transport_id trid = {};
2942 	struct spdk_nvme_ctrlr *ctrlr;
2943 	struct nvme_ctrlr *nvme_ctrlr;
2944 	const int STRING_SIZE = 32;
2945 	const char *attached_names[STRING_SIZE];
2946 	struct nvme_bdev *bdev;
2947 	struct spdk_io_channel *ch1, *ch2;
2948 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
2949 	struct nvme_io_path *io_path1, *io_path2;
2950 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
2951 	int rc;
2952 
2953 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2954 	ut_init_trid(&trid);
2955 
2956 	set_thread(0);
2957 
2958 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2959 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2960 
2961 	g_ut_attach_ctrlr_status = 0;
2962 	g_ut_attach_bdev_count = 1;
2963 
2964 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2965 			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
2966 	CU_ASSERT(rc == 0);
2967 
2968 	spdk_delay_us(1000);
2969 	poll_threads();
2970 
2971 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2972 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2973 
2974 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2975 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2976 
2977 	ch1 = spdk_get_io_channel(bdev);
2978 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2979 
2980 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2981 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2982 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2983 	ctrlr_ch1 = io_path1->ctrlr_ch;
2984 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2985 
2986 	set_thread(1);
2987 
2988 	ch2 = spdk_get_io_channel(bdev);
2989 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2990 
2991 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
2992 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
2993 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
2994 	ctrlr_ch2 = io_path2->ctrlr_ch;
2995 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
2996 
2997 	/* If a qpair is disconnected, it is freed and then reconnected via
2998 	 * resetting the corresponding nvme_ctrlr.
2999 	 */
3000 	ctrlr_ch2->qpair->is_failed = true;
3001 	ctrlr->is_failed = true;
3002 
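	/* Step the pollers a few times each to observe the reset sequence:
	 * thread 1 notices its failed qpair and frees it, thread 0 frees its
	 * qpair and resets the ctrlr, and then both qpairs are recreated.
	 */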
3003 	poll_thread_times(1, 2);
3004 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
3005 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
3006 	CU_ASSERT(nvme_ctrlr->resetting == true);
3007 
3008 	poll_thread_times(0, 2);
3009 	poll_thread_times(1, 1);
3010 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
3011 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
3012 	CU_ASSERT(ctrlr->is_failed == true);
3013 
3014 	poll_thread_times(0, 1);
3015 	CU_ASSERT(ctrlr->is_failed == false);
3016 
3017 	poll_thread_times(0, 1);
3018 	poll_thread_times(1, 1);
3019 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
3020 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
3021 	CU_ASSERT(nvme_ctrlr->resetting == true);
3022 
3023 	poll_thread_times(0, 2);
3024 	poll_thread_times(1, 1);
3025 	poll_thread_times(0, 1);
3026 	CU_ASSERT(nvme_ctrlr->resetting == false);
3027 
3028 	poll_threads();
3029 
3030 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3031 	 * fails, the qpair is just freed.
3032 	 */
3033 	ctrlr_ch2->qpair->is_failed = true;
3034 	ctrlr->is_failed = true;
3035 	ctrlr->fail_reset = true;
3036 
3037 	poll_thread_times(1, 2);
3038 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
3039 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
3040 	CU_ASSERT(nvme_ctrlr->resetting == true);
3041 
3042 	poll_thread_times(0, 2);
3043 	poll_thread_times(1, 1);
3044 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
3045 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
3046 	CU_ASSERT(ctrlr->is_failed == true);
3047 
3048 	poll_thread_times(0, 2);
3049 	poll_thread_times(1, 1);
3050 	poll_thread_times(0, 1);
3051 	CU_ASSERT(ctrlr->is_failed == true);
3052 	CU_ASSERT(nvme_ctrlr->resetting == false);
3053 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
3054 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
3055 
3056 	poll_threads();
3057 
3058 	spdk_put_io_channel(ch2);
3059 
3060 	set_thread(0);
3061 
3062 	spdk_put_io_channel(ch1);
3063 
3064 	poll_threads();
3065 
3066 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3067 	CU_ASSERT(rc == 0);
3068 
3069 	poll_threads();
3070 	spdk_delay_us(1000);
3071 	poll_threads();
3072 
3073 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3074 }
3075 
3076 static void
3077 test_create_bdev_ctrlr(void)
3078 {
3079 	struct nvme_path_id path1 = {}, path2 = {};
3080 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3081 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3082 	const int STRING_SIZE = 32;
3083 	const char *attached_names[STRING_SIZE];
3084 	int rc;
3085 
3086 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3087 	ut_init_trid(&path1.trid);
3088 	ut_init_trid2(&path2.trid);
3089 
3090 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3091 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3092 
3093 	g_ut_attach_ctrlr_status = 0;
3094 	g_ut_attach_bdev_count = 0;
3095 
3096 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3097 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3098 	CU_ASSERT(rc == 0);
3099 
3100 	spdk_delay_us(1000);
3101 	poll_threads();
3102 
3103 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3104 	poll_threads();
3105 
3106 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3107 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3108 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3109 
3110 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3111 	g_ut_attach_ctrlr_status = -EINVAL;
3112 
3113 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3114 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3115 
3116 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3117 
3118 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3119 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3120 	CU_ASSERT(rc == 0);
3121 
3122 	spdk_delay_us(1000);
3123 	poll_threads();
3124 
3125 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3126 	poll_threads();
3127 
3128 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3129 
3130 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3131 	g_ut_attach_ctrlr_status = 0;
3132 
3133 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3134 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3135 
3136 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3137 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3138 	CU_ASSERT(rc == 0);
3139 
3140 	spdk_delay_us(1000);
3141 	poll_threads();
3142 
3143 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3144 	poll_threads();
3145 
3146 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3147 
3148 	/* Delete two ctrlrs at once. */
3149 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3150 	CU_ASSERT(rc == 0);
3151 
3152 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3153 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3154 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3155 
3156 	poll_threads();
3157 	spdk_delay_us(1000);
3158 	poll_threads();
3159 
3160 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3161 
3162 	/* Add two ctrlrs and delete them one by one. */
3163 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3164 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3165 
3166 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3167 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3168 
3169 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3170 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3171 	CU_ASSERT(rc == 0);
3172 
3173 	spdk_delay_us(1000);
3174 	poll_threads();
3175 
3176 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3177 	poll_threads();
3178 
3179 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3180 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3181 	CU_ASSERT(rc == 0);
3182 
3183 	spdk_delay_us(1000);
3184 	poll_threads();
3185 
3186 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3187 	poll_threads();
3188 
3189 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3190 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3191 
3192 	rc = bdev_nvme_delete("nvme0", &path1);
3193 	CU_ASSERT(rc == 0);
3194 
3195 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3196 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3197 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3198 
3199 	poll_threads();
3200 	spdk_delay_us(1000);
3201 	poll_threads();
3202 
3203 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3204 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3205 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3206 
3207 	rc = bdev_nvme_delete("nvme0", &path2);
3208 	CU_ASSERT(rc == 0);
3209 
3210 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3211 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3212 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3213 
3214 	poll_threads();
3215 	spdk_delay_us(1000);
3216 	poll_threads();
3217 
3218 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3219 }
3220 
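/* Return the nvme_ns of the given bdev that belongs to nvme_ctrlr, or NULL
 * if the bdev has no namespace from that ctrlr.
 */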
3221 static struct nvme_ns *
3222 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3223 {
3224 	struct nvme_ns *nvme_ns;
3225 
3226 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3227 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3228 			return nvme_ns;
3229 		}
3230 	}
3231 
3232 	return NULL;
3233 }
3234 
3235 static void
3236 test_add_multi_ns_to_bdev(void)
3237 {
3238 	struct nvme_path_id path1 = {}, path2 = {};
3239 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3240 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3241 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3242 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3243 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3244 	const int STRING_SIZE = 32;
3245 	const char *attached_names[STRING_SIZE];
3246 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3247 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3248 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3249 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3250 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3251 	int rc;
3252 
3253 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3254 	ut_init_trid(&path1.trid);
3255 	ut_init_trid2(&path2.trid);
3256 
3257 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3258 
3259 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3260 	 * 3rd, and 4th namespaces are populated.
3261 	 */
3262 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3263 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3264 
3265 	ctrlr1->ns[1].is_active = false;
3266 	ctrlr1->ns[4].is_active = false;
3267 	ctrlr1->ns[0].uuid = &uuid1;
3268 	ctrlr1->ns[2].uuid = &uuid3;
3269 	ctrlr1->ns[3].uuid = &uuid4;
3270 
3271 	g_ut_attach_ctrlr_status = 0;
3272 	g_ut_attach_bdev_count = 3;
3273 
3274 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3275 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3276 	CU_ASSERT(rc == 0);
3277 
3278 	spdk_delay_us(1000);
3279 	poll_threads();
3280 
3281 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3282 	poll_threads();
3283 
3284 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3285 	 * 2nd, and 4th namespaces are populated. The uuid of the 4th namespace is
3286 	 * different, and hence adding the 4th namespace to the existing bdev should fail.
3287 	 */
3288 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3289 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3290 
3291 	ctrlr2->ns[2].is_active = false;
3292 	ctrlr2->ns[4].is_active = false;
3293 	ctrlr2->ns[0].uuid = &uuid1;
3294 	ctrlr2->ns[1].uuid = &uuid2;
3295 	ctrlr2->ns[3].uuid = &uuid44;
3296 
3297 	g_ut_attach_ctrlr_status = 0;
3298 	g_ut_attach_bdev_count = 2;
3299 
3300 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3301 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3302 	CU_ASSERT(rc == 0);
3303 
3304 	spdk_delay_us(1000);
3305 	poll_threads();
3306 
3307 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3308 	poll_threads();
3309 
3310 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3311 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3312 
3313 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3314 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3315 
3316 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3317 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3318 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3319 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3320 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3321 
3322 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3323 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3324 
3325 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3326 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3327 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3328 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3329 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3330 
3331 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3332 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3333 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3334 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3335 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3336 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3337 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3338 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3339 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3340 
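	/* bdev1 is backed by a namespace from each of the two ctrlrs, so its
	 * refcount is 2; the other bdevs each have a single backing namespace.
	 */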
3341 	CU_ASSERT(bdev1->ref == 2);
3342 	CU_ASSERT(bdev2->ref == 1);
3343 	CU_ASSERT(bdev3->ref == 1);
3344 	CU_ASSERT(bdev4->ref == 1);
3345 
3346 	/* Test that the nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3347 	rc = bdev_nvme_delete("nvme0", &path1);
3348 	CU_ASSERT(rc == 0);
3349 
3350 	poll_threads();
3351 	spdk_delay_us(1000);
3352 	poll_threads();
3353 
3354 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3355 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3356 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3357 
3358 	rc = bdev_nvme_delete("nvme0", &path2);
3359 	CU_ASSERT(rc == 0);
3360 
3361 	poll_threads();
3362 	spdk_delay_us(1000);
3363 	poll_threads();
3364 
3365 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3366 
3367 	/* Test that an nvme_bdev whose namespace is shared between two ctrlrs
3368 	 * can be deleted when the bdev subsystem shuts down.
3369 	 */
3370 	g_ut_attach_bdev_count = 1;
3371 
3372 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3373 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3374 
3375 	ctrlr1->ns[0].uuid = &uuid1;
3376 
3377 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3378 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3379 	CU_ASSERT(rc == 0);
3380 
3381 	spdk_delay_us(1000);
3382 	poll_threads();
3383 
3384 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3385 	poll_threads();
3386 
3387 	ut_init_trid2(&path2.trid);
3388 
3389 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3390 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3391 
3392 	ctrlr2->ns[0].uuid = &uuid1;
3393 
3394 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3395 			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
3396 	CU_ASSERT(rc == 0);
3397 
3398 	spdk_delay_us(1000);
3399 	poll_threads();
3400 
3401 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3402 	poll_threads();
3403 
3404 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3405 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3406 
3407 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3408 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3409 
3410 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3411 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3412 
3413 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3414 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3415 
3416 	/* Check that the nvme_bdev has two nvme_ns. */
3417 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3418 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3419 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3420 
3421 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3422 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3423 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3424 
3425 	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down. */
3426 	bdev_nvme_destruct(&bdev1->disk);
3427 
3428 	poll_threads();
3429 
3430 	CU_ASSERT(nvme_ns1->bdev == NULL);
3431 	CU_ASSERT(nvme_ns2->bdev == NULL);
3432 
3433 	nvme_ctrlr1->destruct = true;
3434 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3435 
3436 	poll_threads();
3437 	spdk_delay_us(1000);
3438 	poll_threads();
3439 
3440 	nvme_ctrlr2->destruct = true;
3441 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3442 
3443 	poll_threads();
3444 	spdk_delay_us(1000);
3445 	poll_threads();
3446 
3447 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3448 }

static void
test_add_multi_io_paths_to_nbdev_ch(void)
{
	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2, *io_path3;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	ut_init_trid3(&path3.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);

	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);

	set_thread(1);

	/* Check that an I/O path is dynamically added to the nvme_bdev_channel. */
	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	ctrlr3->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);

	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);

	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);

	/* Check that an I/O path is dynamically deleted from the nvme_bdev_channel. */
	rc = bdev_nvme_delete("nvme0", &path2);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);

	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);

	set_thread(0);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_admin_path(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct spdk_bdev_io *bdev_io;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	/* ctrlr1 has failed but ctrlr2 has not. The admin command should be
	 * submitted to ctrlr2.
	 */
	ctrlr1->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Both ctrlr1 and ctrlr2 have failed. Submission of the admin command should fail. */
	ctrlr2->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

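/* Walk the channel's io_path_list and return the io_path whose ctrlr_ch
 * belongs to the given nvme_ctrlr. The owning nvme_ctrlr is recovered by
 * mapping the ctrlr_ch context back to its spdk_io_channel and reading the
 * channel's io_device.
 */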
static struct nvme_io_path *
ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
			struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_io_path *io_path;
	struct nvme_ctrlr *_nvme_ctrlr;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
		if (_nvme_ctrlr == nvme_ctrlr) {
			return io_path;
		}
	}

	return NULL;
}

static void
test_reset_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_path_id *curr_path1, *curr_path2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct nvme_bdev_io *first_bio;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);

	/* The first reset request from bdev_io is submitted on thread 0.
	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
	 *
	 * A few extra polls are necessary after resetting ctrlr1 to check
	 * pending reset requests for ctrlr1.
	 */
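	/* The poll_thread_times() counts below step through the reset sequence
	 * one message at a time (qpair teardown on each thread, controller
	 * reset, qpair re-creation), so they are tightly coupled to the
	 * current bdev_nvme message passing.
	 */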
	ctrlr1->is_failed = true;
	curr_path1->is_failed = true;
	ctrlr2->is_failed = true;
	curr_path2->is_failed = true;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);
	CU_ASSERT(first_bio->io_path == io_path11);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);

	poll_thread_times(0, 2);
	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
	CU_ASSERT(ctrlr1->is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr1->resetting == false);
	CU_ASSERT(curr_path1->is_failed == false);
	CU_ASSERT(first_bio->io_path == io_path12);
	CU_ASSERT(nvme_ctrlr2->resetting == true);

	poll_thread_times(0, 2);
	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
	CU_ASSERT(ctrlr2->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);

	poll_thread_times(1, 2);
	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	poll_thread_times(0, 2);
	CU_ASSERT(first_bio->io_path == NULL);
	CU_ASSERT(nvme_ctrlr2->resetting == false);
	CU_ASSERT(curr_path2->is_failed == false);

	poll_threads();

	/* There is a race between two reset requests from bdev_io.
	 *
	 * The first reset request is submitted on thread 0, and the second reset
	 * request is submitted on thread 1 while the first is resetting ctrlr1.
	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
	 * both reset requests go to ctrlr2. The first arrives earlier than the second.
	 * The second is pending on ctrlr2 again. After the first completes resetting
	 * ctrlr2, both complete successfully.
	 */
	ctrlr1->is_failed = true;
	curr_path1->is_failed = true;
	ctrlr2->is_failed = true;
	curr_path2->is_failed = true;
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);

	set_thread(1);

	bdev_nvme_submit_request(ch2, second_bdev_io);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
	CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->is_failed == false);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->is_failed == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

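/* This test drives bdev_nvme_find_io_path() directly on stack-built channel
 * and namespace structures; no controllers are attached.
 */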
static void
test_find_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
	struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);

	/* Test that an io_path whose ANA state is not accessible is excluded. */

	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;

	/* Test that an io_path whose qpair is being reset is excluded. */

	ctrlr_ch1.qpair = NULL;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);

	/* Test that an ANA optimized state, or failing that the first found ANA
	 * non-optimized state, is prioritized.
	 */

	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nbdev_ch.current_io_path = NULL;

	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;
}

static void
test_retry_io_if_ana_state_is_updating(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io1;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

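	/* The three trailing arguments presumably map to ctrlr_loss_timeout_sec = -1
	 * (retry forever), reconnect_delay_sec = 1, and fast_io_fail_timeout_sec = 0,
	 * in the same order used by bdev_nvme_check_multipath_params() below.
	 */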
	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io1);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	ctrlr_ch = io_path->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the namespace's ANA state is inaccessible, I/O should be queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nbdev_ch->current_io_path = NULL;

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	/* The ANA state became accessible while the I/O was queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

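	/* Advance a full second so that the retry poller fires and resubmits
	 * the queued I/O.
	 */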
	spdk_delay_us(1000000);

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_io_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	CU_ASSERT(nvme_ctrlr1 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
	CU_ASSERT(nvme_ns1 != NULL);
	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);

	ctrlr_ch1 = io_path1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1->qpair != NULL);

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* I/O got a temporary I/O path error, but it should not be retried
	 * because the DNR (do not retry) bit is set.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
	req->cpl.status.dnr = 1;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	/* I/O got a temporary I/O path error, but it should succeed after retry. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Add io_path2 dynamically, and create a multipath configuration. */
	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	CU_ASSERT(nvme_ctrlr2 != NULL);

	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
	CU_ASSERT(nvme_ns2 != NULL);
	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));

	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);

	ctrlr_ch2 = io_path2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2->qpair != NULL);

	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
	 * and deleted, so the I/O is aborted. io_path2 is still available, so after
	 * a retry the I/O is submitted to io_path2 and should succeed.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

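	/* Tear down io_path1's qpair explicitly so that the retried I/O has no
	 * choice but to take io_path2.
	 */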
	bdev_nvme_destroy_qpair(ctrlr_ch1);

	CU_ASSERT(ctrlr_ch1->qpair == NULL);

	poll_threads();

	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_retry_io_count(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct ut_nvme_req *req;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	ctrlr_ch = io_path->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If I/O is aborted by request, it should not be retried. */
	g_opts.bdev_retry_count = 1;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
	 * the failed I/O should not be retried.
	 */
	g_opts.bdev_retry_count = 4;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 4;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
	g_opts.bdev_retry_count = -1;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 4;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If bio->retry_count is less than g_opts.bdev_retry_count,
	 * the failed I/O should be retried.
	 */
	g_opts.bdev_retry_count = 4;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 3;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_concurrent_read_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

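	/* Issue the initial read. This sets ana_log_page_updating, which is
	 * what serializes concurrent readers across threads.
	 */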
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* The following read requests should be rejected. */
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	set_thread(1);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A reset request issued while reading the ANA log page should not be rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_ana_error(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct ut_nvme_req *req;
	uint64_t now;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	ctrlr_ch = io_path->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);

	now = spdk_get_ticks();

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and the namespace's ANA state should be
	 * updated.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* I/O should be retried immediately. */
	CU_ASSERT(bio->retry_ticks == now);
	CU_ASSERT(nvme_ns->ana_state_updating == true);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);

	poll_threads();

	/* The namespace is inaccessible, so the I/O should be queued again. */
	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* I/O should be retried after one second if no I/O path was found but one
	 * may become available.
	 */
	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());

	/* The namespace should be unfrozen after its ANA state update completes. */
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ns->ana_state_updating == false);
	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	/* Retrying the queued I/O should succeed. */
	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_retry_admin_passthru_for_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *admin_io;
	struct spdk_io_channel *ch;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	CU_ASSERT(nvme_ctrlr1 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* Admin passthrough got a path error, but it should not be retried
	 * because the DNR (do not retry) bit is set.
	 */
	admin_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, admin_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
	req->cpl.status.dnr = 1;

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	/* Admin passthrough got a path error, but it should succeed after retry. */
	admin_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, admin_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Add ctrlr2 dynamically, and create a multipath configuration. */
	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, true, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	CU_ASSERT(nvme_ctrlr2 != NULL);

	/* Admin passthrough was submitted to ctrlr1, but ctrlr1 failed. Hence the
	 * admin passthrough was aborted. But ctrlr2 is available, so after a
	 * retry the admin passthrough is submitted to ctrlr2 and should succeed.
	 */
	admin_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, admin_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctrlr1->is_failed = true;

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(admin_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_retry_admin_passthru_by_count(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *admin_io;
	struct nvme_bdev_io *admin_bio;
	struct spdk_io_channel *ch;
	struct ut_nvme_req *req;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false, 0, 0, 0);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If admin passthrough is aborted by request, it should not be retried. */
	g_opts.bdev_retry_count = 1;

	admin_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, admin_io);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	/* If admin_bio->retry_count is not less than g_opts.bdev_retry_count,
	 * the failed admin passthrough should not be retried.
	 */
	g_opts.bdev_retry_count = 4;

	admin_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, admin_io);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(admin_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	admin_bio->retry_count = 4;

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	free(admin_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_check_multipath_params(void)
{
	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
	 * 3rd parameter is fast_io_fail_timeout_sec.
	 */
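	/* A sketch of the rules the cases below imply, not necessarily the
	 * actual implementation:
	 *
	 *   ctrlr_loss_timeout_sec < -1   -> invalid.
	 *   ctrlr_loss_timeout_sec == 0   -> reconnect_delay_sec and
	 *                                    fast_io_fail_timeout_sec must be 0.
	 *   ctrlr_loss_timeout_sec != 0   -> reconnect_delay_sec must be non-zero
	 *                                    and, if ctrlr_loss_timeout_sec is
	 *                                    positive, must not exceed it.
	 *   fast_io_fail_timeout_sec != 0 -> must be at least reconnect_delay_sec
	 *                                    and, unless ctrlr_loss_timeout_sec is
	 *                                    -1, must not exceed it.
	 */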
	CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true);
}
5107 
5108 static void
5109 test_retry_io_if_ctrlr_is_resetting(void)
5110 {
5111 	struct nvme_path_id path = {};
5112 	struct spdk_nvme_ctrlr *ctrlr;
5113 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5114 	struct nvme_ctrlr *nvme_ctrlr;
5115 	const int STRING_SIZE = 32;
5116 	const char *attached_names[STRING_SIZE];
5117 	struct nvme_bdev *bdev;
5118 	struct nvme_ns *nvme_ns;
5119 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5120 	struct spdk_io_channel *ch;
5121 	struct nvme_bdev_channel *nbdev_ch;
5122 	struct nvme_io_path *io_path;
5123 	struct nvme_ctrlr_channel *ctrlr_ch;
5124 	int rc;
5125 
5126 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5127 	ut_init_trid(&path.trid);
5128 
5129 	set_thread(0);
5130 
5131 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5132 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5133 
5134 	g_ut_attach_ctrlr_status = 0;
5135 	g_ut_attach_bdev_count = 1;
5136 
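	/* The trailing arguments set ctrlr_loss_timeout_sec = -1 (infinite
	 * reconnect retries), reconnect_delay_sec = 1, and
	 * fast_io_fail_timeout_sec = 0.
	 */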
5137 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5138 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
5139 	CU_ASSERT(rc == 0);
5140 
5141 	spdk_delay_us(1000);
5142 	poll_threads();
5143 
5144 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5145 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5146 
5147 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5148 	CU_ASSERT(nvme_ctrlr != NULL);
5149 
5150 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5151 	CU_ASSERT(bdev != NULL);
5152 
5153 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5154 	CU_ASSERT(nvme_ns != NULL);
5155 
5156 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5157 	ut_bdev_io_set_buf(bdev_io1);
5158 
5159 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5160 	ut_bdev_io_set_buf(bdev_io2);
5161 
5162 	ch = spdk_get_io_channel(bdev);
5163 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5164 
5165 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5166 
5167 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5168 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5169 
5170 	ctrlr_ch = io_path->ctrlr_ch;
5171 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5172 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
5173 
5174 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5175 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5176 
5177 	/* If qpair is connected, I/O should succeed. */
5178 	bdev_io1->internal.in_submit_request = true;
5179 
5180 	bdev_nvme_submit_request(ch, bdev_io1);
5181 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5182 
5183 	poll_threads();
5184 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5185 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5186 
5187 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5188 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5189 	 * while resetting the nvme_ctrlr.
5190 	 */
5191 	ctrlr_ch->qpair->is_failed = true;
5192 	ctrlr->is_failed = true;
5193 
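	/* Stepping the poller lets the failed qpair be deleted and a reset of
	 * the nvme_ctrlr start; the reset clears ctrlr->is_failed.
	 */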
5194 	poll_thread_times(0, 5);
5195 
5196 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5197 	CU_ASSERT(nvme_ctrlr->resetting == true);
5198 	CU_ASSERT(ctrlr->is_failed == false);
5199 
5200 	bdev_io1->internal.in_submit_request = true;
5201 
5202 	bdev_nvme_submit_request(ch, bdev_io1);
5203 
5204 	spdk_delay_us(1);
5205 
5206 	bdev_io2->internal.in_submit_request = true;
5207 
5208 	bdev_nvme_submit_request(ch, bdev_io2);
5209 
5210 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5211 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5212 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5213 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5214 
5215 	poll_threads();
5216 
5217 	CU_ASSERT(ctrlr_ch->qpair != NULL);
5218 	CU_ASSERT(nvme_ctrlr->resetting == false);
5219 
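	/* Advance the clock by almost a second and run the retry poller once.
	 * bdev_io1 was queued 1us earlier than bdev_io2, so only bdev_io1 should
	 * be resubmitted; bdev_io2 should remain on the retry list.
	 */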
5220 	spdk_delay_us(999999);
5221 
5222 	poll_thread_times(0, 1);
5223 
5224 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
5225 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5226 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5227 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5228 
5229 	poll_threads();
5230 
5231 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
5232 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5233 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5234 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5235 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5236 
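	/* One more microsecond makes bdev_io2's retry time arrive; a single poll
	 * should resubmit it and empty the retry list.
	 */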
5237 	spdk_delay_us(1);
5238 
5239 	poll_thread_times(0, 1);
5240 
5241 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
5242 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5243 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5244 
5245 	poll_threads();
5246 
5247 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
5248 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5249 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5250 
5251 	free(bdev_io1);
5252 	free(bdev_io2);
5253 
5254 	spdk_put_io_channel(ch);
5255 
5256 	poll_threads();
5257 
5258 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5259 	CU_ASSERT(rc == 0);
5260 
5261 	poll_threads();
5262 	spdk_delay_us(1000);
5263 	poll_threads();
5264 
5265 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5266 }
5267 
5268 static void
5269 test_retry_admin_passthru_if_ctrlr_is_resetting(void)
5270 {
5271 	struct nvme_path_id path = {};
5272 	struct spdk_nvme_ctrlr *ctrlr;
5273 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5274 	struct nvme_ctrlr *nvme_ctrlr;
5275 	const int STRING_SIZE = 32;
5276 	const char *attached_names[STRING_SIZE];
5277 	struct nvme_bdev *bdev;
5278 	struct spdk_bdev_io *admin_io;
5279 	struct spdk_io_channel *ch;
5280 	struct nvme_bdev_channel *nbdev_ch;
5281 	int rc;
5282 
5283 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5284 	ut_init_trid(&path.trid);
5285 
5286 	g_opts.bdev_retry_count = 1;
5287 
5288 	set_thread(0);
5289 
5290 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5291 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5292 
5293 	g_ut_attach_ctrlr_status = 0;
5294 	g_ut_attach_bdev_count = 1;
5295 
5296 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5297 			      attach_ctrlr_done, NULL, NULL, false, -1, 1, 0);
5298 	CU_ASSERT(rc == 0);
5299 
5300 	spdk_delay_us(1000);
5301 	poll_threads();
5302 
5303 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5304 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5305 
5306 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5307 	CU_ASSERT(nvme_ctrlr != NULL);
5308 
5309 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5310 	CU_ASSERT(bdev != NULL);
5311 
5312 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5313 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5314 
5315 	ch = spdk_get_io_channel(bdev);
5316 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5317 
5318 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5319 
5320 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5321 
5322 	/* If ctrlr is available, admin passthrough should succeed. */
5323 	admin_io->internal.in_submit_request = true;
5324 
5325 	bdev_nvme_submit_request(ch, admin_io);
5326 
5327 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5328 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5329 
5330 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5331 	poll_threads();
5332 
5333 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5334 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5335 
5336 	/* An admin passthrough request should be queued if it is submitted
5337 	 * while the ctrlr is resetting.
5338 	 */
5339 	bdev_nvme_reset(nvme_ctrlr);
5340 
5341 	poll_thread_times(0, 1);
5342 
5343 	admin_io->internal.in_submit_request = true;
5344 
5345 	bdev_nvme_submit_request(ch, admin_io);
5346 
5347 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5348 	CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5349 
5350 	poll_threads();
5351 
5352 	CU_ASSERT(nvme_ctrlr->resetting == false);
5353 
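	/* About a second after the reset completed, the retry poller should
	 * resubmit the queued admin passthrough request.
	 */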
5354 	spdk_delay_us(1000000);
5355 	poll_thread_times(0, 1);
5356 
5357 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5358 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5359 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5360 
5361 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5362 	poll_threads();
5363 
5364 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5365 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5366 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5367 
5368 	free(admin_io);
5369 
5370 	spdk_put_io_channel(ch);
5371 
5372 	poll_threads();
5373 
5374 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5375 	CU_ASSERT(rc == 0);
5376 
5377 	poll_threads();
5378 	spdk_delay_us(1000);
5379 	poll_threads();
5380 
5381 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5382 
5383 	g_opts.bdev_retry_count = 0;
5384 }
5385 
5386 static void
5387 test_reconnect_ctrlr(void)
5388 {
5389 	struct spdk_nvme_transport_id trid = {};
5390 	struct spdk_nvme_ctrlr ctrlr = {};
5391 	struct nvme_ctrlr *nvme_ctrlr;
5392 	struct spdk_io_channel *ch1, *ch2;
5393 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5394 	int rc;
5395 
5396 	ut_init_trid(&trid);
5397 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5398 
5399 	set_thread(0);
5400 
5401 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5402 	CU_ASSERT(rc == 0);
5403 
5404 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5405 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5406 
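	/* With these values, a failed ctrlr gets a delayed reconnect retry every
	 * second until ctrlr_loss_timeout_sec (2 seconds) expires and the ctrlr
	 * is deleted.
	 */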
5407 	nvme_ctrlr->ctrlr_loss_timeout_sec = 2;
5408 	nvme_ctrlr->reconnect_delay_sec = 1;
5409 
5410 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5411 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5412 
5413 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5414 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5415 
5416 	set_thread(1);
5417 
5418 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5419 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5420 
5421 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5422 
5423 	/* Reset starts from thread 1. */
5424 	set_thread(1);
5425 
5426 	/* The reset should fail and a reconnect timer should be registered. */
5427 	ctrlr.fail_reset = true;
5428 	ctrlr.is_failed = true;
5429 
5430 	rc = bdev_nvme_reset(nvme_ctrlr);
5431 	CU_ASSERT(rc == 0);
5432 	CU_ASSERT(nvme_ctrlr->resetting == true);
5433 	CU_ASSERT(ctrlr.is_failed == true);
5434 
5435 	poll_threads();
5436 
5437 	CU_ASSERT(nvme_ctrlr->resetting == false);
5438 	CU_ASSERT(ctrlr.is_failed == false);
5439 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5440 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5441 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5442 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5443 
5444 	/* Then a reconnect retry should succeed. */
5445 	ctrlr.fail_reset = false;
5446 
5447 	spdk_delay_us(SPDK_SEC_TO_USEC);
5448 	poll_thread_times(0, 1);
5449 
5450 	CU_ASSERT(nvme_ctrlr->resetting == true);
5451 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5452 
5453 	poll_threads();
5454 
5455 	CU_ASSERT(nvme_ctrlr->resetting == false);
5456 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5457 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
5458 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5459 
5460 	/* The reset should fail again and another reconnect timer should be registered. */
5461 	ctrlr.fail_reset = true;
5462 	ctrlr.is_failed = true;
5463 
5464 	rc = bdev_nvme_reset(nvme_ctrlr);
5465 	CU_ASSERT(rc == 0);
5466 	CU_ASSERT(nvme_ctrlr->resetting == true);
5467 	CU_ASSERT(ctrlr.is_failed == true);
5468 
5469 	poll_threads();
5470 
5471 	CU_ASSERT(nvme_ctrlr->resetting == false);
5472 	CU_ASSERT(ctrlr.is_failed == false);
5473 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5474 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5475 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5476 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5477 
5478 	/* Then a reconnect retry should still fail. */
5479 	spdk_delay_us(SPDK_SEC_TO_USEC);
5480 	poll_thread_times(0, 1);
5481 
5482 	CU_ASSERT(nvme_ctrlr->resetting == true);
5483 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5484 
5485 	poll_threads();
5486 
5487 	CU_ASSERT(nvme_ctrlr->resetting == false);
5488 	CU_ASSERT(ctrlr.is_failed == false);
5489 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
5490 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
5491 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5492 
5493 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5494 	spdk_delay_us(SPDK_SEC_TO_USEC);
5495 	poll_threads();
5496 
5497 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5498 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5499 	CU_ASSERT(nvme_ctrlr->destruct == true);
5500 
5501 	spdk_put_io_channel(ch2);
5502 
5503 	set_thread(0);
5504 
5505 	spdk_put_io_channel(ch1);
5506 
5507 	poll_threads();
5508 	spdk_delay_us(1000);
5509 	poll_threads();
5510 
5511 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5512 }
5513 
5514 static struct nvme_path_id *
5515 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5516 		       const struct spdk_nvme_transport_id *trid)
5517 {
5518 	struct nvme_path_id *p;
5519 
5520 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5521 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5522 			break;
5523 		}
5524 	}
5525 
5526 	return p;
5527 }
5528 
5529 static void
5530 test_retry_failover_ctrlr(void)
5531 {
5532 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5533 	struct spdk_nvme_ctrlr ctrlr = {};
5534 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5535 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5536 	struct spdk_io_channel *ch;
5537 	struct nvme_ctrlr_channel *ctrlr_ch;
5538 	int rc;
5539 
5540 	ut_init_trid(&trid1);
5541 	ut_init_trid2(&trid2);
5542 	ut_init_trid3(&trid3);
5543 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5544 
5545 	set_thread(0);
5546 
5547 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5548 	CU_ASSERT(rc == 0);
5549 
5550 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5551 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5552 
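	/* ctrlr_loss_timeout_sec == -1 means reconnect retries continue
	 * indefinitely after a failed reset.
	 */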
5553 	nvme_ctrlr->ctrlr_loss_timeout_sec = -1;
5554 	nvme_ctrlr->reconnect_delay_sec = 1;
5555 
5556 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5557 	CU_ASSERT(rc == 0);
5558 
5559 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5560 	CU_ASSERT(rc == 0);
5561 
5562 	ch = spdk_get_io_channel(nvme_ctrlr);
5563 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5564 
5565 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5566 
5567 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5568 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5569 	CU_ASSERT(path_id1->is_failed == false);
5570 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5571 
5572 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5573 	ctrlr.fail_reset = true;
5574 	ctrlr.is_failed = true;
5575 
5576 	rc = bdev_nvme_reset(nvme_ctrlr);
5577 	CU_ASSERT(rc == 0);
5578 
5579 	poll_threads();
5580 
5581 	CU_ASSERT(nvme_ctrlr->resetting == false);
5582 	CU_ASSERT(ctrlr.is_failed == false);
5583 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5584 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5585 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5586 
5587 	CU_ASSERT(path_id1->is_failed == true);
5588 
5589 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5590 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5591 	CU_ASSERT(path_id2->is_failed == false);
5592 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5593 
5594 	/* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is
5595 	 * switched to trid3 but reset is not started.
5596 	 */
5597 	rc = bdev_nvme_failover(nvme_ctrlr, true);
5598 	CU_ASSERT(rc == 0);
5599 
5600 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5601 
5602 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5603 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5604 	CU_ASSERT(path_id3->is_failed == false);
5605 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5606 
5607 	CU_ASSERT(nvme_ctrlr->resetting == false);
5608 
5609 	/* If reconnect succeeds, trid3 should be the active path_id. */
5610 	ctrlr.fail_reset = false;
5611 
5612 	spdk_delay_us(SPDK_SEC_TO_USEC);
5613 	poll_thread_times(0, 1);
5614 
5615 	CU_ASSERT(nvme_ctrlr->resetting == true);
5616 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5617 
5618 	poll_threads();
5619 
5620 	CU_ASSERT(path_id3->is_failed == false);
5621 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5622 	CU_ASSERT(nvme_ctrlr->resetting == false);
5623 	CU_ASSERT(ctrlr_ch->qpair != NULL);
5624 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5625 
5626 	spdk_put_io_channel(ch);
5627 
5628 	poll_threads();
5629 
5630 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5631 	CU_ASSERT(rc == 0);
5632 
5633 	poll_threads();
5634 	spdk_delay_us(1000);
5635 	poll_threads();
5636 
5637 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5638 }
5639 
5640 static void
5641 test_fail_path(void)
5642 {
5643 	struct nvme_path_id path = {};
5644 	struct spdk_nvme_ctrlr *ctrlr;
5645 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5646 	struct nvme_ctrlr *nvme_ctrlr;
5647 	const int STRING_SIZE = 32;
5648 	const char *attached_names[STRING_SIZE];
5649 	struct nvme_bdev *bdev;
5650 	struct nvme_ns *nvme_ns;
5651 	struct spdk_bdev_io *bdev_io;
5652 	struct spdk_io_channel *ch;
5653 	struct nvme_bdev_channel *nbdev_ch;
5654 	struct nvme_io_path *io_path;
5655 	struct nvme_ctrlr_channel *ctrlr_ch;
5656 	int rc;
5657 
5658 	/* The test scenario is the following.
5659 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5660 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5661 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5662 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5663 	 *   comes first. The queued I/O is failed.
5664 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5665 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5666 	 */
5667 
5668 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5669 	ut_init_trid(&path.trid);
5670 
5671 	set_thread(0);
5672 
5673 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5674 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5675 
5676 	g_ut_attach_ctrlr_status = 0;
5677 	g_ut_attach_bdev_count = 1;
5678 
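	/* The trailing arguments set ctrlr_loss_timeout_sec = 4,
	 * reconnect_delay_sec = 1, and fast_io_fail_timeout_sec = 2.
	 */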
5679 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
5680 			      attach_ctrlr_done, NULL, NULL, false, 4, 1, 2);
5681 	CU_ASSERT(rc == 0);
5682 
5683 	spdk_delay_us(1000);
5684 	poll_threads();
5685 
5686 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5687 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5688 
5689 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5690 	CU_ASSERT(nvme_ctrlr != NULL);
5691 
5692 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5693 	CU_ASSERT(bdev != NULL);
5694 
5695 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5696 	CU_ASSERT(nvme_ns != NULL);
5697 
5698 	ch = spdk_get_io_channel(bdev);
5699 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5700 
5701 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5702 
5703 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5704 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5705 
5706 	ctrlr_ch = io_path->ctrlr_ch;
5707 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5708 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
5709 
5710 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5711 	ut_bdev_io_set_buf(bdev_io);
5712 
5713 
5714 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5715 	ctrlr->fail_reset = true;
5716 	ctrlr->is_failed = true;
5717 
5718 	rc = bdev_nvme_reset(nvme_ctrlr);
5719 	CU_ASSERT(rc == 0);
5720 	CU_ASSERT(nvme_ctrlr->resetting == true);
5721 	CU_ASSERT(ctrlr->is_failed == true);
5722 
5723 	poll_threads();
5724 
5725 	CU_ASSERT(nvme_ctrlr->resetting == false);
5726 	CU_ASSERT(ctrlr->is_failed == false);
5727 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5728 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5729 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5730 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5731 
5732 	/* I/O should be queued. */
5733 	bdev_io->internal.in_submit_request = true;
5734 
5735 	bdev_nvme_submit_request(ch, bdev_io);
5736 
5737 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5738 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5739 
5740 	/* After a second, the I/O should still be queued and the ctrlr should
5741 	 * still be recovering.
5742 	 */
5743 	spdk_delay_us(SPDK_SEC_TO_USEC);
5744 	poll_threads();
5745 
5746 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5747 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5748 
5749 	CU_ASSERT(nvme_ctrlr->resetting == false);
5750 	CU_ASSERT(ctrlr->is_failed == false);
5751 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5752 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5753 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5754 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5755 
5756 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5757 	spdk_delay_us(SPDK_SEC_TO_USEC);
5758 	poll_threads();
5759 
5760 	CU_ASSERT(nvme_ctrlr->resetting == false);
5761 	CU_ASSERT(ctrlr->is_failed == false);
5762 	CU_ASSERT(ctrlr_ch->qpair == NULL);
5763 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5764 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5765 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5766 
5767 	/* Then, within a second, the pending I/O should be failed. */
5768 	spdk_delay_us(SPDK_SEC_TO_USEC);
5769 	poll_threads();
5770 
5771 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5772 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5773 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5774 
5775 	/* Another I/O submission should be failed immediately. */
5776 	bdev_io->internal.in_submit_request = true;
5777 
5778 	bdev_nvme_submit_request(ch, bdev_io);
5779 
5780 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5781 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5782 
5783 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
5784 	 * should be deleted.
5785 	 */
5786 	spdk_delay_us(SPDK_SEC_TO_USEC);
5787 	poll_threads();
5788 
5789 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5790 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5791 	CU_ASSERT(nvme_ctrlr->destruct == true);
5792 
5793 	spdk_put_io_channel(ch);
5794 
5795 	poll_threads();
5796 	spdk_delay_us(1000);
5797 	poll_threads();
5798 
5799 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5800 
5801 	free(bdev_io);
5802 }
5803 
5804 int
5805 main(int argc, const char **argv)
5806 {
5807 	CU_pSuite	suite = NULL;
5808 	unsigned int	num_failures;
5809 
5810 	CU_set_error_action(CUEA_ABORT);
5811 	CU_initialize_registry();
5812 
5813 	suite = CU_add_suite("nvme", NULL, NULL);
5814 
5815 	CU_ADD_TEST(suite, test_create_ctrlr);
5816 	CU_ADD_TEST(suite, test_reset_ctrlr);
5817 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
5818 	CU_ADD_TEST(suite, test_failover_ctrlr);
5819 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
5820 	CU_ADD_TEST(suite, test_pending_reset);
5821 	CU_ADD_TEST(suite, test_attach_ctrlr);
5822 	CU_ADD_TEST(suite, test_aer_cb);
5823 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
5824 	CU_ADD_TEST(suite, test_add_remove_trid);
5825 	CU_ADD_TEST(suite, test_abort);
5826 	CU_ADD_TEST(suite, test_get_io_qpair);
5827 	CU_ADD_TEST(suite, test_bdev_unregister);
5828 	CU_ADD_TEST(suite, test_compare_ns);
5829 	CU_ADD_TEST(suite, test_init_ana_log_page);
5830 	CU_ADD_TEST(suite, test_get_memory_domains);
5831 	CU_ADD_TEST(suite, test_reconnect_qpair);
5832 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
5833 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
5834 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
5835 	CU_ADD_TEST(suite, test_admin_path);
5836 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
5837 	CU_ADD_TEST(suite, test_find_io_path);
5838 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
5839 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
5840 	CU_ADD_TEST(suite, test_retry_io_count);
5841 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
5842 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
5843 	CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error);
5844 	CU_ADD_TEST(suite, test_retry_admin_passthru_by_count);
5845 	CU_ADD_TEST(suite, test_check_multipath_params);
5846 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
5847 	CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting);
5848 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
5849 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
5850 	CU_ADD_TEST(suite, test_fail_path);
5851 
5852 	CU_basic_set_mode(CU_BRM_VERBOSE);
5853 
5854 	allocate_threads(3);
5855 	set_thread(0);
5856 	bdev_nvme_library_init();
5857 	init_accel();
5858 
5859 	CU_basic_run_tests();
5860 
5861 	set_thread(0);
5862 	bdev_nvme_library_fini();
5863 	fini_accel();
5864 	free_threads();
5865 
5866 	num_failures = CU_get_number_of_failures();
5867 	CU_cleanup_registry();
5868 
5869 	return num_failures;
5870 }
5871