/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"
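
/*
 * The module under test, bdev_nvme.c, is #included above rather than linked,
 * so that its static functions are visible to the tests.  Every external
 * dependency it pulls in is satisfied by the stubs and mock implementations
 * that follow.
 */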

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);

int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				       struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return 0;
}

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

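/*
 * Minimal definitions of the NVMe driver types that are opaque to the
 * module under test.  They carry just enough state for these tests:
 * submitted requests queue on a qpair and complete the next time the
 * mocked spdk_nvme_qpair_process_completions() runs.
 */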
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

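/*
 * Global state shared by the tests.  Controllers created by
 * ut_attach_ctrlr() start on g_ut_init_ctrlrs and move to
 * g_ut_attached_ctrlrs once spdk_nvme_probe_poll_async() "attaches" them.
 */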
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

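/*
 * Register a mock controller that a subsequent bdev_nvme_create() can
 * discover.  Each of its num_ns namespaces is active, ANA-optimized, and
 * 1024 blocks long; the cntlid is assigned from a global counter.
 */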
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multi_ctrlr)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multi_ctrlr;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;

	free(ctrlr_reset_ctx);

	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	return 0;
}

int
spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
{
	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;

	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
	if (!ctrlr_reset_ctx) {
		return -ENOMEM;
	}

	ctrlr->is_failed = false;

	ctrlr_reset_ctx->ctrlr = ctrlr;
	*reset_ctx = ctrlr_reset_ctx;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

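/*
 * UT_ANA_DESC_SIZE assumes each ANA group descriptor lists exactly one
 * NSID (hence the extra sizeof(uint32_t)).  ut_create_ana_log_page()
 * emits one such descriptor per active namespace, with the namespace ID
 * doubling as the group ID.
 */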
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

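/*
 * The mocked completion path: every outstanding request on the qpair is
 * completed immediately, invoking each callback with the completion status
 * preset on the request; max_completions is ignored.
 */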
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_connected) {
			local_completions = spdk_nvme_qpair_process_completions(qpair,
					    completions_per_qpair);
			if (local_completions < 0 && error_reason == 0) {
				error_reason = local_completions;
			} else {
				num_completions += local_completions;
				assert(num_completions >= 0);
			}
		}
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (!qpair->is_connected) {
			disconnected_qpair_cb(qpair, group->ctx);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

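/*
 * Test cases.  bdev_nvme_delete() completes asynchronously, so each test
 * drives it to completion with poll_threads() plus a small spdk_delay_us()
 * before verifying that the named ctrlr is gone.
 */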
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

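/*
 * Step a reset through its multi-thread state machine with
 * poll_thread_times(): the qpair on each thread is deleted, the ctrlr is
 * reconnected (clearing is_failed), the qpairs are recreated, and finally
 * the resetting flag and the path's is_failed flag are cleared.
 */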
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is still not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels open, so destruct is not
	 * completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

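/*
 * Exercise bdev_nvme_failover() first with a single trid (which behaves
 * like a plain reset) and then with a secondary trid, verifying that the
 * active path switches from trid1 to trid2.
 */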
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

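/*
 * A reset request that arrives while another reset is in flight is queued
 * on the channel's pending_resets list and completed together with the
 * in-flight reset: both succeed in the first scenario and both fail in
 * the second.
 */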
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	ctrlr_ch1 = io_path1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	ctrlr_ch2 = io_path2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the reset fails while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

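/*
 * Cover the four attach outcomes: a failed ctrlr is dropped by probe
 * polling, a ctrlr with no namespaces yields an nvme_ctrlr but no bdev,
 * a healthy ctrlr with one namespace yields one bdev, and a failed
 * spdk_bdev_register() leaves the nvme_ctrlr without a bdev.
 */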
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace, but an nvme_ctrlr with no namespace is
	 * created because creating the nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

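/*
 * Feed namespace-attribute-changed and ANA-change AER completions into
 * aer_cb() and verify that namespaces are populated, depopulated, and
 * resized, and that their ANA states are refreshed from the mock log page.
 */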
1828 static void
1829 test_aer_cb(void)
1830 {
1831 	struct spdk_nvme_transport_id trid = {};
1832 	struct spdk_nvme_ctrlr *ctrlr;
1833 	struct nvme_ctrlr *nvme_ctrlr;
1834 	struct nvme_bdev *bdev;
1835 	const int STRING_SIZE = 32;
1836 	const char *attached_names[STRING_SIZE];
1837 	union spdk_nvme_async_event_completion event = {};
1838 	struct spdk_nvme_cpl cpl = {};
1839 	int rc;
1840 
1841 	set_thread(0);
1842 
1843 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1844 	ut_init_trid(&trid);
1845 
1846 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
1847 	 * and 4th namespaces are populated.
1848 	 */
1849 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
1850 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1851 
1852 	ctrlr->ns[0].is_active = false;
1853 
1854 	g_ut_attach_ctrlr_status = 0;
1855 	g_ut_attach_bdev_count = 3;
1856 
1857 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1858 			      attach_ctrlr_done, NULL, NULL, false);
1859 	CU_ASSERT(rc == 0);
1860 
1861 	spdk_delay_us(1000);
1862 	poll_threads();
1863 
1864 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1865 	poll_threads();
1866 
1867 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1868 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1869 
1870 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
1871 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1872 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
1873 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1874 
1875 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
1876 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1877 	CU_ASSERT(bdev->disk.blockcnt == 1024);
1878 
1879 	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
1880 	 * change the size of the 4th namespace.
1881 	 */
1882 	ctrlr->ns[0].is_active = true;
1883 	ctrlr->ns[2].is_active = false;
1884 	ctrlr->nsdata[3].nsze = 2048;
1885 
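	/* Build an AER completion that carries a Namespace Attribute Changed notice
	 * and deliver it directly to the ctrlr via aer_cb().
	 */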
1886 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1887 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
1888 	cpl.cdw0 = event.raw;
1889 
1890 	aer_cb(nvme_ctrlr, &cpl);
1891 
1892 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
1893 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1894 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
1895 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1896 	CU_ASSERT(bdev->disk.blockcnt == 2048);
1897 
1898 	/* Change ANA state of active namespaces. */
1899 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
1900 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
1901 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
1902 
1903 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1904 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
1905 	cpl.cdw0 = event.raw;
1906 
1907 	aer_cb(nvme_ctrlr, &cpl);
1908 
1909 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1910 	poll_threads();
1911 
1912 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
1913 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
1914 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
1915 
1916 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1917 	CU_ASSERT(rc == 0);
1918 
1919 	poll_threads();
1920 	spdk_delay_us(1000);
1921 	poll_threads();
1922 
1923 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1924 }
1925 
1926 static void
1927 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1928 			enum spdk_bdev_io_type io_type)
1929 {
1930 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1931 	struct nvme_io_path *io_path;
1932 	struct spdk_nvme_qpair *qpair;
1933 
1934 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1935 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1936 	qpair = io_path->ctrlr_ch->qpair;
1937 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1938 
1939 	bdev_io->type = io_type;
1940 	bdev_io->internal.in_submit_request = true;
1941 
1942 	bdev_nvme_submit_request(ch, bdev_io);
1943 
1944 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
1945 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
1946 
1947 	poll_threads();
1948 
1949 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1950 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1951 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1952 }
1953 
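/* Helper for I/O types that the module completes inline. Flush, for example, is
 * currently completed immediately as a no-op, so the request must finish
 * synchronously without ever reaching the qpair.
 */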
1954 static void
1955 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1956 		   enum spdk_bdev_io_type io_type)
1957 {
1958 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1959 	struct nvme_io_path *io_path;
1960 	struct spdk_nvme_qpair *qpair;
1961 
1962 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1963 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1964 	qpair = io_path->ctrlr_ch->qpair;
1965 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1966 
1967 	bdev_io->type = io_type;
1968 	bdev_io->internal.in_submit_request = true;
1969 
1970 	bdev_nvme_submit_request(ch, bdev_io);
1971 
1972 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1973 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1974 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1975 }
1976 
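/* A fused compare-and-write is submitted as two linked NVMe requests, a compare
 * followed by a write, and the bdev_io must stay outstanding until both complete.
 */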
1977 static void
1978 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
1979 {
1980 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1981 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
1982 	struct ut_nvme_req *req;
1983 	struct nvme_io_path *io_path;
1984 	struct spdk_nvme_qpair *qpair;
1985 
1986 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1987 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1988 	qpair = io_path->ctrlr_ch->qpair;
1989 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1990 
1991 	/* Compare-and-write is the only fused command for now. */
1992 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
1993 	bdev_io->internal.in_submit_request = true;
1994 
1995 	bdev_nvme_submit_request(ch, bdev_io);
1996 
1997 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
1998 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
1999 	CU_ASSERT(bio->first_fused_submitted == true);
2000 
2001 	/* The first outstanding request is the compare operation. */
2002 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2003 	SPDK_CU_ASSERT_FATAL(req != NULL);
2004 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2005 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2006 
2007 	poll_threads();
2008 
2009 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2010 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2011 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2012 }
2013 
2014 static void
2015 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2016 			 struct spdk_nvme_ctrlr *ctrlr)
2017 {
2018 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2019 	bdev_io->internal.in_submit_request = true;
2020 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2021 
2022 	bdev_nvme_submit_request(ch, bdev_io);
2023 
2024 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2025 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2026 
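	/* The admin completion is reaped on the thread that polls the adminq
	 * (thread 1 in these tests), and the bdev_io completion is then passed
	 * back to the submitting thread (thread 0) as a message.
	 */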
2027 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2028 	poll_thread_times(1, 1);
2029 
2030 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2031 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2032 
2033 	poll_thread_times(0, 1);
2034 
2035 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2036 }
2037 
2038 static void
2039 test_submit_nvme_cmd(void)
2040 {
2041 	struct spdk_nvme_transport_id trid = {};
2042 	struct spdk_nvme_ctrlr *ctrlr;
2043 	struct nvme_ctrlr *nvme_ctrlr;
2044 	const int STRING_SIZE = 32;
2045 	const char *attached_names[STRING_SIZE];
2046 	struct nvme_bdev *bdev;
2047 	struct spdk_bdev_io *bdev_io;
2048 	struct spdk_io_channel *ch;
2049 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2050 	int rc;
2051 
2052 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2053 	ut_init_trid(&trid);
2054 
2055 	set_thread(1);
2056 
2057 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2058 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2059 
2060 	g_ut_attach_ctrlr_status = 0;
2061 	g_ut_attach_bdev_count = 1;
2062 
2063 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2064 			      attach_ctrlr_done, NULL, NULL, false);
2065 	CU_ASSERT(rc == 0);
2066 
2067 	spdk_delay_us(1000);
2068 	poll_threads();
2069 
2070 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2071 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2072 
2073 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2074 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2075 
2076 	set_thread(0);
2077 
2078 	ch = spdk_get_io_channel(bdev);
2079 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2080 
2081 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2082 
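	/* Submit a read with no iovec set first. This should exercise the path
	 * where the bdev layer allocates the data buffer before resubmission.
	 */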
2083 	bdev_io->u.bdev.iovs = NULL;
2084 
2085 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2086 
2087 	ut_bdev_io_set_buf(bdev_io);
2088 
2089 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2090 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2091 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2092 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2093 
2094 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2095 
2096 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2097 
2098 	/* Verify that the ext NVMe API is called if the bdev_io's ext_opts is set. */
2099 	bdev_io->internal.ext_opts = &ext_io_opts;
2100 	g_ut_readv_ext_called = false;
2101 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2102 	CU_ASSERT(g_ut_readv_ext_called == true);
2103 	g_ut_readv_ext_called = false;
2104 
2105 	g_ut_writev_ext_called = false;
2106 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2107 	CU_ASSERT(g_ut_writev_ext_called == true);
2108 	g_ut_writev_ext_called = false;
2109 	bdev_io->internal.ext_opts = NULL;
2110 
2111 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2112 
2113 	free(bdev_io);
2114 
2115 	spdk_put_io_channel(ch);
2116 
2117 	poll_threads();
2118 
2119 	set_thread(1);
2120 
2121 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2122 	CU_ASSERT(rc == 0);
2123 
2124 	poll_threads();
2125 	spdk_delay_us(1000);
2126 	poll_threads();
2127 
2128 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2129 }
2130 
2131 static void
2132 test_add_remove_trid(void)
2133 {
2134 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2135 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2136 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2137 	const int STRING_SIZE = 32;
2138 	const char *attached_names[STRING_SIZE];
2139 	struct nvme_path_id *ctrid;
2140 	int rc;
2141 
2142 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2143 	ut_init_trid(&path1.trid);
2144 	ut_init_trid2(&path2.trid);
2145 	ut_init_trid3(&path3.trid);
2146 
2147 	set_thread(0);
2148 
2149 	g_ut_attach_ctrlr_status = 0;
2150 	g_ut_attach_bdev_count = 0;
2151 
2152 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2153 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2154 
2155 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2156 			      attach_ctrlr_done, NULL, NULL, false);
2157 	CU_ASSERT(rc == 0);
2158 
2159 	spdk_delay_us(1000);
2160 	poll_threads();
2161 
2162 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2163 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2164 
2165 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2166 
2167 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2168 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2169 
2170 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2171 			      attach_ctrlr_done, NULL, NULL, false);
2172 	CU_ASSERT(rc == 0);
2173 
2174 	spdk_delay_us(1000);
2175 	poll_threads();
2176 
2177 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2178 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2179 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2180 			break;
2181 		}
2182 	}
2183 	CU_ASSERT(ctrid != NULL);
2184 
2185 	/* trid3 is not in the registered list. */
2186 	rc = bdev_nvme_delete("nvme0", &path3);
2187 	CU_ASSERT(rc == -ENXIO);
2188 
2189 	/* trid2 is not the active path, and is simply removed. */
2190 	rc = bdev_nvme_delete("nvme0", &path2);
2191 	CU_ASSERT(rc == 0);
2192 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2193 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2194 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2195 	}
2196 
2197 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2198 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2199 
2200 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
2201 			      attach_ctrlr_done, NULL, NULL, false);
2202 	CU_ASSERT(rc == 0);
2203 
2204 	spdk_delay_us(1000);
2205 	poll_threads();
2206 
2207 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2208 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2209 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2210 			break;
2211 		}
2212 	}
2213 	CU_ASSERT(ctrid != NULL);
2214 
2215 	/* path1 is the current path and path3 is an alternative path.
2216 	 * If we remove path1, the active path changes to path3.
2217 	 */
2218 	rc = bdev_nvme_delete("nvme0", &path1);
2219 	CU_ASSERT(rc == 0);
2220 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2221 	CU_ASSERT(nvme_ctrlr->resetting == true);
2222 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2223 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2224 	}
2225 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2226 
2227 	poll_threads();
2228 
2229 	CU_ASSERT(nvme_ctrlr->resetting == false);
2230 
2231 	/* path3 is the current and only path. If we remove path3, the corresponding
2232 	 * nvme_ctrlr is removed.
2233 	 */
2234 	rc = bdev_nvme_delete("nvme0", &path3);
2235 	CU_ASSERT(rc == 0);
2236 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2237 
2238 	poll_threads();
2239 	spdk_delay_us(1000);
2240 	poll_threads();
2241 
2242 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2243 
2244 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2245 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2246 
2247 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2248 			      attach_ctrlr_done, NULL, NULL, false);
2249 	CU_ASSERT(rc == 0);
2250 
2251 	spdk_delay_us(1000);
2252 	poll_threads();
2253 
2254 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2255 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2256 
2257 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2258 
2259 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2260 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2261 
2262 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2263 			      attach_ctrlr_done, NULL, NULL, false);
2264 	CU_ASSERT(rc == 0);
2265 
2266 	spdk_delay_us(1000);
2267 	poll_threads();
2268 
2269 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2270 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2271 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2272 			break;
2273 		}
2274 	}
2275 	CU_ASSERT(ctrid != NULL);
2276 
2277 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2278 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2279 	CU_ASSERT(rc == 0);
2280 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2281 
2282 	poll_threads();
2283 	spdk_delay_us(1000);
2284 	poll_threads();
2285 
2286 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2287 }
2288 
2289 static void
2290 test_abort(void)
2291 {
2292 	struct spdk_nvme_transport_id trid = {};
2293 	struct spdk_nvme_ctrlr *ctrlr;
2294 	struct nvme_ctrlr *nvme_ctrlr;
2295 	const int STRING_SIZE = 32;
2296 	const char *attached_names[STRING_SIZE];
2297 	struct nvme_bdev *bdev;
2298 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2299 	struct spdk_io_channel *ch1, *ch2;
2300 	struct nvme_bdev_channel *nbdev_ch1;
2301 	struct nvme_io_path *io_path1;
2302 	struct nvme_ctrlr_channel *ctrlr_ch1;
2303 	int rc;
2304 
2305 	/* Create a ctrlr on thread 1, and submit the I/O and admin requests to be
2306 	 * aborted on thread 0. The abort for the I/O request is submitted on thread 0
2307 	 * and the abort for the admin request on thread 1. Both should succeed.
2308 	 */
2309 
2310 	ut_init_trid(&trid);
2311 
2312 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2313 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2314 
2315 	g_ut_attach_ctrlr_status = 0;
2316 	g_ut_attach_bdev_count = 1;
2317 
2318 	set_thread(1);
2319 
2320 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2321 			      attach_ctrlr_done, NULL, NULL, false);
2322 	CU_ASSERT(rc == 0);
2323 
2324 	spdk_delay_us(1000);
2325 	poll_threads();
2326 
2327 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2328 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2329 
2330 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2331 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2332 
2333 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2334 	ut_bdev_io_set_buf(write_io);
2335 
2336 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2337 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2338 
2339 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2340 
2341 	set_thread(0);
2342 
2343 	ch1 = spdk_get_io_channel(bdev);
2344 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2345 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2346 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2347 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2348 	ctrlr_ch1 = io_path1->ctrlr_ch;
2349 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2350 
2351 	set_thread(1);
2352 
2353 	ch2 = spdk_get_io_channel(bdev);
2354 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2355 
2356 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2357 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2358 
2359 	/* Aborting the already completed request should fail. */
2360 	write_io->internal.in_submit_request = true;
2361 	bdev_nvme_submit_request(ch1, write_io);
2362 	poll_threads();
2363 
2364 	CU_ASSERT(write_io->internal.in_submit_request == false);
2365 
2366 	abort_io->u.abort.bio_to_abort = write_io;
2367 	abort_io->internal.in_submit_request = true;
2368 
2369 	bdev_nvme_submit_request(ch1, abort_io);
2370 
2371 	poll_threads();
2372 
2373 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2374 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2375 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2376 
2377 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2378 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2379 
2380 	admin_io->internal.in_submit_request = true;
2381 	bdev_nvme_submit_request(ch1, admin_io);
2382 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2383 	poll_threads();
2384 
2385 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2386 
2387 	abort_io->u.abort.bio_to_abort = admin_io;
2388 	abort_io->internal.in_submit_request = true;
2389 
2390 	bdev_nvme_submit_request(ch2, abort_io);
2391 
2392 	poll_threads();
2393 
2394 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2395 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2396 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2397 
2398 	/* Aborting the write request should succeed. */
2399 	write_io->internal.in_submit_request = true;
2400 	bdev_nvme_submit_request(ch1, write_io);
2401 
2402 	CU_ASSERT(write_io->internal.in_submit_request == true);
2403 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2404 
2405 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2406 	abort_io->u.abort.bio_to_abort = write_io;
2407 	abort_io->internal.in_submit_request = true;
2408 
2409 	bdev_nvme_submit_request(ch1, abort_io);
2410 
2411 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2412 	poll_threads();
2413 
2414 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2415 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2416 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2417 	CU_ASSERT(write_io->internal.in_submit_request == false);
2418 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2419 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2420 
2421 	/* Aborting the admin request should succeed. */
2422 	admin_io->internal.in_submit_request = true;
2423 	bdev_nvme_submit_request(ch1, admin_io);
2424 
2425 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2426 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2427 
2428 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2429 	abort_io->u.abort.bio_to_abort = admin_io;
2430 	abort_io->internal.in_submit_request = true;
2431 
2432 	bdev_nvme_submit_request(ch2, abort_io);
2433 
2434 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2435 	poll_threads();
2436 
2437 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2438 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2439 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2440 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2441 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2442 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2443 
2444 	set_thread(0);
2445 
2446 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2447 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
2448 	 * resetting should be queued.
2449 	 */
2450 	ctrlr_ch1->qpair->is_connected = false;
2451 
2452 	poll_thread_times(0, 3);
2453 
2454 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2455 	CU_ASSERT(nvme_ctrlr->resetting == true);
2456 
2457 	write_io->internal.in_submit_request = true;
2458 
2459 	bdev_nvme_submit_request(ch1, write_io);
2460 
2461 	CU_ASSERT(write_io->internal.in_submit_request == true);
2462 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2463 
2464 	/* Aborting the queued write request should succeed immediately. */
2465 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2466 	abort_io->u.abort.bio_to_abort = write_io;
2467 	abort_io->internal.in_submit_request = true;
2468 
2469 	bdev_nvme_submit_request(ch1, abort_io);
2470 
2471 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2472 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2473 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2474 	CU_ASSERT(write_io->internal.in_submit_request == false);
2475 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2476 
2477 	spdk_put_io_channel(ch1);
2478 
2479 	set_thread(1);
2480 
2481 	spdk_put_io_channel(ch2);
2482 
2483 	poll_threads();
2484 
2485 	free(write_io);
2486 	free(admin_io);
2487 	free(abort_io);
2488 
2489 	set_thread(1);
2490 
2491 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2492 	CU_ASSERT(rc == 0);
2493 
2494 	poll_threads();
2495 	spdk_delay_us(1000);
2496 	poll_threads();
2497 
2498 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2499 }
2500 
2501 static void
2502 test_get_io_qpair(void)
2503 {
2504 	struct spdk_nvme_transport_id trid = {};
2505 	struct spdk_nvme_ctrlr ctrlr = {};
2506 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2507 	struct spdk_io_channel *ch;
2508 	struct nvme_ctrlr_channel *ctrlr_ch;
2509 	struct spdk_nvme_qpair *qpair;
2510 	int rc;
2511 
2512 	ut_init_trid(&trid);
2513 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2514 
2515 	set_thread(0);
2516 
2517 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
2518 	CU_ASSERT(rc == 0);
2519 
2520 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2521 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2522 
2523 	ch = spdk_get_io_channel(nvme_ctrlr);
2524 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2525 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2526 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2527 
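	/* bdev_nvme_get_io_qpair() should return the qpair owned by the ctrlr channel. */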
2528 	qpair = bdev_nvme_get_io_qpair(ch);
2529 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2530 
2531 	spdk_put_io_channel(ch);
2532 
2533 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2534 	CU_ASSERT(rc == 0);
2535 
2536 	poll_threads();
2537 	spdk_delay_us(1000);
2538 	poll_threads();
2539 
2540 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2541 }
2542 
2543 /* Test the scenario in which the bdev subsystem starts shutting down while NVMe
2544  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2545  * This test case guards against regressions in that path. spdk_bdev_unregister()
2546  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2547  */
2548 static void
2549 test_bdev_unregister(void)
2550 {
2551 	struct spdk_nvme_transport_id trid = {};
2552 	struct spdk_nvme_ctrlr *ctrlr;
2553 	struct nvme_ctrlr *nvme_ctrlr;
2554 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2555 	const int STRING_SIZE = 32;
2556 	const char *attached_names[STRING_SIZE];
2557 	struct nvme_bdev *bdev1, *bdev2;
2558 	int rc;
2559 
2560 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2561 	ut_init_trid(&trid);
2562 
2563 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2564 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2565 
2566 	g_ut_attach_ctrlr_status = 0;
2567 	g_ut_attach_bdev_count = 2;
2568 
2569 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2570 			      attach_ctrlr_done, NULL, NULL, false);
2571 	CU_ASSERT(rc == 0);
2572 
2573 	spdk_delay_us(1000);
2574 	poll_threads();
2575 
2576 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2577 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2578 
2579 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2580 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2581 
2582 	bdev1 = nvme_ns1->bdev;
2583 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2584 
2585 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2586 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2587 
2588 	bdev2 = nvme_ns2->bdev;
2589 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2590 
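	/* Emulate the shutdown path: spdk_bdev_unregister() would end up calling
	 * bdev_nvme_destruct() for each disk, so call it directly here.
	 */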
2591 	bdev_nvme_destruct(&bdev1->disk);
2592 	bdev_nvme_destruct(&bdev2->disk);
2593 
2594 	poll_threads();
2595 
2596 	CU_ASSERT(nvme_ns1->bdev == NULL);
2597 	CU_ASSERT(nvme_ns2->bdev == NULL);
2598 
2599 	nvme_ctrlr->destruct = true;
2600 	_nvme_ctrlr_destruct(nvme_ctrlr);
2601 
2602 	poll_threads();
2603 	spdk_delay_us(1000);
2604 	poll_threads();
2605 
2606 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2607 }
2608 
2609 static void
2610 test_compare_ns(void)
2611 {
2612 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2613 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2614 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2615 
2616 	/* No IDs are defined. */
2617 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2618 
2619 	/* Only EUI64 are defined and not matched. */
2620 	nsdata1.eui64 = 0xABCDEF0123456789;
2621 	nsdata2.eui64 = 0xBBCDEF0123456789;
2622 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2623 
2624 	/* Only EUI64 are defined and matched. */
2625 	nsdata2.eui64 = 0xABCDEF0123456789;
2626 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2627 
2628 	/* Only NGUID are defined and not matched. */
2629 	nsdata1.eui64 = 0x0;
2630 	nsdata2.eui64 = 0x0;
2631 	nsdata1.nguid[0] = 0x12;
2632 	nsdata2.nguid[0] = 0x10;
2633 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2634 
2635 	/* Only NGUID are defined and matched. */
2636 	nsdata2.nguid[0] = 0x12;
2637 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2638 
2639 	/* Only UUID are defined and not matched. */
2640 	nsdata1.nguid[0] = 0x0;
2641 	nsdata2.nguid[0] = 0x0;
2642 	ns1.uuid.u.raw[0] = 0xAA;
2643 	ns2.uuid.u.raw[0] = 0xAB;
2644 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2645 
2646 	/* Only UUID are defined and matched. */
2647 	ns1.uuid.u.raw[0] = 0xAB;
2648 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2649 
2650 	/* All EUI64, NGUID, and UUID are defined and matched. */
2651 	nsdata1.eui64 = 0x123456789ABCDEF;
2652 	nsdata2.eui64 = 0x123456789ABCDEF;
2653 	nsdata1.nguid[15] = 0x34;
2654 	nsdata2.nguid[15] = 0x34;
2655 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2656 }
2657 
2658 static void
2659 test_init_ana_log_page(void)
2660 {
2661 	struct spdk_nvme_transport_id trid = {};
2662 	struct spdk_nvme_ctrlr *ctrlr;
2663 	struct nvme_ctrlr *nvme_ctrlr;
2664 	const int STRING_SIZE = 32;
2665 	const char *attached_names[STRING_SIZE];
2666 	int rc;
2667 
2668 	set_thread(0);
2669 
2670 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2671 	ut_init_trid(&trid);
2672 
2673 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2674 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2675 
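	/* Give each of the five namespaces a distinct ANA state. Reading the ANA
	 * log page during attach should propagate these states to the nvme_ns
	 * objects.
	 */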
2676 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2677 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2678 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2679 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2680 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2681 
2682 	g_ut_attach_ctrlr_status = 0;
2683 	g_ut_attach_bdev_count = 5;
2684 
2685 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2686 			      attach_ctrlr_done, NULL, NULL, false);
2687 	CU_ASSERT(rc == 0);
2688 
2689 	spdk_delay_us(1000);
2690 	poll_threads();
2691 
2692 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2693 	poll_threads();
2694 
2695 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2696 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2697 
2698 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2699 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2700 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2701 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2702 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2707 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2708 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2709 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2710 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2711 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2712 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2713 
2714 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2715 	CU_ASSERT(rc == 0);
2716 
2717 	poll_threads();
2718 	spdk_delay_us(1000);
2719 	poll_threads();
2720 
2721 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2722 }
2723 
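/* Register a stub io_device standing in for the accel engine so that channel
 * creation paths which reference it have something to attach to in these tests.
 */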
2724 static void
2725 init_accel(void)
2726 {
2727 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2728 				sizeof(int), "accel_p");
2729 }
2730 
2731 static void
2732 fini_accel(void)
2733 {
2734 	spdk_io_device_unregister(g_accel_p, NULL);
2735 }
2736 
2737 static void
2738 test_get_memory_domains(void)
2739 {
2740 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2741 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2742 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2743 	struct spdk_memory_domain *domains[2] = {};
2744 	int rc = 0;
2745 
2746 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2747 
2748 	/* nvme controller doesn't have memory domains */
2749 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2750 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2751 	CU_ASSERT(rc == 0);
2752 
2753 	/* nvme controller has a memory domain */
2754 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2755 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2756 	CU_ASSERT(rc == 1);
2757 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2758 }
2759 
2760 static void
2761 test_reconnect_qpair(void)
2762 {
2763 	struct spdk_nvme_transport_id trid = {};
2764 	struct spdk_nvme_ctrlr *ctrlr;
2765 	struct nvme_ctrlr *nvme_ctrlr;
2766 	const int STRING_SIZE = 32;
2767 	const char *attached_names[STRING_SIZE];
2768 	struct nvme_bdev *bdev;
2769 	struct spdk_io_channel *ch1, *ch2;
2770 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
2771 	struct nvme_io_path *io_path1, *io_path2;
2772 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
2773 	int rc;
2774 
2775 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2776 	ut_init_trid(&trid);
2777 
2778 	set_thread(0);
2779 
2780 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2781 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2782 
2783 	g_ut_attach_ctrlr_status = 0;
2784 	g_ut_attach_bdev_count = 1;
2785 
2786 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2787 			      attach_ctrlr_done, NULL, NULL, false);
2788 	CU_ASSERT(rc == 0);
2789 
2790 	spdk_delay_us(1000);
2791 	poll_threads();
2792 
2793 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2794 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2795 
2796 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2797 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2798 
2799 	ch1 = spdk_get_io_channel(bdev);
2800 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2801 
2802 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2803 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2804 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2805 	ctrlr_ch1 = io_path1->ctrlr_ch;
2806 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2807 
2808 	set_thread(1);
2809 
2810 	ch2 = spdk_get_io_channel(bdev);
2811 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2812 
2813 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
2814 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
2815 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
2816 	ctrlr_ch2 = io_path2->ctrlr_ch;
2817 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
2818 
2819 	/* If a qpair is disconnected, it is freed and then reconnected via
2820 	 * resetting the corresponding nvme_ctrlr.
2821 	 */
2822 	ctrlr_ch2->qpair->is_connected = false;
2823 	ctrlr->is_failed = true;
2824 
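	/* Step the pollers one iteration at a time to walk the reset sequence:
	 * the disconnect is detected, both qpairs are freed, the ctrlr is reset,
	 * the qpairs are recreated on each thread, and the resetting flag is
	 * finally cleared.
	 */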
2825 	poll_thread_times(1, 1);
2826 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2827 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2828 	CU_ASSERT(nvme_ctrlr->resetting == true);
2829 
2830 	poll_thread_times(0, 2);
2831 	poll_thread_times(1, 1);
2832 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2833 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2834 	CU_ASSERT(ctrlr->is_failed == true);
2835 
2836 	poll_thread_times(0, 1);
2837 	CU_ASSERT(ctrlr->is_failed == false);
2838 
2839 	poll_thread_times(0, 1);
2840 	poll_thread_times(1, 1);
2841 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2842 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
2843 	CU_ASSERT(nvme_ctrlr->resetting == true);
2844 
2845 	poll_thread_times(0, 2);
2846 	poll_thread_times(1, 1);
2847 	poll_thread_times(0, 1);
2848 	CU_ASSERT(nvme_ctrlr->resetting == false);
2849 
2850 	poll_threads();
2851 
2852 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
2853 	 * fails, the qpair is just freed.
2854 	 */
2855 	ctrlr_ch2->qpair->is_connected = false;
2856 	ctrlr->is_failed = true;
2857 	ctrlr->fail_reset = true;
2858 
2859 	poll_thread_times(1, 1);
2860 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2861 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2862 	CU_ASSERT(nvme_ctrlr->resetting == true);
2863 
2864 	poll_thread_times(0, 2);
2865 	poll_thread_times(1, 1);
2866 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2867 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2868 	CU_ASSERT(ctrlr->is_failed == true);
2869 
2870 	poll_thread_times(0, 2);
2871 	poll_thread_times(1, 1);
2872 	poll_thread_times(0, 1);
2873 	CU_ASSERT(ctrlr->is_failed == true);
2874 	CU_ASSERT(nvme_ctrlr->resetting == false);
2875 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2876 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2877 
2878 	poll_threads();
2879 
2880 	spdk_put_io_channel(ch2);
2881 
2882 	set_thread(0);
2883 
2884 	spdk_put_io_channel(ch1);
2885 
2886 	poll_threads();
2887 
2888 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2889 	CU_ASSERT(rc == 0);
2890 
2891 	poll_threads();
2892 	spdk_delay_us(1000);
2893 	poll_threads();
2894 
2895 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2896 }
2897 
2898 static void
2899 test_create_bdev_ctrlr(void)
2900 {
2901 	struct nvme_path_id path1 = {}, path2 = {};
2902 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
2903 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
2904 	const int STRING_SIZE = 32;
2905 	const char *attached_names[STRING_SIZE];
2906 	int rc;
2907 
2908 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2909 	ut_init_trid(&path1.trid);
2910 	ut_init_trid2(&path2.trid);
2911 
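	/* Attach the 1st ctrlr. The two trailing true flags are assumed to enable
	 * ANA reporting and multipath in the ut_attach_ctrlr() stub, matching the
	 * multipath argument passed to bdev_nvme_create() below.
	 */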
2912 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2913 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2914 
2915 	g_ut_attach_ctrlr_status = 0;
2916 	g_ut_attach_bdev_count = 0;
2917 
2918 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2919 			      attach_ctrlr_done, NULL, NULL, true);
	CU_ASSERT(rc == 0);
2920 
2921 	spdk_delay_us(1000);
2922 	poll_threads();
2923 
2924 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2925 	poll_threads();
2926 
2927 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
2928 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
2929 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2930 
2931 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
2932 	g_ut_attach_ctrlr_status = -EINVAL;
2933 
2934 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2935 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2936 
2937 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
2938 
2939 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2940 			      attach_ctrlr_done, NULL, NULL, true);
2941 	CU_ASSERT(rc == 0);
2942 
2943 	spdk_delay_us(1000);
2944 	poll_threads();
2945 
2946 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2947 	poll_threads();
2948 
2949 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
2950 
2951 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
2952 	g_ut_attach_ctrlr_status = 0;
2953 
2954 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2955 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2956 
2957 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2958 			      attach_ctrlr_done, NULL, NULL, true);
2959 	CU_ASSERT(rc == 0);
2960 
2961 	spdk_delay_us(1000);
2962 	poll_threads();
2963 
2964 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2965 	poll_threads();
2966 
2967 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2968 
2969 	/* Delete two ctrlrs at once. */
2970 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2971 	CU_ASSERT(rc == 0);
2972 
2973 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
2974 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2975 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2976 
2977 	poll_threads();
2978 	spdk_delay_us(1000);
2979 	poll_threads();
2980 
2981 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
2982 
2983 	/* Add two ctrlrs and delete one by one. */
2984 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2985 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2986 
2987 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2988 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2989 
2990 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2991 			      attach_ctrlr_done, NULL, NULL, true);
2992 	CU_ASSERT(rc == 0);
2993 
2994 	spdk_delay_us(1000);
2995 	poll_threads();
2996 
2997 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2998 	poll_threads();
2999 
3000 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3001 			      attach_ctrlr_done, NULL, NULL, true);
3002 	CU_ASSERT(rc == 0);
3003 
3004 	spdk_delay_us(1000);
3005 	poll_threads();
3006 
3007 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3008 	poll_threads();
3009 
3010 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3011 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3012 
3013 	rc = bdev_nvme_delete("nvme0", &path1);
3014 	CU_ASSERT(rc == 0);
3015 
3016 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3017 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3018 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3019 
3020 	poll_threads();
3021 	spdk_delay_us(1000);
3022 	poll_threads();
3023 
3024 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3025 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3026 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3027 
3028 	rc = bdev_nvme_delete("nvme0", &path2);
3029 	CU_ASSERT(rc == 0);
3030 
3031 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3032 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3033 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3034 
3035 	poll_threads();
3036 	spdk_delay_us(1000);
3037 	poll_threads();
3038 
3039 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3040 }
3041 
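/* Return the nvme_ns of the given bdev that belongs to the specified nvme_ctrlr,
 * or NULL if the bdev has no namespace from that ctrlr.
 */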
3042 static struct nvme_ns *
3043 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3044 {
3045 	struct nvme_ns *nvme_ns;
3046 
3047 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3048 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3049 			return nvme_ns;
3050 		}
3051 	}
3052 
3053 	return NULL;
3054 }
3055 
3056 static void
3057 test_add_multi_ns_to_bdev(void)
3058 {
3059 	struct nvme_path_id path1 = {}, path2 = {};
3060 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3061 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3062 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3063 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3064 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3065 	const int STRING_SIZE = 32;
3066 	const char *attached_names[STRING_SIZE];
3067 	int rc;
3068 
3069 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3070 	ut_init_trid(&path1.trid);
3071 	ut_init_trid2(&path2.trid);
3072 
3073 	/* Create nvme_bdevs, some of which share a namespace between the two ctrlrs. */
3074 
3075 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3076 	 * 3rd, and 4th namespaces are populated.
3077 	 */
3078 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3079 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3080 
3081 	ctrlr1->ns[1].is_active = false;
3082 	ctrlr1->ns[4].is_active = false;
3083 	memset(&ctrlr1->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3084 	memset(&ctrlr1->ns[2].uuid, 0x3, sizeof(struct spdk_uuid));
3085 	memset(&ctrlr1->ns[3].uuid, 0x4, sizeof(struct spdk_uuid));
3086 
3087 	g_ut_attach_ctrlr_status = 0;
3088 	g_ut_attach_bdev_count = 3;
3089 
3090 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3091 			      attach_ctrlr_done, NULL, NULL, true);
3092 	CU_ASSERT(rc == 0);
3093 
3094 	spdk_delay_us(1000);
3095 	poll_threads();
3096 
3097 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3098 	poll_threads();
3099 
3100 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3101 	 * 2nd, and 4th namespaces are populated. The uuid of the 4th namespace
3102 	 * differs, and hence adding the 4th namespace to a bdev should fail.
3103 	 */
3104 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3105 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3106 
3107 	ctrlr2->ns[2].is_active = false;
3108 	ctrlr2->ns[4].is_active = false;
3109 	memset(&ctrlr2->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3110 	memset(&ctrlr2->ns[1].uuid, 0x2, sizeof(struct spdk_uuid));
3111 	memset(&ctrlr2->ns[3].uuid, 0x44, sizeof(struct spdk_uuid));
3112 
3113 	g_ut_attach_ctrlr_status = 0;
3114 	g_ut_attach_bdev_count = 2;
3115 
3116 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3117 			      attach_ctrlr_done, NULL, NULL, true);
3118 	CU_ASSERT(rc == 0);
3119 
3120 	spdk_delay_us(1000);
3121 	poll_threads();
3122 
3123 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3124 	poll_threads();
3125 
3126 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3127 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3128 
3129 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3130 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3131 
3132 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3133 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3134 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3135 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3136 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3137 
3138 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3139 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3140 
3141 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3142 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3143 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3144 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3145 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3146 
3147 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3148 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3149 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3150 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3151 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3152 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3153 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3154 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3155 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3156 
3157 	CU_ASSERT(bdev1->ref == 2);
3158 	CU_ASSERT(bdev2->ref == 1);
3159 	CU_ASSERT(bdev3->ref == 1);
3160 	CU_ASSERT(bdev4->ref == 1);
3161 
3162 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3163 	rc = bdev_nvme_delete("nvme0", &path1);
3164 	CU_ASSERT(rc == 0);
3165 
3166 	poll_threads();
3167 	spdk_delay_us(1000);
3168 	poll_threads();
3169 
3170 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3171 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3172 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3173 
3174 	rc = bdev_nvme_delete("nvme0", &path2);
3175 	CU_ASSERT(rc == 0);
3176 
3177 	poll_threads();
3178 	spdk_delay_us(1000);
3179 	poll_threads();
3180 
3181 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3182 
3183 	/* Test if a nvme_bdev whose namespace is shared between two ctrlrs
3184 	 * can be deleted when the bdev subsystem shuts down.
3185 	 */
3186 	g_ut_attach_bdev_count = 1;
3187 
3188 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3189 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3190 
3191 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3192 
3193 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3194 			      attach_ctrlr_done, NULL, NULL, true);
3195 	CU_ASSERT(rc == 0);
3196 
3197 	spdk_delay_us(1000);
3198 	poll_threads();
3199 
3200 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3201 	poll_threads();
3202 
3203 	ut_init_trid2(&path2.trid);
3204 
3205 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3206 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3207 
3208 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3209 
3210 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3211 			      attach_ctrlr_done, NULL, NULL, true);
3212 	CU_ASSERT(rc == 0);
3213 
3214 	spdk_delay_us(1000);
3215 	poll_threads();
3216 
3217 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3218 	poll_threads();
3219 
3220 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3221 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3222 
3223 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3224 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3225 
3226 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3227 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3228 
3229 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3230 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3231 
3232 	/* Check if a nvme_bdev has two nvme_ns. */
3233 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3234 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3235 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3236 
3237 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3238 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3239 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3240 
3241 	/* Delete the nvme_bdev first, as the bdev subsystem does during shutdown. */
3242 	bdev_nvme_destruct(&bdev1->disk);
3243 
3244 	poll_threads();
3245 
3246 	CU_ASSERT(nvme_ns1->bdev == NULL);
3247 	CU_ASSERT(nvme_ns2->bdev == NULL);
3248 
3249 	nvme_ctrlr1->destruct = true;
3250 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3251 
3252 	poll_threads();
3253 	spdk_delay_us(1000);
3254 	poll_threads();
3255 
3256 	nvme_ctrlr2->destruct = true;
3257 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3258 
3259 	poll_threads();
3260 	spdk_delay_us(1000);
3261 	poll_threads();
3262 
3263 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3264 }
3265 
3266 static void
3267 test_add_multi_io_paths_to_nbdev_ch(void)
3268 {
3269 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3270 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3271 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3272 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3273 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3274 	const int STRING_SIZE = 32;
3275 	const char *attached_names[STRING_SIZE];
3276 	struct nvme_bdev *bdev;
3277 	struct spdk_io_channel *ch;
3278 	struct nvme_bdev_channel *nbdev_ch;
3279 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3280 	int rc;
3281 
3282 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3283 	ut_init_trid(&path1.trid);
3284 	ut_init_trid2(&path2.trid);
3285 	ut_init_trid3(&path3.trid);
3286 	g_ut_attach_ctrlr_status = 0;
3287 	g_ut_attach_bdev_count = 1;
3288 
3289 	set_thread(1);
3290 
3291 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3292 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3293 
3294 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3295 
3296 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3297 			      attach_ctrlr_done, NULL, NULL, true);
3298 	CU_ASSERT(rc == 0);
3299 
3300 	spdk_delay_us(1000);
3301 	poll_threads();
3302 
3303 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3304 	poll_threads();
3305 
3306 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3307 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3308 
3309 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3310 
3311 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3312 			      attach_ctrlr_done, NULL, NULL, true);
3313 	CU_ASSERT(rc == 0);
3314 
3315 	spdk_delay_us(1000);
3316 	poll_threads();
3317 
3318 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3319 	poll_threads();
3320 
3321 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3322 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3323 
3324 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3325 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3326 
3327 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3328 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3329 
3330 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3331 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3332 
3333 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3334 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3335 
3336 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3337 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3338 
3339 	set_thread(0);
3340 
3341 	ch = spdk_get_io_channel(bdev);
3342 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3343 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3344 
3345 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3346 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3347 
3348 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3349 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3350 
3351 	set_thread(1);
3352 
3353 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3354 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3355 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3356 
3357 	memset(&ctrlr3->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3358 
3359 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
3360 			      attach_ctrlr_done, NULL, NULL, true);
3361 	CU_ASSERT(rc == 0);
3362 
3363 	spdk_delay_us(1000);
3364 	poll_threads();
3365 
3366 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3367 	poll_threads();
3368 
3369 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3370 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3371 
3372 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3373 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3374 
3375 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3376 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3377 
3378 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3379 	rc = bdev_nvme_delete("nvme0", &path2);
3380 	CU_ASSERT(rc == 0);
3381 
3382 	poll_threads();
3383 	spdk_delay_us(1000);
3384 	poll_threads();
3385 
3386 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3387 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3388 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3389 
3390 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3391 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3392 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3393 
3394 	set_thread(0);
3395 
3396 	spdk_put_io_channel(ch);
3397 
3398 	poll_threads();
3399 
3400 	set_thread(1);
3401 
3402 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3403 	CU_ASSERT(rc == 0);
3404 
3405 	poll_threads();
3406 	spdk_delay_us(1000);
3407 	poll_threads();
3408 
3409 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3410 }
3411 
3412 static void
3413 test_admin_path(void)
3414 {
3415 	struct nvme_path_id path1 = {}, path2 = {};
3416 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3417 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3418 	const int STRING_SIZE = 32;
3419 	const char *attached_names[STRING_SIZE];
3420 	struct nvme_bdev *bdev;
3421 	struct spdk_io_channel *ch;
3422 	struct spdk_bdev_io *bdev_io;
3423 	int rc;
3424 
3425 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3426 	ut_init_trid(&path1.trid);
3427 	ut_init_trid2(&path2.trid);
3428 	g_ut_attach_ctrlr_status = 0;
3429 	g_ut_attach_bdev_count = 1;
3430 
3431 	set_thread(0);
3432 
3433 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3434 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3435 
3436 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3437 
3438 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3439 			      attach_ctrlr_done, NULL, NULL, true);
3440 	CU_ASSERT(rc == 0);
3441 
3442 	spdk_delay_us(1000);
3443 	poll_threads();
3444 
3445 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3446 	poll_threads();
3447 
3448 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3449 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3450 
3451 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3452 
3453 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3454 			      attach_ctrlr_done, NULL, NULL, true);
3455 	CU_ASSERT(rc == 0);
3456 
3457 	spdk_delay_us(1000);
3458 	poll_threads();
3459 
3460 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3461 	poll_threads();
3462 
3463 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3464 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3465 
3466 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3467 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3468 
3469 	ch = spdk_get_io_channel(bdev);
3470 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3471 
3472 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3473 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3474 
3475 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3476 	 * submitted to ctrlr2.
3477 	 */
3478 	ctrlr1->is_failed = true;
3479 	bdev_io->internal.in_submit_request = true;
3480 
3481 	bdev_nvme_submit_request(ch, bdev_io);
3482 
3483 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3484 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3485 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3486 
3487 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3488 	poll_threads();
3489 
3490 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3491 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3492 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3493 
3494 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
3495 	ctrlr2->is_failed = true;
3496 	bdev_io->internal.in_submit_request = true;
3497 
3498 	bdev_nvme_submit_request(ch, bdev_io);
3499 
3500 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3501 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3502 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3503 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3504 
3505 	free(bdev_io);
3506 
3507 	spdk_put_io_channel(ch);
3508 
3509 	poll_threads();
3510 
3511 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3512 	CU_ASSERT(rc == 0);
3513 
3514 	poll_threads();
3515 	spdk_delay_us(1000);
3516 	poll_threads();
3517 
3518 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3519 }
3520 
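/* Test helper: walk the channel's io_path_list and return the io_path whose
 * ctrlr channel belongs to the given nvme_ctrlr, or NULL if there is none.
 */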
3521 static struct nvme_io_path *
3522 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3523 			struct nvme_ctrlr *nvme_ctrlr)
3524 {
3525 	struct nvme_io_path *io_path;
3526 	struct nvme_ctrlr *_nvme_ctrlr;
3527 
3528 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3529 		_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
3530 		if (_nvme_ctrlr == nvme_ctrlr) {
3531 			return io_path;
3532 		}
3533 	}
3534 
3535 	return NULL;
3536 }
3537 
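/* Verify that a reset of the bdev ctrlr resets its nvme_ctrlrs sequentially
 * (ctrlr1 first, then ctrlr2), destroying and recreating the qpairs on both
 * I/O channels, and that a concurrent reset from another thread is held on
 * the pending_resets queue until the first reset completes.
 */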
3538 static void
3539 test_reset_bdev_ctrlr(void)
3540 {
3541 	struct nvme_path_id path1 = {}, path2 = {};
3542 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3543 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3544 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3545 	struct nvme_path_id *curr_path1, *curr_path2;
3546 	const int STRING_SIZE = 32;
3547 	const char *attached_names[STRING_SIZE];
3548 	struct nvme_bdev *bdev;
3549 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3550 	struct nvme_bdev_io *first_bio;
3551 	struct spdk_io_channel *ch1, *ch2;
3552 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3553 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3554 	int rc;
3555 
3556 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3557 	ut_init_trid(&path1.trid);
3558 	ut_init_trid2(&path2.trid);
3559 	g_ut_attach_ctrlr_status = 0;
3560 	g_ut_attach_bdev_count = 1;
3561 
3562 	set_thread(0);
3563 
3564 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3565 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3566 
3567 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3568 			      attach_ctrlr_done, NULL, NULL, true);
3569 	CU_ASSERT(rc == 0);
3570 
3571 	spdk_delay_us(1000);
3572 	poll_threads();
3573 
3574 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3575 	poll_threads();
3576 
3577 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3578 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3579 
3580 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3581 			      attach_ctrlr_done, NULL, NULL, true);
3582 	CU_ASSERT(rc == 0);
3583 
3584 	spdk_delay_us(1000);
3585 	poll_threads();
3586 
3587 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3588 	poll_threads();
3589 
3590 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3591 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3592 
3593 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3594 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3595 
3596 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3597 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3598 
3599 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3600 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3601 
3602 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3603 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3604 
3605 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3606 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3607 
3608 	set_thread(0);
3609 
3610 	ch1 = spdk_get_io_channel(bdev);
3611 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3612 
3613 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3614 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3615 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3616 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3617 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3618 
3619 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3620 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3621 
3622 	set_thread(1);
3623 
3624 	ch2 = spdk_get_io_channel(bdev);
3625 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3626 
3627 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3628 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3629 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3630 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3631 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3632 
3633 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3634 
3635 	/* The first reset request from bdev_io is submitted on thread 0.
3636 	 * Check that ctrlr1 is reset first and then ctrlr2.
3637 	 *
3638 	 * A few extra polls are necessary after resetting ctrlr1 to check
3639 	 * pending reset requests for ctrlr1.
3640 	 */
3641 	ctrlr1->is_failed = true;
3642 	curr_path1->is_failed = true;
3643 	ctrlr2->is_failed = true;
3644 	curr_path2->is_failed = true;
3645 
3646 	set_thread(0);
3647 
3648 	bdev_nvme_submit_request(ch1, first_bdev_io);
3649 	CU_ASSERT(first_bio->io_path == io_path11);
3650 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3651 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3652 
3653 	poll_thread_times(0, 2);
3654 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3655 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3656 
3657 	poll_thread_times(1, 1);
3658 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3659 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3660 	CU_ASSERT(ctrlr1->is_failed == true);
3661 
3662 	poll_thread_times(0, 1);
3663 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3664 	CU_ASSERT(ctrlr1->is_failed == false);
3665 	CU_ASSERT(curr_path1->is_failed == true);
3666 
3667 	poll_thread_times(0, 1);
3668 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3669 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3670 
3671 	poll_thread_times(1, 1);
3672 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3673 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3674 
3675 	poll_thread_times(0, 2);
3676 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3677 	poll_thread_times(1, 1);
3678 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3679 	poll_thread_times(0, 2);
3680 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3681 	CU_ASSERT(curr_path1->is_failed == false);
3682 	CU_ASSERT(first_bio->io_path == io_path12);
3683 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3684 
3685 	poll_thread_times(0, 2);
3686 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3687 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3688 
3689 	poll_thread_times(1, 1);
3690 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3691 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3692 	CU_ASSERT(ctrlr2->is_failed == true);
3693 
3694 	poll_thread_times(0, 2);
3695 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3696 	CU_ASSERT(ctrlr2->is_failed == false);
3697 	CU_ASSERT(curr_path2->is_failed == true);
3698 
3699 	poll_thread_times(0, 1);
3700 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3701 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3702 
3703 	poll_thread_times(1, 2);
3704 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3705 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3706 
3707 	poll_thread_times(0, 2);
3708 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3709 	poll_thread_times(1, 1);
3710 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3711 	poll_thread_times(0, 2);
3712 	CU_ASSERT(first_bio->io_path == NULL);
3713 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3714 	CU_ASSERT(curr_path2->is_failed == false);
3715 
3716 	poll_threads();
3717 
3718 	/* There is a race between two reset requests from bdev_io.
3719 	 *
3720 	 * The first reset request is submitted on thread 0, and the second reset
3721 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
3722 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
3723 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
3724 	 * The second is pending on ctrlr2 again. After the first completes resetting
3725 	 * ctrlr2, both complete successfully.
3726 	 */
3727 	ctrlr1->is_failed = true;
3728 	curr_path1->is_failed = true;
3729 	ctrlr2->is_failed = true;
3730 	curr_path2->is_failed = true;
3731 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3732 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3733 
3734 	set_thread(0);
3735 
3736 	bdev_nvme_submit_request(ch1, first_bdev_io);
3737 
3738 	set_thread(1);
3739 
3740 	bdev_nvme_submit_request(ch2, second_bdev_io);
3741 
3742 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3743 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3744 	CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);
3745 
3746 	poll_threads();
3747 
3748 	CU_ASSERT(ctrlr1->is_failed == false);
3749 	CU_ASSERT(curr_path1->is_failed == false);
3750 	CU_ASSERT(ctrlr2->is_failed == false);
3751 	CU_ASSERT(curr_path2->is_failed == false);
3752 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3753 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3754 
3755 	set_thread(0);
3756 
3757 	spdk_put_io_channel(ch1);
3758 
3759 	set_thread(1);
3760 
3761 	spdk_put_io_channel(ch2);
3762 
3763 	poll_threads();
3764 
3765 	set_thread(0);
3766 
3767 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3768 	CU_ASSERT(rc == 0);
3769 
3770 	poll_threads();
3771 	spdk_delay_us(1000);
3772 	poll_threads();
3773 
3774 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3775 
3776 	free(first_bdev_io);
3777 	free(second_bdev_io);
3778 }
3779 
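/* Verify the path selection rules of bdev_nvme_find_io_path(): paths whose
 * ANA state is inaccessible, persistent loss, or change are excluded, paths
 * whose qpair is being reset are excluded, and an ANA optimized path is
 * preferred over the first found non-optimized path.
 */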
3780 static void
3781 test_find_io_path(void)
3782 {
3783 	struct nvme_bdev_channel nbdev_ch = {
3784 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
3785 	};
3786 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
3787 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
3788 	struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
3789 	struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };
3790 
3791 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
3792 
3793 	/* Test that an io_path whose ANA state is not accessible is excluded. */
3794 
3795 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3796 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3797 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3798 
3799 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3800 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3801 
3802 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3803 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3804 
3805 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3806 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3807 
3808 	nbdev_ch.current_io_path = NULL;
3809 
3810 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3811 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3812 
3813 	nbdev_ch.current_io_path = NULL;
3814 
3815 	/* Test that an io_path whose qpair is resetting is excluded. */
3816 
3817 	ctrlr_ch1.qpair = NULL;
3818 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3819 
3820 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
3821 
3822 	/* Test that an ANA optimized state, or the first found ANA non-optimized
3823 	 * state, is prioritized.
3824 	 */
3825 
3826 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3827 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3828 	ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
3829 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3830 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
3831 
3832 	nbdev_ch.current_io_path = NULL;
3833 
3834 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3835 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3836 
3837 	nbdev_ch.current_io_path = NULL;
3838 }
3839 
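/* Verify that I/O submitted while the nvme_ctrlr is resetting, or while the
 * namespace's ANA state is inaccessible, is queued on the channel's
 * retry_io_list and resubmitted in FIFO order once the qpair is reconnected
 * or the ANA state becomes accessible again.
 */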
3840 static void
3841 test_retry_io_if_ctrlr_is_resetting(void)
3842 {
3843 	struct nvme_path_id path = {};
3844 	struct spdk_nvme_ctrlr *ctrlr;
3845 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3846 	struct nvme_ctrlr *nvme_ctrlr;
3847 	const int STRING_SIZE = 32;
3848 	const char *attached_names[STRING_SIZE];
3849 	struct nvme_bdev *bdev;
3850 	struct nvme_ns *nvme_ns;
3851 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
3852 	struct spdk_io_channel *ch;
3853 	struct nvme_bdev_channel *nbdev_ch;
3854 	struct nvme_io_path *io_path;
3855 	struct nvme_ctrlr_channel *ctrlr_ch;
3856 	int rc;
3857 
3858 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3859 	ut_init_trid(&path.trid);
3860 
3861 	set_thread(0);
3862 
3863 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
3864 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3865 
3866 	g_ut_attach_ctrlr_status = 0;
3867 	g_ut_attach_bdev_count = 1;
3868 
3869 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
3870 			      attach_ctrlr_done, NULL, NULL, false);
3871 	CU_ASSERT(rc == 0);
3872 
3873 	spdk_delay_us(1000);
3874 	poll_threads();
3875 
3876 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3877 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3878 
3879 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
3880 	CU_ASSERT(nvme_ctrlr != NULL);
3881 
3882 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3883 	CU_ASSERT(bdev != NULL);
3884 
3885 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
3886 	CU_ASSERT(nvme_ns != NULL);
3887 
3888 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
3889 	ut_bdev_io_set_buf(bdev_io1);
3890 
3891 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
3892 	ut_bdev_io_set_buf(bdev_io2);
3893 
3894 	ch = spdk_get_io_channel(bdev);
3895 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3896 
3897 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3898 
3899 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
3900 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
3901 
3902 	ctrlr_ch = io_path->ctrlr_ch;
3903 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
3904 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
3905 
3906 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
3907 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
3908 
3909 	/* If qpair is connected, I/O should succeed. */
3910 	bdev_io1->internal.in_submit_request = true;
3911 
3912 	bdev_nvme_submit_request(ch, bdev_io1);
3913 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3914 
3915 	poll_threads();
3916 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
3917 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3918 
3919 	/* If qpair is disconnected, it is freed and then reconnected via resetting
3920 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
3921 	 * while resetting the nvme_ctrlr.
3922 	 */
3923 	ctrlr_ch->qpair->is_connected = false;
3924 	ctrlr->is_failed = true;
3925 
3926 	poll_thread_times(0, 4);
3927 
3928 	CU_ASSERT(ctrlr_ch->qpair == NULL);
3929 	CU_ASSERT(nvme_ctrlr->resetting == true);
3930 	CU_ASSERT(ctrlr->is_failed == false);
3931 
3932 	bdev_io1->internal.in_submit_request = true;
3933 
3934 	bdev_nvme_submit_request(ch, bdev_io1);
3935 
3936 	spdk_delay_us(1);
3937 
3938 	bdev_io2->internal.in_submit_request = true;
3939 
3940 	bdev_nvme_submit_request(ch, bdev_io2);
3941 
3942 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3943 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3944 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3945 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
3946 
3947 	poll_threads();
3948 
3949 	CU_ASSERT(ctrlr_ch->qpair != NULL);
3950 	CU_ASSERT(nvme_ctrlr->resetting == false);
3951 
3952 	spdk_delay_us(999999);
3953 
3954 	poll_thread_times(0, 1);
3955 
3956 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
3957 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3958 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3959 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3960 
3961 	poll_threads();
3962 
3963 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3964 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
3965 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3966 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3967 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3968 
3969 	spdk_delay_us(1);
3970 
3971 	poll_thread_times(0, 1);
3972 
3973 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
3974 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3975 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
3976 
3977 	poll_threads();
3978 
3979 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3980 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
3981 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3982 
3983 	/* If the ANA state of the namespace is inaccessible, I/O should be queued. */
3984 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3985 	nbdev_ch->current_io_path = NULL;
3986 
3987 	bdev_io1->internal.in_submit_request = true;
3988 
3989 	bdev_nvme_submit_request(ch, bdev_io1);
3990 
3991 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3992 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3993 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3994 
3995 	/* ANA state became accessible while I/O was queued. */
3996 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3997 
3998 	spdk_delay_us(1000000);
3999 
4000 	poll_thread_times(0, 1);
4001 
4002 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4003 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4004 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4005 
4006 	poll_threads();
4007 
4008 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4009 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4010 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4011 
4012 	free(bdev_io1);
4013 	free(bdev_io2);
4014 
4015 	spdk_put_io_channel(ch);
4016 
4017 	poll_threads();
4018 
4019 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4020 	CU_ASSERT(rc == 0);
4021 
4022 	poll_threads();
4023 	spdk_delay_us(1000);
4024 	poll_threads();
4025 
4026 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4027 }
4028 
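/* Verify retries on I/O path errors: a path error with DNR set is not
 * retried, a plain path error is retried on the same path, and an I/O
 * aborted because its qpair was deleted is retried on the other path of a
 * multipath configuration.
 */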
4029 static void
4030 test_retry_io_for_io_path_error(void)
4031 {
4032 	struct nvme_path_id path1 = {}, path2 = {};
4033 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4034 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4035 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4036 	const int STRING_SIZE = 32;
4037 	const char *attached_names[STRING_SIZE];
4038 	struct nvme_bdev *bdev;
4039 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4040 	struct spdk_bdev_io *bdev_io;
4041 	struct nvme_bdev_io *bio;
4042 	struct spdk_io_channel *ch;
4043 	struct nvme_bdev_channel *nbdev_ch;
4044 	struct nvme_io_path *io_path1, *io_path2;
4045 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
4046 	struct ut_nvme_req *req;
4047 	int rc;
4048 
4049 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4050 	ut_init_trid(&path1.trid);
4051 	ut_init_trid2(&path2.trid);
4052 
4053 	g_opts.bdev_retry_count = 1;
4054 
4055 	set_thread(0);
4056 
4057 	g_ut_attach_ctrlr_status = 0;
4058 	g_ut_attach_bdev_count = 1;
4059 
4060 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4061 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4062 
4063 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4064 
4065 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
4066 			      attach_ctrlr_done, NULL, NULL, true);
4067 	CU_ASSERT(rc == 0);
4068 
4069 	spdk_delay_us(1000);
4070 	poll_threads();
4071 
4072 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4073 	poll_threads();
4074 
4075 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4076 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4077 
4078 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4079 	CU_ASSERT(nvme_ctrlr1 != NULL);
4080 
4081 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4082 	CU_ASSERT(bdev != NULL);
4083 
4084 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4085 	CU_ASSERT(nvme_ns1 != NULL);
4086 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4087 
4088 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4089 	ut_bdev_io_set_buf(bdev_io);
4090 
4091 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4092 
4093 	ch = spdk_get_io_channel(bdev);
4094 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4095 
4096 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4097 
4098 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4099 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4100 
4101 	ctrlr_ch1 = io_path1->ctrlr_ch;
4102 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
4103 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1->qpair != NULL);
4104 
4105 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4106 
4107 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4108 	bdev_io->internal.in_submit_request = true;
4109 
4110 	bdev_nvme_submit_request(ch, bdev_io);
4111 
4112 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4113 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4114 
4115 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4116 	SPDK_CU_ASSERT_FATAL(req != NULL);
4117 
4118 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4119 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4120 	req->cpl.status.dnr = 1;
4121 
4122 	poll_thread_times(0, 1);
4123 
4124 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4125 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4126 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4127 
4128 	/* I/O got a temporary I/O path error, but it should succeed after a retry. */
4129 	bdev_io->internal.in_submit_request = true;
4130 
4131 	bdev_nvme_submit_request(ch, bdev_io);
4132 
4133 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4134 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4135 
4136 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4137 	SPDK_CU_ASSERT_FATAL(req != NULL);
4138 
4139 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4140 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4141 
4142 	poll_thread_times(0, 1);
4143 
4144 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4145 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4146 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4147 
4148 	poll_threads();
4149 
4150 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4151 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4152 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4153 
4154 	/* Add io_path2 dynamically, and create a multipath configuration. */
4155 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4156 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4157 
4158 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4159 
4160 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
4161 			      attach_ctrlr_done, NULL, NULL, true);
4162 	CU_ASSERT(rc == 0);
4163 
4164 	spdk_delay_us(1000);
4165 	poll_threads();
4166 
4167 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4168 	poll_threads();
4169 
4170 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4171 	CU_ASSERT(nvme_ctrlr2 != NULL);
4172 
4173 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4174 	CU_ASSERT(nvme_ns2 != NULL);
4175 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4176 
4177 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4178 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4179 
4180 	ctrlr_ch2 = io_path2->ctrlr_ch;
4181 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
4182 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2->qpair != NULL);
4183 
4184 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4185 	 * and deleted, so the I/O is aborted. io_path2 is still available, so
4186 	 * after a retry the I/O is submitted to io_path2 and should succeed.
4187 	 */
4188 	bdev_io->internal.in_submit_request = true;
4189 
4190 	bdev_nvme_submit_request(ch, bdev_io);
4191 
4192 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4193 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4194 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4195 
4196 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4197 	SPDK_CU_ASSERT_FATAL(req != NULL);
4198 
4199 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4200 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4201 
4202 	poll_thread_times(0, 1);
4203 
4204 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4205 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4206 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4207 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4208 
4209 	bdev_nvme_destroy_qpair(ctrlr_ch1);
4210 
4211 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
4212 
4213 	poll_threads();
4214 
4215 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4216 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4217 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4218 
4219 	free(bdev_io);
4220 
4221 	spdk_put_io_channel(ch);
4222 
4223 	poll_threads();
4224 
4225 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4226 	CU_ASSERT(rc == 0);
4227 
4228 	poll_threads();
4229 	spdk_delay_us(1000);
4230 	poll_threads();
4231 
4232 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4233 
4234 	g_opts.bdev_retry_count = 0;
4235 }
4236 
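/* Verify how g_opts.bdev_retry_count bounds retries: I/O aborted by request
 * is never retried, I/O whose retry_count has reached the limit is failed,
 * a limit of -1 means retry without bound, and I/O below the limit is retried.
 */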
4237 static void
4238 test_retry_io_count(void)
4239 {
4240 	struct nvme_path_id path = {};
4241 	struct spdk_nvme_ctrlr *ctrlr;
4242 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4243 	struct nvme_ctrlr *nvme_ctrlr;
4244 	const int STRING_SIZE = 32;
4245 	const char *attached_names[STRING_SIZE];
4246 	struct nvme_bdev *bdev;
4247 	struct nvme_ns *nvme_ns;
4248 	struct spdk_bdev_io *bdev_io;
4249 	struct nvme_bdev_io *bio;
4250 	struct spdk_io_channel *ch;
4251 	struct nvme_bdev_channel *nbdev_ch;
4252 	struct nvme_io_path *io_path;
4253 	struct nvme_ctrlr_channel *ctrlr_ch;
4254 	struct ut_nvme_req *req;
4255 	int rc;
4256 
4257 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4258 	ut_init_trid(&path.trid);
4259 
4260 	set_thread(0);
4261 
4262 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4263 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4264 
4265 	g_ut_attach_ctrlr_status = 0;
4266 	g_ut_attach_bdev_count = 1;
4267 
4268 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4269 			      attach_ctrlr_done, NULL, NULL, false);
4270 	CU_ASSERT(rc == 0);
4271 
4272 	spdk_delay_us(1000);
4273 	poll_threads();
4274 
4275 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4276 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4277 
4278 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4279 	CU_ASSERT(nvme_ctrlr != NULL);
4280 
4281 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4282 	CU_ASSERT(bdev != NULL);
4283 
4284 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4285 	CU_ASSERT(nvme_ns != NULL);
4286 
4287 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4288 	ut_bdev_io_set_buf(bdev_io);
4289 
4290 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4291 
4292 	ch = spdk_get_io_channel(bdev);
4293 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4294 
4295 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4296 
4297 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4298 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4299 
4300 	ctrlr_ch = io_path->ctrlr_ch;
4301 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4302 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4303 
4304 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4305 
4306 	/* If I/O is aborted by request, it should not be retried. */
4307 	g_opts.bdev_retry_count = 1;
4308 
4309 	bdev_io->internal.in_submit_request = true;
4310 
4311 	bdev_nvme_submit_request(ch, bdev_io);
4312 
4313 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4314 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4315 
4316 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4317 	SPDK_CU_ASSERT_FATAL(req != NULL);
4318 
4319 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4320 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4321 
4322 	poll_thread_times(0, 1);
4323 
4324 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4325 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4326 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4327 
4328 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4329 	 * the failed I/O should not be retried.
4330 	 */
4331 	g_opts.bdev_retry_count = 4;
4332 
4333 	bdev_io->internal.in_submit_request = true;
4334 
4335 	bdev_nvme_submit_request(ch, bdev_io);
4336 
4337 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4338 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4339 
4340 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4341 	SPDK_CU_ASSERT_FATAL(req != NULL);
4342 
4343 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4344 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4345 	bio->retry_count = 4;
4346 
4347 	poll_thread_times(0, 1);
4348 
4349 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4350 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4351 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4352 
4353 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4354 	g_opts.bdev_retry_count = -1;
4355 
4356 	bdev_io->internal.in_submit_request = true;
4357 
4358 	bdev_nvme_submit_request(ch, bdev_io);
4359 
4360 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4361 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4362 
4363 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4364 	SPDK_CU_ASSERT_FATAL(req != NULL);
4365 
4366 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4367 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4368 	bio->retry_count = 4;
4369 
4370 	poll_thread_times(0, 1);
4371 
4372 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4373 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4374 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4375 
4376 	poll_threads();
4377 
4378 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4379 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4380 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4381 
4382 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4383 	 * the failed I/O should be retried.
4384 	 */
4385 	g_opts.bdev_retry_count = 4;
4386 
4387 	bdev_io->internal.in_submit_request = true;
4388 
4389 	bdev_nvme_submit_request(ch, bdev_io);
4390 
4391 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4392 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4393 
4394 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4395 	SPDK_CU_ASSERT_FATAL(req != NULL);
4396 
4397 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4398 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4399 	bio->retry_count = 3;
4400 
4401 	poll_thread_times(0, 1);
4402 
4403 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4404 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4405 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4406 
4407 	poll_threads();
4408 
4409 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4410 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4411 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4412 
4413 	free(bdev_io);
4414 
4415 	spdk_put_io_channel(ch);
4416 
4417 	poll_threads();
4418 
4419 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4420 	CU_ASSERT(rc == 0);
4421 
4422 	poll_threads();
4423 	spdk_delay_us(1000);
4424 	poll_threads();
4425 
4426 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4427 
4428 	g_opts.bdev_retry_count = 0;
4429 }
4430 
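/* Verify that only one ANA log page read is outstanding at a time: further
 * read requests are rejected while one is in flight, a reset is still
 * allowed during a read, and a read is rejected while the ctrlr is resetting.
 */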
4431 static void
4432 test_concurrent_read_ana_log_page(void)
4433 {
4434 	struct spdk_nvme_transport_id trid = {};
4435 	struct spdk_nvme_ctrlr *ctrlr;
4436 	struct nvme_ctrlr *nvme_ctrlr;
4437 	const int STRING_SIZE = 32;
4438 	const char *attached_names[STRING_SIZE];
4439 	int rc;
4440 
4441 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4442 	ut_init_trid(&trid);
4443 
4444 	set_thread(0);
4445 
4446 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4447 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4448 
4449 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4450 
4451 	g_ut_attach_ctrlr_status = 0;
4452 	g_ut_attach_bdev_count = 1;
4453 
4454 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
4455 			      attach_ctrlr_done, NULL, NULL, false);
4456 	CU_ASSERT(rc == 0);
4457 
4458 	spdk_delay_us(1000);
4459 	poll_threads();
4460 
4461 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4462 	poll_threads();
4463 
4464 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4465 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4466 
4467 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4468 
4469 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4470 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4471 
4472 	/* A following read request should be rejected. */
4473 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4474 
4475 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4476 
4477 	set_thread(1);
4478 
4479 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4480 
4481 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4482 
4483 	/* A reset request issued while reading the ANA log page should not be rejected. */
4484 	rc = bdev_nvme_reset(nvme_ctrlr);
4485 	CU_ASSERT(rc == 0);
4486 
4487 	poll_threads();
4488 
4489 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4490 	poll_threads();
4491 
4492 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4493 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4494 
4495 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4496 	rc = bdev_nvme_reset(nvme_ctrlr);
4497 	CU_ASSERT(rc == 0);
4498 
4499 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4500 
4501 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4502 
4503 	set_thread(0);
4504 
4505 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4506 	CU_ASSERT(rc == 0);
4507 
4508 	poll_threads();
4509 	spdk_delay_us(1000);
4510 	poll_threads();
4511 
4512 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4513 }
4514 
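/* Verify the handling of an ANA error completion: the I/O is queued for
 * retry, the namespace is frozen while its ANA state is updated from the
 * ANA log page, and the queued I/O succeeds after the namespace becomes
 * accessible again.
 */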
4515 static void
4516 test_retry_io_for_ana_error(void)
4517 {
4518 	struct nvme_path_id path = {};
4519 	struct spdk_nvme_ctrlr *ctrlr;
4520 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4521 	struct nvme_ctrlr *nvme_ctrlr;
4522 	const int STRING_SIZE = 32;
4523 	const char *attached_names[STRING_SIZE];
4524 	struct nvme_bdev *bdev;
4525 	struct nvme_ns *nvme_ns;
4526 	struct spdk_bdev_io *bdev_io;
4527 	struct nvme_bdev_io *bio;
4528 	struct spdk_io_channel *ch;
4529 	struct nvme_bdev_channel *nbdev_ch;
4530 	struct nvme_io_path *io_path;
4531 	struct nvme_ctrlr_channel *ctrlr_ch;
4532 	struct ut_nvme_req *req;
4533 	uint64_t now;
4534 	int rc;
4535 
4536 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4537 	ut_init_trid(&path.trid);
4538 
4539 	g_opts.bdev_retry_count = 1;
4540 
4541 	set_thread(0);
4542 
4543 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4544 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4545 
4546 	g_ut_attach_ctrlr_status = 0;
4547 	g_ut_attach_bdev_count = 1;
4548 
4549 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4550 			      attach_ctrlr_done, NULL, NULL, false);
4551 	CU_ASSERT(rc == 0);
4552 
4553 	spdk_delay_us(1000);
4554 	poll_threads();
4555 
4556 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4557 	poll_threads();
4558 
4559 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4560 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4561 
4562 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4563 	CU_ASSERT(nvme_ctrlr != NULL);
4564 
4565 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4566 	CU_ASSERT(bdev != NULL);
4567 
4568 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4569 	CU_ASSERT(nvme_ns != NULL);
4570 
4571 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4572 	ut_bdev_io_set_buf(bdev_io);
4573 
4574 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4575 
4576 	ch = spdk_get_io_channel(bdev);
4577 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4578 
4579 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4580 
4581 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4582 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4583 
4584 	ctrlr_ch = io_path->ctrlr_ch;
4585 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4586 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4587 
4588 	now = spdk_get_ticks();
4589 
4590 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4591 
4592 	/* If I/O gets an ANA error, it should be queued, the corresponding namespace
4593 	 * should be frozen, and its ANA state should be updated.
4594 	 */
4595 	bdev_io->internal.in_submit_request = true;
4596 
4597 	bdev_nvme_submit_request(ch, bdev_io);
4598 
4599 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4600 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4601 
4602 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4603 	SPDK_CU_ASSERT_FATAL(req != NULL);
4604 
4605 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4606 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4607 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4608 
4609 	poll_thread_times(0, 1);
4610 
4611 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4612 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4613 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4614 	/* I/O should be retried immediately. */
4615 	CU_ASSERT(bio->retry_ticks == now);
4616 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4617 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4618 
4619 	poll_threads();
4620 
4621 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4622 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4623 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4624 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4625 	/* The I/O should be retried after one second if no I/O path was found but
4626 	 * an I/O path may become available again.
4627 	 */
4628 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4629 
4630 	/* The namespace should be unfrozen after its ANA state update completes. */
4631 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4632 	poll_threads();
4633 
4634 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4635 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4636 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4637 
4638 	/* Retrying the queued I/O should succeed. */
4639 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4640 	poll_threads();
4641 
4642 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4643 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4644 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4645 
4646 	free(bdev_io);
4647 
4648 	spdk_put_io_channel(ch);
4649 
4650 	poll_threads();
4651 
4652 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4653 	CU_ASSERT(rc == 0);
4654 
4655 	poll_threads();
4656 	spdk_delay_us(1000);
4657 	poll_threads();
4658 
4659 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4660 
4661 	g_opts.bdev_retry_count = 0;
4662 }
4663 
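/* Verify that an admin passthrough request submitted while the ctrlr is
 * resetting is queued on the channel's retry_io_list and resubmitted after
 * the reset completes.
 */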
4664 static void
4665 test_retry_admin_passthru_if_ctrlr_is_resetting(void)
4666 {
4667 	struct nvme_path_id path = {};
4668 	struct spdk_nvme_ctrlr *ctrlr;
4669 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4670 	struct nvme_ctrlr *nvme_ctrlr;
4671 	const int STRING_SIZE = 32;
4672 	const char *attached_names[STRING_SIZE];
4673 	struct nvme_bdev *bdev;
4674 	struct spdk_bdev_io *admin_io;
4675 	struct spdk_io_channel *ch;
4676 	struct nvme_bdev_channel *nbdev_ch;
4677 	int rc;
4678 
4679 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4680 	ut_init_trid(&path.trid);
4681 
4682 	g_opts.bdev_retry_count = 1;
4683 
4684 	set_thread(0);
4685 
4686 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4687 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4688 
4689 	g_ut_attach_ctrlr_status = 0;
4690 	g_ut_attach_bdev_count = 1;
4691 
4692 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4693 			      attach_ctrlr_done, NULL, NULL, false);
4694 	CU_ASSERT(rc == 0);
4695 
4696 	spdk_delay_us(1000);
4697 	poll_threads();
4698 
4699 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4700 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4701 
4702 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4703 	CU_ASSERT(nvme_ctrlr != NULL);
4704 
4705 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4706 	CU_ASSERT(bdev != NULL);
4707 
4708 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4709 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4710 
4711 	ch = spdk_get_io_channel(bdev);
4712 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4713 
4714 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4715 
4716 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4717 
4718 	/* If ctrlr is available, admin passthrough should succeed. */
4719 	admin_io->internal.in_submit_request = true;
4720 
4721 	bdev_nvme_submit_request(ch, admin_io);
4722 
4723 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4724 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4725 
4726 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4727 	poll_threads();
4728 
4729 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4730 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4731 
4732 	/* An admin passthrough request submitted while the ctrlr is resetting
4733 	 * should be queued.
4734 	 */
4735 	bdev_nvme_reset(nvme_ctrlr);
4736 
4737 	poll_thread_times(0, 1);
4738 
4739 	admin_io->internal.in_submit_request = true;
4740 
4741 	bdev_nvme_submit_request(ch, admin_io);
4742 
4743 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4744 	CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4745 
4746 	poll_threads();
4747 
4748 	CU_ASSERT(nvme_ctrlr->resetting == false);
4749 
4750 	spdk_delay_us(1000000);
4751 	poll_thread_times(0, 1);
4752 
4753 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4754 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4755 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4756 
4757 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4758 	poll_threads();
4759 
4760 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4761 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4762 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4763 
4764 	free(admin_io);
4765 
4766 	spdk_put_io_channel(ch);
4767 
4768 	poll_threads();
4769 
4770 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4771 	CU_ASSERT(rc == 0);
4772 
4773 	poll_threads();
4774 	spdk_delay_us(1000);
4775 	poll_threads();
4776 
4777 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4778 
4779 	g_opts.bdev_retry_count = 0;
4780 }
4781 
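/* Verify retries of admin passthrough on path errors: a path error with DNR
 * set is not retried, a plain path error is retried on the same ctrlr, and
 * a request aborted on a failed ctrlr is retried on the other ctrlr of a
 * multipath configuration.
 */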
4782 static void
4783 test_retry_admin_passthru_for_path_error(void)
4784 {
4785 	struct nvme_path_id path1 = {}, path2 = {};
4786 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4787 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4788 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4789 	const int STRING_SIZE = 32;
4790 	const char *attached_names[STRING_SIZE];
4791 	struct nvme_bdev *bdev;
4792 	struct spdk_bdev_io *admin_io;
4793 	struct spdk_io_channel *ch;
4794 	struct ut_nvme_req *req;
4795 	int rc;
4796 
4797 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4798 	ut_init_trid(&path1.trid);
4799 	ut_init_trid2(&path2.trid);
4800 
4801 	g_opts.bdev_retry_count = 1;
4802 
4803 	set_thread(0);
4804 
4805 	g_ut_attach_ctrlr_status = 0;
4806 	g_ut_attach_bdev_count = 1;
4807 
4808 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4809 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4810 
4811 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4812 
4813 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
4814 			      attach_ctrlr_done, NULL, NULL, true);
4815 	CU_ASSERT(rc == 0);
4816 
4817 	spdk_delay_us(1000);
4818 	poll_threads();
4819 
4820 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4821 	poll_threads();
4822 
4823 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4824 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4825 
4826 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4827 	CU_ASSERT(nvme_ctrlr1 != NULL);
4828 
4829 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4830 	CU_ASSERT(bdev != NULL);
4831 
4832 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4833 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4834 
4835 	ch = spdk_get_io_channel(bdev);
4836 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4837 
4838 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4839 
4840 	/* Admin passthrough got a path error, but it should not be retried if DNR is set. */
4841 	admin_io->internal.in_submit_request = true;
4842 
4843 	bdev_nvme_submit_request(ch, admin_io);
4844 
4845 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4846 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4847 
4848 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4849 	SPDK_CU_ASSERT_FATAL(req != NULL);
4850 
4851 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4852 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4853 	req->cpl.status.dnr = 1;
4854 
4855 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4856 	poll_thread_times(0, 2);
4857 
4858 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4859 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4860 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4861 
4862 	/* Admin passthrough got a path error, but it should succeed after a retry. */
4863 	admin_io->internal.in_submit_request = true;
4864 
4865 	bdev_nvme_submit_request(ch, admin_io);
4866 
4867 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4868 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4869 
4870 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4871 	SPDK_CU_ASSERT_FATAL(req != NULL);
4872 
4873 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4874 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4875 
4876 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4877 	poll_thread_times(0, 2);
4878 
4879 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4880 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4881 
4882 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4883 	poll_threads();
4884 
4885 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4886 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4887 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4888 
4889 	/* Add ctrlr2 dynamically, and create a multipath configuration. */
4890 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4891 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4892 
4893 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4894 
4895 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
4896 			      attach_ctrlr_done, NULL, NULL, true);
4897 	CU_ASSERT(rc == 0);
4898 
4899 	spdk_delay_us(1000);
4900 	poll_threads();
4901 
4902 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4903 	poll_threads();
4904 
4905 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4906 	CU_ASSERT(nvme_ctrlr2 != NULL);
4907 
4908 	/* Admin passthrough is submitted to ctrlr1, but ctrlr1 is failed, so the
4909 	 * admin passthrough is aborted. ctrlr2 is still available, so after a
4910 	 * retry the admin passthrough is submitted to ctrlr2 and should succeed.
4911 	 */
4913 	admin_io->internal.in_submit_request = true;
4914 
4915 	bdev_nvme_submit_request(ch, admin_io);
4916 
4917 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4918 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4919 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4920 
4921 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4922 	SPDK_CU_ASSERT_FATAL(req != NULL);
4923 
4924 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4925 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4926 	ctrlr1->is_failed = true;
4927 
4928 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4929 	poll_thread_times(0, 2);
4930 
4931 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4932 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4933 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4934 
4935 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4936 	poll_threads();
4937 
4938 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4939 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4940 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4941 
4942 	free(admin_io);
4943 
4944 	spdk_put_io_channel(ch);
4945 
4946 	poll_threads();
4947 
4948 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4949 	CU_ASSERT(rc == 0);
4950 
4951 	poll_threads();
4952 	spdk_delay_us(1000);
4953 	poll_threads();
4954 
4955 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4956 
4957 	g_opts.bdev_retry_count = 0;
4958 }
4959 
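/* Verify that g_opts.bdev_retry_count also bounds admin passthrough retries:
 * a request aborted by request is not retried, and a request whose
 * retry_count has reached the limit is failed.
 */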
4960 static void
4961 test_retry_admin_passthru_by_count(void)
4962 {
4963 	struct nvme_path_id path = {};
4964 	struct spdk_nvme_ctrlr *ctrlr;
4965 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4966 	struct nvme_ctrlr *nvme_ctrlr;
4967 	const int STRING_SIZE = 32;
4968 	const char *attached_names[STRING_SIZE];
4969 	struct nvme_bdev *bdev;
4970 	struct spdk_bdev_io *admin_io;
4971 	struct nvme_bdev_io *admin_bio;
4972 	struct spdk_io_channel *ch;
4973 	struct ut_nvme_req *req;
4974 	int rc;
4975 
4976 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4977 	ut_init_trid(&path.trid);
4978 
4979 	set_thread(0);
4980 
4981 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4982 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4983 
4984 	g_ut_attach_ctrlr_status = 0;
4985 	g_ut_attach_bdev_count = 1;
4986 
4987 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4988 			      attach_ctrlr_done, NULL, NULL, false);
4989 	CU_ASSERT(rc == 0);
4990 
4991 	spdk_delay_us(1000);
4992 	poll_threads();
4993 
4994 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4995 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4996 
4997 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4998 	CU_ASSERT(nvme_ctrlr != NULL);
4999 
5000 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5001 	CU_ASSERT(bdev != NULL);
5002 
5003 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5004 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5005 
5006 	admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx;
5007 
5008 	ch = spdk_get_io_channel(bdev);
5009 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5010 
5011 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5012 
5013 	/* If admin passthrough is aborted by request, it should not be retried. */
5014 	g_opts.bdev_retry_count = 1;
5015 
5016 	admin_io->internal.in_submit_request = true;
5017 
5018 	bdev_nvme_submit_request(ch, admin_io);
5019 
5020 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5021 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5022 
5023 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5024 	SPDK_CU_ASSERT_FATAL(req != NULL);
5025 
5026 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5027 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5028 
5029 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5030 	poll_thread_times(0, 2);
5031 
5032 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5033 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5034 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5035 
5036 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5037 	 * the failed admin passthrough should not be retried.
5038 	 */
5039 	g_opts.bdev_retry_count = 4;
5040 
5041 	admin_io->internal.in_submit_request = true;
5042 
5043 	bdev_nvme_submit_request(ch, admin_io);
5044 
5045 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5046 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5047 
5048 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5049 	SPDK_CU_ASSERT_FATAL(req != NULL);
5050 
5051 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5052 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5053 	admin_bio->retry_count = 4;
5054 
5055 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5056 	poll_thread_times(0, 2);
5057 
5058 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5059 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5060 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5061 
5062 	free(admin_io);
5063 
5064 	spdk_put_io_channel(ch);
5065 
5066 	poll_threads();
5067 
5068 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5069 	CU_ASSERT(rc == 0);
5070 
5071 	poll_threads();
5072 	spdk_delay_us(1000);
5073 	poll_threads();
5074 
5075 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5076 
5077 	g_opts.bdev_retry_count = 0;
5078 }
5079 
5080 int
5081 main(int argc, const char **argv)
5082 {
5083 	CU_pSuite	suite = NULL;
5084 	unsigned int	num_failures;
5085 
5086 	CU_set_error_action(CUEA_ABORT);
5087 	CU_initialize_registry();
5088 
5089 	suite = CU_add_suite("nvme", NULL, NULL);
5090 
5091 	CU_ADD_TEST(suite, test_create_ctrlr);
5092 	CU_ADD_TEST(suite, test_reset_ctrlr);
5093 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
5094 	CU_ADD_TEST(suite, test_failover_ctrlr);
5095 	CU_ADD_TEST(suite, test_pending_reset);
5096 	CU_ADD_TEST(suite, test_attach_ctrlr);
5097 	CU_ADD_TEST(suite, test_aer_cb);
5098 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
5099 	CU_ADD_TEST(suite, test_add_remove_trid);
5100 	CU_ADD_TEST(suite, test_abort);
5101 	CU_ADD_TEST(suite, test_get_io_qpair);
5102 	CU_ADD_TEST(suite, test_bdev_unregister);
5103 	CU_ADD_TEST(suite, test_compare_ns);
5104 	CU_ADD_TEST(suite, test_init_ana_log_page);
5105 	CU_ADD_TEST(suite, test_get_memory_domains);
5106 	CU_ADD_TEST(suite, test_reconnect_qpair);
5107 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
5108 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
5109 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
5110 	CU_ADD_TEST(suite, test_admin_path);
5111 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
5112 	CU_ADD_TEST(suite, test_find_io_path);
5113 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
5114 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
5115 	CU_ADD_TEST(suite, test_retry_io_count);
5116 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
5117 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
5118 	CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting);
5119 	CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error);
5120 	CU_ADD_TEST(suite, test_retry_admin_passthru_by_count);
5121 
5122 	CU_basic_set_mode(CU_BRM_VERBOSE);
5123 
5124 	allocate_threads(3);
5125 	set_thread(0);
5126 	bdev_nvme_library_init();
5127 	init_accel();
5128 
5129 	CU_basic_run_tests();
5130 
5131 	set_thread(0);
5132 	bdev_nvme_library_fini();
5133 	fini_accel();
5134 	free_threads();
5135 
5136 	num_failures = CU_get_number_of_failures();
5137 	CU_cleanup_registry();
5138 
5139 	return num_failures;
5140 }
5141