/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

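/* Opaque io_device handle used by the accel engine channel mock below. */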
static void *g_accel_p = (void *)0xdeadbeaf;

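/*
 * Stubs for the library symbols that bdev_nvme.c links against but these
 * tests do not exercise. Each DEFINE_STUB() generates a no-op implementation
 * that returns the given default value; DEFINE_STUB_V() generates an empty
 * void function.
 */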
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);

int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				       struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return 0;
}

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

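/*
 * Minimal test doubles for the NVMe driver structures, which are opaque to
 * the code under test. Only the fields these unit tests inspect are modeled.
 */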
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

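/*
 * Controllers created by ut_attach_ctrlr() wait on g_ut_init_ctrlrs until the
 * stubbed probe path moves them to g_ut_attached_ctrlrs. The g_ut_* knobs let
 * each test case dictate the expected attach and bdev registration results.
 */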
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

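/*
 * Create a test controller with the given number of namespaces and queue it
 * on g_ut_init_ctrlrs so that a subsequent probe poll can "discover" it.
 * Returns NULL if a controller with the same trid already exists.
 */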
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multi_ctrlr)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multi_ctrlr;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

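/*
 * Queue a request as outstanding on the qpair with a default "success"
 * completion; spdk_nvme_qpair_process_completions() fires the callback later.
 */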
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
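	/* The ctrlr pointer itself doubles as the detach context here;
	 * spdk_nvme_detach_poll_async() casts it back.
	 */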
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

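/*
 * Reset mocks: the async step clears is_failed and hands back a context; the
 * poll step succeeds unless the test set ctrlr->fail_reset, in which case the
 * controller is marked failed again and -EIO is returned.
 */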
int
spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;

	free(ctrlr_reset_ctx);

	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	return 0;
}

int
spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
{
	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;

	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
	if (!ctrlr_reset_ctx) {
		return -ENOMEM;
	}

	ctrlr->is_failed = false;

	ctrlr_reset_ctx->ctrlr = ctrlr;
	*reset_ctx = ctrlr_reset_ctx;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
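/* Build an ANA log page with one group descriptor per active namespace, each
 * group containing exactly one NSID (hence the single uint32_t above).
 */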
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

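/*
 * Abort mock: look up the target request by its cb_arg, mark it aborted by
 * request, and queue the abort command itself on the admin qpair.
 */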
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}

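/*
 * The I/O command mocks below simply record each request with its NVMe opcode
 * via ut_submit_nvme_request(); tests complete them later by processing qpair
 * completions.
 */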
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}

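/*
 * Drain completions from each connected qpair, then report disconnected
 * qpairs through disconnected_qpair_cb, mirroring the real poll-group contract.
 */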
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_connected) {
			local_completions = spdk_nvme_qpair_process_completions(qpair,
					    completions_per_qpair);
			if (local_completions < 0 && error_reason == 0) {
				error_reason = local_completions;
			} else {
				num_completions += local_completions;
				assert(num_completions >= 0);
			}
		}
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (!qpair->is_connected) {
			disconnected_qpair_cb(qpair, group->ctx);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

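/*
 * Step the reset state machine one poller iteration at a time: I/O qpairs are
 * deleted on each thread, the controller itself is reset, and the qpairs are
 * then recreated on each thread before the reset completes.
 */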
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try to destruct the ctrlr while it is being reset; destruction will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the single-trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test the two-trid case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

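/*
 * A reset submitted while another reset is in progress is queued on the
 * channel's pending_resets list and is completed, with the same status,
 * when the first reset finishes.
 */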
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	ctrlr_ch1 = io_path1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	ctrlr_ch2 = io_path2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
	 * created because creating the nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

1848 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
1849 	 * and 4th namespaces are populated.
1850 	 */
1851 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
1852 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1853 
1854 	ctrlr->ns[0].is_active = false;
1855 
1856 	g_ut_attach_ctrlr_status = 0;
1857 	g_ut_attach_bdev_count = 3;
1858 
1859 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1860 			      attach_ctrlr_done, NULL, NULL, false);
1861 	CU_ASSERT(rc == 0);
1862 
1863 	spdk_delay_us(1000);
1864 	poll_threads();
1865 
1866 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1867 	poll_threads();
1868 
1869 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1870 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1871 
1872 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
1873 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1874 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
1875 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1876 
1877 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
1878 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1879 	CU_ASSERT(bdev->disk.blockcnt == 1024);
1880 
1881 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
1882 	 * change the size of the 4th namespace.
1883 	 */
1884 	ctrlr->ns[0].is_active = true;
1885 	ctrlr->ns[2].is_active = false;
1886 	ctrlr->nsdata[3].nsze = 2048;
1887 
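	/* Build a Namespace Attribute Changed AER completion and deliver it directly
	 * to aer_cb(), which should rescan and repopulate the namespaces.
	 */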
1888 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1889 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
1890 	cpl.cdw0 = event.raw;
1891 
1892 	aer_cb(nvme_ctrlr, &cpl);
1893 
1894 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
1895 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1896 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
1897 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1898 	CU_ASSERT(bdev->disk.blockcnt == 2048);
1899 
1900 	/* Change ANA state of active namespaces. */
1901 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
1902 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
1903 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
1904 
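	/* Deliver an ANA Change AER. Reading the ANA log page is asynchronous, so
	 * advance the admin queue poller before checking the updated states below.
	 */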
1905 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1906 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
1907 	cpl.cdw0 = event.raw;
1908 
1909 	aer_cb(nvme_ctrlr, &cpl);
1910 
1911 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1912 	poll_threads();
1913 
1914 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
1915 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
1916 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
1917 
1918 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1919 	CU_ASSERT(rc == 0);
1920 
1921 	poll_threads();
1922 	spdk_delay_us(1000);
1923 	poll_threads();
1924 
1925 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1926 }
1927 
1928 static void
1929 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1930 			enum spdk_bdev_io_type io_type)
1931 {
1932 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1933 	struct nvme_io_path *io_path;
1934 	struct spdk_nvme_qpair *qpair;
1935 
1936 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1937 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1938 	qpair = io_path->ctrlr_ch->qpair;
1939 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1940 
1941 	bdev_io->type = io_type;
1942 	bdev_io->internal.in_submit_request = true;
1943 
1944 	bdev_nvme_submit_request(ch, bdev_io);
1945 
1946 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
1947 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
1948 
1949 	poll_threads();
1950 
1951 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1952 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1953 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1954 }
1955 
1956 static void
1957 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1958 		   enum spdk_bdev_io_type io_type)
1959 {
1960 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1961 	struct nvme_io_path *io_path;
1962 	struct spdk_nvme_qpair *qpair;
1963 
1964 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1965 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1966 	qpair = io_path->ctrlr_ch->qpair;
1967 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1968 
1969 	bdev_io->type = io_type;
1970 	bdev_io->internal.in_submit_request = true;
1971 
1972 	bdev_nvme_submit_request(ch, bdev_io);
1973 
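	/* Nop I/O types (e.g. flush) complete inline; no request ever reaches the qpair. */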
1974 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1975 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1976 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1977 }
1978 
1979 static void
1980 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
1981 {
1982 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1983 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
1984 	struct ut_nvme_req *req;
1985 	struct nvme_io_path *io_path;
1986 	struct spdk_nvme_qpair *qpair;
1987 
1988 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1989 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1990 	qpair = io_path->ctrlr_ch->qpair;
1991 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1992 
1993 	/* Only compare-and-write fused commands are supported for now. */
1994 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
1995 	bdev_io->internal.in_submit_request = true;
1996 
1997 	bdev_nvme_submit_request(ch, bdev_io);
1998 
1999 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2000 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2001 	CU_ASSERT(bio->first_fused_submitted == true);
2002 
2003 	/* The first outstanding request is the compare operation. Report its opcode in cdw0 so the completion callback can identify it. */
2004 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2005 	SPDK_CU_ASSERT_FATAL(req != NULL);
2006 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2007 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2008 
2009 	poll_threads();
2010 
2011 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2012 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2013 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2014 }
2015 
2016 static void
2017 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2018 			 struct spdk_nvme_ctrlr *ctrlr)
2019 {
2020 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2021 	bdev_io->internal.in_submit_request = true;
2022 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2023 
2024 	bdev_nvme_submit_request(ch, bdev_io);
2025 
2026 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2027 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2028 
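	/* The admin command is processed by the admin queue poller on thread 1, where
	 * the ctrlr was created; the bdev_io completion is then delivered back to
	 * thread 0, which submitted it.
	 */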
2029 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2030 	poll_thread_times(1, 1);
2031 
2032 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2033 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2034 
2035 	poll_thread_times(0, 1);
2036 
2037 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2038 }
2039 
2040 static void
2041 test_submit_nvme_cmd(void)
2042 {
2043 	struct spdk_nvme_transport_id trid = {};
2044 	struct spdk_nvme_ctrlr *ctrlr;
2045 	struct nvme_ctrlr *nvme_ctrlr;
2046 	const int STRING_SIZE = 32;
2047 	const char *attached_names[STRING_SIZE];
2048 	struct nvme_bdev *bdev;
2049 	struct spdk_bdev_io *bdev_io;
2050 	struct spdk_io_channel *ch;
2051 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2052 	int rc;
2053 
2054 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2055 	ut_init_trid(&trid);
2056 
2057 	set_thread(1);
2058 
2059 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2060 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2061 
2062 	g_ut_attach_ctrlr_status = 0;
2063 	g_ut_attach_bdev_count = 1;
2064 
2065 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2066 			      attach_ctrlr_done, NULL, NULL, false);
2067 	CU_ASSERT(rc == 0);
2068 
2069 	spdk_delay_us(1000);
2070 	poll_threads();
2071 
2072 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2073 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2074 
2075 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2076 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2077 
2078 	set_thread(0);
2079 
2080 	ch = spdk_get_io_channel(bdev);
2081 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2082 
2083 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2084 
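	/* The first read is submitted without a data buffer to exercise the
	 * buffer allocation (spdk_bdev_io_get_buf()) path.
	 */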
2085 	bdev_io->u.bdev.iovs = NULL;
2086 
2087 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2088 
2089 	ut_bdev_io_set_buf(bdev_io);
2090 
2091 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2092 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2093 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2094 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2095 
2096 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2097 
2098 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2099 
2100 	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
2101 	bdev_io->internal.ext_opts = &ext_io_opts;
2102 	g_ut_readv_ext_called = false;
2103 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2104 	CU_ASSERT(g_ut_readv_ext_called == true);
2105 	g_ut_readv_ext_called = false;
2106 
2107 	g_ut_writev_ext_called = false;
2108 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2109 	CU_ASSERT(g_ut_writev_ext_called == true);
2110 	g_ut_writev_ext_called = false;
2111 	bdev_io->internal.ext_opts = NULL;
2112 
2113 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2114 
2115 	free(bdev_io);
2116 
2117 	spdk_put_io_channel(ch);
2118 
2119 	poll_threads();
2120 
2121 	set_thread(1);
2122 
2123 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2124 	CU_ASSERT(rc == 0);
2125 
2126 	poll_threads();
2127 	spdk_delay_us(1000);
2128 	poll_threads();
2129 
2130 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2131 }
2132 
2133 static void
2134 test_add_remove_trid(void)
2135 {
2136 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2137 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2138 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2139 	const int STRING_SIZE = 32;
2140 	const char *attached_names[STRING_SIZE];
2141 	struct nvme_path_id *ctrid;
2142 	int rc;
2143 
2144 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2145 	ut_init_trid(&path1.trid);
2146 	ut_init_trid2(&path2.trid);
2147 	ut_init_trid3(&path3.trid);
2148 
2149 	set_thread(0);
2150 
2151 	g_ut_attach_ctrlr_status = 0;
2152 	g_ut_attach_bdev_count = 0;
2153 
2154 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2155 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2156 
2157 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2158 			      attach_ctrlr_done, NULL, NULL, false);
2159 	CU_ASSERT(rc == 0);
2160 
2161 	spdk_delay_us(1000);
2162 	poll_threads();
2163 
2164 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2165 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2166 
2167 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2168 
2169 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2170 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2171 
2172 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2173 			      attach_ctrlr_done, NULL, NULL, false);
2174 	CU_ASSERT(rc == 0);
2175 
2176 	spdk_delay_us(1000);
2177 	poll_threads();
2178 
2179 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2180 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2181 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2182 			break;
2183 		}
2184 	}
2185 	CU_ASSERT(ctrid != NULL);
2186 
2187 	/* trid3 is not in the registered list. */
2188 	rc = bdev_nvme_delete("nvme0", &path3);
2189 	CU_ASSERT(rc == -ENXIO);
2190 
2191 	/* trid2 is not in use, so it is simply removed. */
2192 	rc = bdev_nvme_delete("nvme0", &path2);
2193 	CU_ASSERT(rc == 0);
2194 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2195 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2196 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2197 	}
2198 
2199 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2200 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2201 
2202 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
2203 			      attach_ctrlr_done, NULL, NULL, false);
2204 	CU_ASSERT(rc == 0);
2205 
2206 	spdk_delay_us(1000);
2207 	poll_threads();
2208 
2209 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2210 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2211 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2212 			break;
2213 		}
2214 	}
2215 	CU_ASSERT(ctrid != NULL);
2216 
2217 	/* path1 is currently used and path3 is an alternative path.
2218 	 * If we remove path1, the active path changes to path3.
2219 	 */
2220 	rc = bdev_nvme_delete("nvme0", &path1);
2221 	CU_ASSERT(rc == 0);
2222 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2223 	CU_ASSERT(nvme_ctrlr->resetting == true);
2224 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2225 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2226 	}
2227 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2228 
2229 	poll_threads();
2230 
2231 	CU_ASSERT(nvme_ctrlr->resetting == false);
2232 
2233 	/* path3 is the current and only path. If we remove path3, the corresponding
2234 	 * nvme_ctrlr is removed.
2235 	 */
2236 	rc = bdev_nvme_delete("nvme0", &path3);
2237 	CU_ASSERT(rc == 0);
2238 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2239 
2240 	poll_threads();
2241 	spdk_delay_us(1000);
2242 	poll_threads();
2243 
2244 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2245 
2246 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2247 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2248 
2249 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2250 			      attach_ctrlr_done, NULL, NULL, false);
2251 	CU_ASSERT(rc == 0);
2252 
2253 	spdk_delay_us(1000);
2254 	poll_threads();
2255 
2256 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2257 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2258 
2259 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2260 
2261 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2262 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2263 
2264 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2265 			      attach_ctrlr_done, NULL, NULL, false);
2266 	CU_ASSERT(rc == 0);
2267 
2268 	spdk_delay_us(1000);
2269 	poll_threads();
2270 
2271 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2272 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2273 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2274 			break;
2275 		}
2276 	}
2277 	CU_ASSERT(ctrid != NULL);
2278 
2279 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2280 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2281 	CU_ASSERT(rc == 0);
2282 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2283 
2284 	poll_threads();
2285 	spdk_delay_us(1000);
2286 	poll_threads();
2287 
2288 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2289 }
2290 
2291 static void
2292 test_abort(void)
2293 {
2294 	struct spdk_nvme_transport_id trid = {};
2295 	struct spdk_nvme_ctrlr *ctrlr;
2296 	struct nvme_ctrlr *nvme_ctrlr;
2297 	const int STRING_SIZE = 32;
2298 	const char *attached_names[STRING_SIZE];
2299 	struct nvme_bdev *bdev;
2300 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2301 	struct spdk_io_channel *ch1, *ch2;
2302 	struct nvme_bdev_channel *nbdev_ch1;
2303 	struct nvme_io_path *io_path1;
2304 	struct nvme_ctrlr_channel *ctrlr_ch1;
2305 	int rc;
2306 
2307 	/* Create a ctrlr on thread 1, and submit I/O and admin requests to be aborted on
2308 	 * thread 0. Abort requests for I/O commands are submitted on thread 0, and abort
2309 	 * requests for admin commands are submitted on thread 1. Both should succeed.
2310 	 */
2311 
2312 	ut_init_trid(&trid);
2313 
2314 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2315 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2316 
2317 	g_ut_attach_ctrlr_status = 0;
2318 	g_ut_attach_bdev_count = 1;
2319 
2320 	set_thread(1);
2321 
2322 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2323 			      attach_ctrlr_done, NULL, NULL, false);
2324 	CU_ASSERT(rc == 0);
2325 
2326 	spdk_delay_us(1000);
2327 	poll_threads();
2328 
2329 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2330 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2331 
2332 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2333 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2334 
2335 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2336 	ut_bdev_io_set_buf(write_io);
2337 
2338 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2339 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2340 
2341 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2342 
2343 	set_thread(0);
2344 
2345 	ch1 = spdk_get_io_channel(bdev);
2346 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2347 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2348 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2349 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2350 	ctrlr_ch1 = io_path1->ctrlr_ch;
2351 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2352 
2353 	set_thread(1);
2354 
2355 	ch2 = spdk_get_io_channel(bdev);
2356 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2357 
2358 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2359 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2360 
2361 	/* Aborting the already completed request should fail. */
2362 	write_io->internal.in_submit_request = true;
2363 	bdev_nvme_submit_request(ch1, write_io);
2364 	poll_threads();
2365 
2366 	CU_ASSERT(write_io->internal.in_submit_request == false);
2367 
2368 	abort_io->u.abort.bio_to_abort = write_io;
2369 	abort_io->internal.in_submit_request = true;
2370 
2371 	bdev_nvme_submit_request(ch1, abort_io);
2372 
2373 	poll_threads();
2374 
2375 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2376 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2377 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2378 
2379 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2380 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2381 
2382 	admin_io->internal.in_submit_request = true;
2383 	bdev_nvme_submit_request(ch1, admin_io);
2384 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2385 	poll_threads();
2386 
2387 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2388 
2389 	abort_io->u.abort.bio_to_abort = admin_io;
2390 	abort_io->internal.in_submit_request = true;
2391 
2392 	bdev_nvme_submit_request(ch2, abort_io);
2393 
2394 	poll_threads();
2395 
2396 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2397 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2398 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2399 
2400 	/* Aborting the write request should succeed. */
2401 	write_io->internal.in_submit_request = true;
2402 	bdev_nvme_submit_request(ch1, write_io);
2403 
2404 	CU_ASSERT(write_io->internal.in_submit_request == true);
2405 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2406 
2407 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2408 	abort_io->u.abort.bio_to_abort = write_io;
2409 	abort_io->internal.in_submit_request = true;
2410 
2411 	bdev_nvme_submit_request(ch1, abort_io);
2412 
2413 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2414 	poll_threads();
2415 
2416 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2417 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2418 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2419 	CU_ASSERT(write_io->internal.in_submit_request == false);
2420 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2421 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2422 
2423 	/* Aborting the admin request should succeed. */
2424 	admin_io->internal.in_submit_request = true;
2425 	bdev_nvme_submit_request(ch1, admin_io);
2426 
2427 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2428 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2429 
2430 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2431 	abort_io->u.abort.bio_to_abort = admin_io;
2432 	abort_io->internal.in_submit_request = true;
2433 
2434 	bdev_nvme_submit_request(ch2, abort_io);
2435 
2436 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2437 	poll_threads();
2438 
2439 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2440 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2441 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2442 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2443 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2444 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2445 
2446 	set_thread(0);
2447 
2448 	/* If qpair is disconnected, it is freed and then reconnected via resetting
2449 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2450 	 * while resetting the nvme_ctrlr.
2451 	 */
2452 	ctrlr_ch1->qpair->is_connected = false;
2453 
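	/* Three poller iterations detect the disconnected qpair, free it, and
	 * start resetting the nvme_ctrlr.
	 */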
2454 	poll_thread_times(0, 3);
2455 
2456 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2457 	CU_ASSERT(nvme_ctrlr->resetting == true);
2458 
2459 	write_io->internal.in_submit_request = true;
2460 
2461 	bdev_nvme_submit_request(ch1, write_io);
2462 
2463 	CU_ASSERT(write_io->internal.in_submit_request == true);
2464 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2465 
2466 	/* Aborting the queued write request should succeed immediately. */
2467 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2468 	abort_io->u.abort.bio_to_abort = write_io;
2469 	abort_io->internal.in_submit_request = true;
2470 
2471 	bdev_nvme_submit_request(ch1, abort_io);
2472 
2473 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2474 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2475 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2476 	CU_ASSERT(write_io->internal.in_submit_request == false);
2477 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2478 
2479 	spdk_put_io_channel(ch1);
2480 
2481 	set_thread(1);
2482 
2483 	spdk_put_io_channel(ch2);
2484 
2485 	poll_threads();
2486 
2487 	free(write_io);
2488 	free(admin_io);
2489 	free(abort_io);
2490 
2491 	set_thread(1);
2492 
2493 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2494 	CU_ASSERT(rc == 0);
2495 
2496 	poll_threads();
2497 	spdk_delay_us(1000);
2498 	poll_threads();
2499 
2500 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2501 }
2502 
2503 static void
2504 test_get_io_qpair(void)
2505 {
2506 	struct spdk_nvme_transport_id trid = {};
2507 	struct spdk_nvme_ctrlr ctrlr = {};
2508 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2509 	struct spdk_io_channel *ch;
2510 	struct nvme_ctrlr_channel *ctrlr_ch;
2511 	struct spdk_nvme_qpair *qpair;
2512 	int rc;
2513 
2514 	ut_init_trid(&trid);
2515 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2516 
2517 	set_thread(0);
2518 
2519 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
2520 	CU_ASSERT(rc == 0);
2521 
2522 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2523 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2524 
2525 	ch = spdk_get_io_channel(nvme_ctrlr);
2526 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2527 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2528 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2529 
2530 	qpair = bdev_nvme_get_io_qpair(ch);
2531 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2532 
2533 	spdk_put_io_channel(ch);
2534 
2535 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2536 	CU_ASSERT(rc == 0);
2537 
2538 	poll_threads();
2539 	spdk_delay_us(1000);
2540 	poll_threads();
2541 
2542 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2543 }
2544 
2545 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2546  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2547  * Add a test case to avoid regression for this scenario. spdk_bdev_unregister()
2548  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2549  */
2550 static void
2551 test_bdev_unregister(void)
2552 {
2553 	struct spdk_nvme_transport_id trid = {};
2554 	struct spdk_nvme_ctrlr *ctrlr;
2555 	struct nvme_ctrlr *nvme_ctrlr;
2556 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2557 	const int STRING_SIZE = 32;
2558 	const char *attached_names[STRING_SIZE];
2559 	struct nvme_bdev *bdev1, *bdev2;
2560 	int rc;
2561 
2562 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2563 	ut_init_trid(&trid);
2564 
2565 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2566 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2567 
2568 	g_ut_attach_ctrlr_status = 0;
2569 	g_ut_attach_bdev_count = 2;
2570 
2571 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2572 			      attach_ctrlr_done, NULL, NULL, false);
2573 	CU_ASSERT(rc == 0);
2574 
2575 	spdk_delay_us(1000);
2576 	poll_threads();
2577 
2578 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2579 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2580 
2581 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2582 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2583 
2584 	bdev1 = nvme_ns1->bdev;
2585 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2586 
2587 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2588 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2589 
2590 	bdev2 = nvme_ns2->bdev;
2591 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2592 
2593 	bdev_nvme_destruct(&bdev1->disk);
2594 	bdev_nvme_destruct(&bdev2->disk);
2595 
2596 	poll_threads();
2597 
2598 	CU_ASSERT(nvme_ns1->bdev == NULL);
2599 	CU_ASSERT(nvme_ns2->bdev == NULL);
2600 
2601 	nvme_ctrlr->destruct = true;
2602 	_nvme_ctrlr_destruct(nvme_ctrlr);
2603 
2604 	poll_threads();
2605 	spdk_delay_us(1000);
2606 	poll_threads();
2607 
2608 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2609 }
2610 
2611 static void
2612 test_compare_ns(void)
2613 {
2614 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2615 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2616 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2617 
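	/* bdev_nvme_compare_ns() should treat two namespaces as identical unless
	 * one of EUI64, NGUID, or UUID is present and differs.
	 */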
2618 	/* No IDs are defined. */
2619 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2620 
2621 	/* Only EUI64 are defined and not matched. */
2622 	nsdata1.eui64 = 0xABCDEF0123456789;
2623 	nsdata2.eui64 = 0xBBCDEF0123456789;
2624 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2625 
2626 	/* Only EUI64 are defined and matched. */
2627 	nsdata2.eui64 = 0xABCDEF0123456789;
2628 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2629 
2630 	/* Only NGUID are defined and not matched. */
2631 	nsdata1.eui64 = 0x0;
2632 	nsdata2.eui64 = 0x0;
2633 	nsdata1.nguid[0] = 0x12;
2634 	nsdata2.nguid[0] = 0x10;
2635 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2636 
2637 	/* Only NGUID are defined and matched. */
2638 	nsdata2.nguid[0] = 0x12;
2639 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2640 
2641 	/* Only UUID are defined and not matched. */
2642 	nsdata1.nguid[0] = 0x0;
2643 	nsdata2.nguid[0] = 0x0;
2644 	ns1.uuid.u.raw[0] = 0xAA;
2645 	ns2.uuid.u.raw[0] = 0xAB;
2646 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2647 
2648 	/* Only UUID are defined and matched. */
2649 	ns1.uuid.u.raw[0] = 0xAB;
2650 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2651 
2652 	/* All EUI64, NGUID, and UUID are defined and matched. */
2653 	nsdata1.eui64 = 0x123456789ABCDEF;
2654 	nsdata2.eui64 = 0x123456789ABCDEF;
2655 	nsdata1.nguid[15] = 0x34;
2656 	nsdata2.nguid[15] = 0x34;
2657 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2658 }
2659 
2660 static void
2661 test_init_ana_log_page(void)
2662 {
2663 	struct spdk_nvme_transport_id trid = {};
2664 	struct spdk_nvme_ctrlr *ctrlr;
2665 	struct nvme_ctrlr *nvme_ctrlr;
2666 	const int STRING_SIZE = 32;
2667 	const char *attached_names[STRING_SIZE];
2668 	int rc;
2669 
2670 	set_thread(0);
2671 
2672 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2673 	ut_init_trid(&trid);
2674 
2675 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2676 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2677 
2678 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2679 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2680 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2681 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2682 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2683 
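	/* The ANA log page is read during attach. Each namespace should inherit the
	 * ANA state set above, and a bdev should be created for every namespace
	 * regardless of its ANA state.
	 */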
2684 	g_ut_attach_ctrlr_status = 0;
2685 	g_ut_attach_bdev_count = 5;
2686 
2687 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2688 			      attach_ctrlr_done, NULL, NULL, false);
2689 	CU_ASSERT(rc == 0);
2690 
2691 	spdk_delay_us(1000);
2692 	poll_threads();
2693 
2694 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2695 	poll_threads();
2696 
2697 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2698 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2699 
2700 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2701 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2702 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2707 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2708 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2709 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2710 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2711 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2712 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2713 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2714 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2715 
2716 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2717 	CU_ASSERT(rc == 0);
2718 
2719 	poll_threads();
2720 	spdk_delay_us(1000);
2721 	poll_threads();
2722 
2723 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2724 }
2725 
2726 static void
2727 init_accel(void)
2728 {
2729 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2730 				sizeof(int), "accel_p");
2731 }
2732 
2733 static void
2734 fini_accel(void)
2735 {
2736 	spdk_io_device_unregister(g_accel_p, NULL);
2737 }
2738 
2739 static void
2740 test_get_memory_domains(void)
2741 {
2742 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2743 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2744 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2745 	struct spdk_memory_domain *domains[2] = {};
2746 	int rc = 0;
2747 
2748 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2749 
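	/* The mocked spdk_nvme_ctrlr_get_memory_domain() returns the number of
	 * memory domains the ctrlr reports.
	 */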
2750 	/* nvme controller doesn't have memory domains */
2751 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2752 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2753 	CU_ASSERT(rc == 0);
2754 
2755 	/* nvme controller has a memory domain */
2756 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2757 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2758 	CU_ASSERT(rc == 1);
2759 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2760 }
2761 
2762 static void
2763 test_reconnect_qpair(void)
2764 {
2765 	struct spdk_nvme_transport_id trid = {};
2766 	struct spdk_nvme_ctrlr *ctrlr;
2767 	struct nvme_ctrlr *nvme_ctrlr;
2768 	const int STRING_SIZE = 32;
2769 	const char *attached_names[STRING_SIZE];
2770 	struct nvme_bdev *bdev;
2771 	struct spdk_io_channel *ch1, *ch2;
2772 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
2773 	struct nvme_io_path *io_path1, *io_path2;
2774 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
2775 	int rc;
2776 
2777 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2778 	ut_init_trid(&trid);
2779 
2780 	set_thread(0);
2781 
2782 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2783 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2784 
2785 	g_ut_attach_ctrlr_status = 0;
2786 	g_ut_attach_bdev_count = 1;
2787 
2788 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2789 			      attach_ctrlr_done, NULL, NULL, false);
2790 	CU_ASSERT(rc == 0);
2791 
2792 	spdk_delay_us(1000);
2793 	poll_threads();
2794 
2795 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2796 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2797 
2798 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2799 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2800 
2801 	ch1 = spdk_get_io_channel(bdev);
2802 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2803 
2804 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2805 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2806 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2807 	ctrlr_ch1 = io_path1->ctrlr_ch;
2808 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2809 
2810 	set_thread(1);
2811 
2812 	ch2 = spdk_get_io_channel(bdev);
2813 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2814 
2815 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
2816 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
2817 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
2818 	ctrlr_ch2 = io_path2->ctrlr_ch;
2819 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
2820 
2821 	/* If a qpair is disconnected, it is freed and then reconnected via
2822 	 * resetting the corresponding nvme_ctrlr.
2823 	 */
2824 	ctrlr_ch2->qpair->is_connected = false;
2825 	ctrlr->is_failed = true;
2826 
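	/* Step through the reset sequence one poller iteration at a time: the dead
	 * qpair is detected and freed, the other thread's qpair is freed, the ctrlr
	 * is reset, both qpairs are recreated, and finally the reset completes.
	 */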
2827 	poll_thread_times(1, 1);
2828 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2829 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2830 	CU_ASSERT(nvme_ctrlr->resetting == true);
2831 
2832 	poll_thread_times(0, 1);
2833 	poll_thread_times(1, 1);
2834 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2835 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2836 	CU_ASSERT(ctrlr->is_failed == true);
2837 
2838 	poll_thread_times(1, 1);
2839 	CU_ASSERT(ctrlr->is_failed == false);
2840 
2841 	poll_thread_times(0, 1);
2842 	poll_thread_times(1, 1);
2843 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2844 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
2845 	CU_ASSERT(nvme_ctrlr->resetting == true);
2846 
2847 	poll_thread_times(1, 1);
2848 	poll_thread_times(0, 1);
2849 	poll_thread_times(1, 1);
2850 	poll_thread_times(1, 1);
2851 	CU_ASSERT(nvme_ctrlr->resetting == false);
2852 
2853 	poll_threads();
2854 
2855 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
2856 	 * fails, the qpair is just freed.
2857 	 */
2858 	ctrlr_ch2->qpair->is_connected = false;
2859 	ctrlr->is_failed = true;
2860 	ctrlr->fail_reset = true;
2861 
2862 	poll_thread_times(1, 1);
2863 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2864 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2865 	CU_ASSERT(nvme_ctrlr->resetting == true);
2866 
2867 	poll_thread_times(0, 1);
2868 	poll_thread_times(1, 1);
2869 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2870 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2871 	CU_ASSERT(ctrlr->is_failed == true);
2872 
2873 	poll_thread_times(1, 1);
2874 	poll_thread_times(0, 1);
2875 	poll_thread_times(1, 1);
2876 	poll_thread_times(1, 1);
2877 	CU_ASSERT(ctrlr->is_failed == true);
2878 	CU_ASSERT(nvme_ctrlr->resetting == false);
2879 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2880 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2881 
2882 	poll_threads();
2883 
2884 	spdk_put_io_channel(ch2);
2885 
2886 	set_thread(0);
2887 
2888 	spdk_put_io_channel(ch1);
2889 
2890 	poll_threads();
2891 
2892 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2893 	CU_ASSERT(rc == 0);
2894 
2895 	poll_threads();
2896 	spdk_delay_us(1000);
2897 	poll_threads();
2898 
2899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2900 }
2901 
2902 static void
2903 test_create_bdev_ctrlr(void)
2904 {
2905 	struct nvme_path_id path1 = {}, path2 = {};
2906 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
2907 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
2908 	const int STRING_SIZE = 32;
2909 	const char *attached_names[STRING_SIZE];
2910 	int rc;
2911 
2912 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2913 	ut_init_trid(&path1.trid);
2914 	ut_init_trid2(&path2.trid);
2915 
2916 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2917 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2918 
2919 	g_ut_attach_ctrlr_status = 0;
2920 	g_ut_attach_bdev_count = 0;
2921 
2922 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2923 			      attach_ctrlr_done, NULL, NULL, true);
	CU_ASSERT(rc == 0);
2924 
2925 	spdk_delay_us(1000);
2926 	poll_threads();
2927 
2928 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2929 	poll_threads();
2930 
2931 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
2932 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
2933 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2934 
2935 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
2936 	g_ut_attach_ctrlr_status = -EINVAL;
2937 
2938 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2939 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2940 
2941 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
2942 
2943 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2944 			      attach_ctrlr_done, NULL, NULL, true);
2945 	CU_ASSERT(rc == 0);
2946 
2947 	spdk_delay_us(1000);
2948 	poll_threads();
2949 
2950 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2951 	poll_threads();
2952 
2953 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
2954 
2955 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
2956 	g_ut_attach_ctrlr_status = 0;
2957 
2958 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2959 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2960 
2961 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2962 			      attach_ctrlr_done, NULL, NULL, true);
2963 	CU_ASSERT(rc == 0);
2964 
2965 	spdk_delay_us(1000);
2966 	poll_threads();
2967 
2968 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2969 	poll_threads();
2970 
2971 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2972 
2973 	/* Delete two ctrlrs at once. */
2974 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2975 	CU_ASSERT(rc == 0);
2976 
2977 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
2978 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2979 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2980 
2981 	poll_threads();
2982 	spdk_delay_us(1000);
2983 	poll_threads();
2984 
2985 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
2986 
2987 	/* Add two ctrlrs and delete them one by one. */
2988 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2989 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2990 
2991 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2992 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2993 
2994 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2995 			      attach_ctrlr_done, NULL, NULL, true);
2996 	CU_ASSERT(rc == 0);
2997 
2998 	spdk_delay_us(1000);
2999 	poll_threads();
3000 
3001 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3002 	poll_threads();
3003 
3004 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3005 			      attach_ctrlr_done, NULL, NULL, true);
3006 	CU_ASSERT(rc == 0);
3007 
3008 	spdk_delay_us(1000);
3009 	poll_threads();
3010 
3011 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3012 	poll_threads();
3013 
3014 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3015 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3016 
3017 	rc = bdev_nvme_delete("nvme0", &path1);
3018 	CU_ASSERT(rc == 0);
3019 
3020 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3021 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3022 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3023 
3024 	poll_threads();
3025 	spdk_delay_us(1000);
3026 	poll_threads();
3027 
3028 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3029 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3030 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3031 
3032 	rc = bdev_nvme_delete("nvme0", &path2);
3033 	CU_ASSERT(rc == 0);
3034 
3035 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3036 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3037 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3038 
3039 	poll_threads();
3040 	spdk_delay_us(1000);
3041 	poll_threads();
3042 
3043 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3044 }
3045 
3046 static struct nvme_ns *
3047 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3048 {
3049 	struct nvme_ns *nvme_ns;
3050 
3051 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3052 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3053 			return nvme_ns;
3054 		}
3055 	}
3056 
3057 	return NULL;
3058 }
3059 
3060 static void
3061 test_add_multi_ns_to_bdev(void)
3062 {
3063 	struct nvme_path_id path1 = {}, path2 = {};
3064 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3065 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3066 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3067 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3068 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3069 	const int STRING_SIZE = 32;
3070 	const char *attached_names[STRING_SIZE];
3071 	int rc;
3072 
3073 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3074 	ut_init_trid(&path1.trid);
3075 	ut_init_trid2(&path2.trid);
3076 
3077 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3078 
3079 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3080 	 * 3rd, and 4th namespaces are populated.
3081 	 */
3082 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3083 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3084 
3085 	ctrlr1->ns[1].is_active = false;
3086 	ctrlr1->ns[4].is_active = false;
3087 	memset(&ctrlr1->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3088 	memset(&ctrlr1->ns[2].uuid, 0x3, sizeof(struct spdk_uuid));
3089 	memset(&ctrlr1->ns[3].uuid, 0x4, sizeof(struct spdk_uuid));
3090 
3091 	g_ut_attach_ctrlr_status = 0;
3092 	g_ut_attach_bdev_count = 3;
3093 
3094 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3095 			      attach_ctrlr_done, NULL, NULL, true);
3096 	CU_ASSERT(rc == 0);
3097 
3098 	spdk_delay_us(1000);
3099 	poll_threads();
3100 
3101 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3102 	poll_threads();
3103 
3104 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
3105 	 * and 4th namespaces are populated. The uuid of the 4th namespace is different,
3106 	 * and hence adding the 4th namespace to the shared bdev should fail.
3107 	 */
3108 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3109 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3110 
3111 	ctrlr2->ns[2].is_active = false;
3112 	ctrlr2->ns[4].is_active = false;
3113 	memset(&ctrlr2->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3114 	memset(&ctrlr2->ns[1].uuid, 0x2, sizeof(struct spdk_uuid));
3115 	memset(&ctrlr2->ns[3].uuid, 0x44, sizeof(struct spdk_uuid));
3116 
3117 	g_ut_attach_ctrlr_status = 0;
3118 	g_ut_attach_bdev_count = 2;
3119 
3120 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3121 			      attach_ctrlr_done, NULL, NULL, true);
3122 	CU_ASSERT(rc == 0);
3123 
3124 	spdk_delay_us(1000);
3125 	poll_threads();
3126 
3127 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3128 	poll_threads();
3129 
3130 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3131 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3132 
3133 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3134 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3135 
3136 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3137 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3138 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3139 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3140 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3141 
3142 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3143 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3144 
3145 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3146 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3147 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3148 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3149 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3150 
3151 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3152 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3153 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3154 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3155 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3156 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3157 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3158 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3159 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3160 
3161 	CU_ASSERT(bdev1->ref == 2);
3162 	CU_ASSERT(bdev2->ref == 1);
3163 	CU_ASSERT(bdev3->ref == 1);
3164 	CU_ASSERT(bdev4->ref == 1);
3165 
3166 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3167 	rc = bdev_nvme_delete("nvme0", &path1);
3168 	CU_ASSERT(rc == 0);
3169 
3170 	poll_threads();
3171 	spdk_delay_us(1000);
3172 	poll_threads();
3173 
3174 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3175 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3176 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3177 
3178 	rc = bdev_nvme_delete("nvme0", &path2);
3179 	CU_ASSERT(rc == 0);
3180 
3181 	poll_threads();
3182 	spdk_delay_us(1000);
3183 	poll_threads();
3184 
3185 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3186 
3187 	/* Test if an nvme_bdev whose namespace is shared between two ctrlrs
3188 	 * can be deleted when the bdev subsystem shuts down.
3189 	 */
3190 	g_ut_attach_bdev_count = 1;
3191 
3192 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3193 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3194 
3195 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3196 
3197 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3198 			      attach_ctrlr_done, NULL, NULL, true);
3199 	CU_ASSERT(rc == 0);
3200 
3201 	spdk_delay_us(1000);
3202 	poll_threads();
3203 
3204 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3205 	poll_threads();
3206 
3207 	ut_init_trid2(&path2.trid);
3208 
3209 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3210 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3211 
3212 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3213 
3214 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3215 			      attach_ctrlr_done, NULL, NULL, true);
3216 	CU_ASSERT(rc == 0);
3217 
3218 	spdk_delay_us(1000);
3219 	poll_threads();
3220 
3221 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3222 	poll_threads();
3223 
3224 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3225 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3226 
3227 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3228 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3229 
3230 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3231 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3232 
3233 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3234 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3235 
3236 	/* Check if the nvme_bdev has two nvme_ns. */
3237 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3238 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3239 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3240 
3241 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3242 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3243 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3244 
3245 	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down. */
3246 	bdev_nvme_destruct(&bdev1->disk);
3247 
3248 	poll_threads();
3249 
3250 	CU_ASSERT(nvme_ns1->bdev == NULL);
3251 	CU_ASSERT(nvme_ns2->bdev == NULL);
3252 
3253 	nvme_ctrlr1->destruct = true;
3254 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3255 
3256 	poll_threads();
3257 	spdk_delay_us(1000);
3258 	poll_threads();
3259 
3260 	nvme_ctrlr2->destruct = true;
3261 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3262 
3263 	poll_threads();
3264 	spdk_delay_us(1000);
3265 	poll_threads();
3266 
3267 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3268 }
3269 
3270 static void
3271 test_add_multi_io_paths_to_nbdev_ch(void)
3272 {
3273 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3274 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3275 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3276 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3277 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3278 	const int STRING_SIZE = 32;
3279 	const char *attached_names[STRING_SIZE];
3280 	struct nvme_bdev *bdev;
3281 	struct spdk_io_channel *ch;
3282 	struct nvme_bdev_channel *nbdev_ch;
3283 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3284 	int rc;
3285 
3286 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3287 	ut_init_trid(&path1.trid);
3288 	ut_init_trid2(&path2.trid);
3289 	ut_init_trid3(&path3.trid);
3290 	g_ut_attach_ctrlr_status = 0;
3291 	g_ut_attach_bdev_count = 1;
3292 
3293 	set_thread(1);
3294 
3295 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3296 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3297 
3298 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3299 
3300 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3301 			      attach_ctrlr_done, NULL, NULL, true);
3302 	CU_ASSERT(rc == 0);
3303 
3304 	spdk_delay_us(1000);
3305 	poll_threads();
3306 
3307 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3308 	poll_threads();
3309 
3310 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3311 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3312 
3313 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3314 
3315 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3316 			      attach_ctrlr_done, NULL, NULL, true);
3317 	CU_ASSERT(rc == 0);
3318 
3319 	spdk_delay_us(1000);
3320 	poll_threads();
3321 
3322 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3323 	poll_threads();
3324 
3325 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3326 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3327 
3328 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3329 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3330 
3331 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3332 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3333 
3334 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3335 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3336 
3337 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3338 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3339 
3340 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3341 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3342 
3343 	set_thread(0);
3344 
3345 	ch = spdk_get_io_channel(bdev);
3346 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3347 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3348 
3349 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3350 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3351 
3352 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3353 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3354 
3355 	set_thread(1);
3356 
3357 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3358 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3359 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3360 
3361 	memset(&ctrlr3->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3362 
3363 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
3364 			      attach_ctrlr_done, NULL, NULL, true);
3365 	CU_ASSERT(rc == 0);
3366 
3367 	spdk_delay_us(1000);
3368 	poll_threads();
3369 
3370 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3371 	poll_threads();
3372 
3373 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3374 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3375 
3376 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3377 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3378 
3379 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3380 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3381 
3382 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3383 	rc = bdev_nvme_delete("nvme0", &path2);
3384 	CU_ASSERT(rc == 0);
3385 
3386 	poll_threads();
3387 	spdk_delay_us(1000);
3388 	poll_threads();
3389 
3390 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3391 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3392 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3393 
3394 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3395 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3396 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3397 
3398 	set_thread(0);
3399 
3400 	spdk_put_io_channel(ch);
3401 
3402 	poll_threads();
3403 
3404 	set_thread(1);
3405 
3406 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3407 	CU_ASSERT(rc == 0);
3408 
3409 	poll_threads();
3410 	spdk_delay_us(1000);
3411 	poll_threads();
3412 
3413 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3414 }
3415 
3416 static void
3417 test_admin_path(void)
3418 {
3419 	struct nvme_path_id path1 = {}, path2 = {};
3420 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3421 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3422 	const int STRING_SIZE = 32;
3423 	const char *attached_names[STRING_SIZE];
3424 	struct nvme_bdev *bdev;
3425 	struct spdk_io_channel *ch;
3426 	struct spdk_bdev_io *bdev_io;
3427 	int rc;
3428 
3429 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3430 	ut_init_trid(&path1.trid);
3431 	ut_init_trid2(&path2.trid);
3432 	g_ut_attach_ctrlr_status = 0;
3433 	g_ut_attach_bdev_count = 1;
3434 
3435 	set_thread(0);
3436 
3437 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3438 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3439 
3440 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3441 
3442 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3443 			      attach_ctrlr_done, NULL, NULL, true);
3444 	CU_ASSERT(rc == 0);
3445 
3446 	spdk_delay_us(1000);
3447 	poll_threads();
3448 
3449 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3450 	poll_threads();
3451 
3452 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3453 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3454 
3455 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3456 
3457 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3458 			      attach_ctrlr_done, NULL, NULL, true);
3459 	CU_ASSERT(rc == 0);
3460 
3461 	spdk_delay_us(1000);
3462 	poll_threads();
3463 
3464 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3465 	poll_threads();
3466 
3467 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3468 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3469 
3470 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3471 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3472 
3473 	ch = spdk_get_io_channel(bdev);
3474 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3475 
3476 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3477 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3478 
3479 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3480 	 * submitted to ctrlr2.
3481 	 */
3482 	ctrlr1->is_failed = true;
3483 	bdev_io->internal.in_submit_request = true;
3484 
3485 	bdev_nvme_submit_request(ch, bdev_io);
3486 
3487 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3488 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3489 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3490 
3491 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3492 	poll_threads();
3493 
3494 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3495 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3496 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3497 
3498 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
3499 	ctrlr2->is_failed = true;
3500 	bdev_io->internal.in_submit_request = true;
3501 
3502 	bdev_nvme_submit_request(ch, bdev_io);
3503 
3504 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3505 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3506 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3507 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3508 
3509 	free(bdev_io);
3510 
3511 	spdk_put_io_channel(ch);
3512 
3513 	poll_threads();
3514 
3515 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3516 	CU_ASSERT(rc == 0);
3517 
3518 	poll_threads();
3519 	spdk_delay_us(1000);
3520 	poll_threads();
3521 
3522 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3523 }
3524 
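/* Test helper: walk nbdev_ch->io_path_list and return the io_path whose
 * ctrlr channel belongs to the given nvme_ctrlr, or NULL if none matches.
 */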
3525 static struct nvme_io_path *
3526 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3527 			struct nvme_ctrlr *nvme_ctrlr)
3528 {
3529 	struct nvme_io_path *io_path;
3530 	struct nvme_ctrlr *_nvme_ctrlr;
3531 
3532 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3533 		_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
3534 		if (_nvme_ctrlr == nvme_ctrlr) {
3535 			return io_path;
3536 		}
3537 	}
3538 
3539 	return NULL;
3540 }
3541 
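/* Verify that resetting an nvme_bdev_ctrlr resets its underlying ctrlrs
 * sequentially (ctrlr1, then ctrlr2), and that a second reset submitted
 * concurrently from another thread is queued behind the first and still
 * completes successfully.
 */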
3542 static void
3543 test_reset_bdev_ctrlr(void)
3544 {
3545 	struct nvme_path_id path1 = {}, path2 = {};
3546 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3547 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3548 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3549 	struct nvme_path_id *curr_path1, *curr_path2;
3550 	const int STRING_SIZE = 32;
3551 	const char *attached_names[STRING_SIZE];
3552 	struct nvme_bdev *bdev;
3553 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3554 	struct nvme_bdev_io *first_bio;
3555 	struct spdk_io_channel *ch1, *ch2;
3556 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3557 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3558 	int rc;
3559 
3560 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3561 	ut_init_trid(&path1.trid);
3562 	ut_init_trid2(&path2.trid);
3563 	g_ut_attach_ctrlr_status = 0;
3564 	g_ut_attach_bdev_count = 1;
3565 
3566 	set_thread(0);
3567 
3568 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3569 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3570 
3571 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3572 			      attach_ctrlr_done, NULL, NULL, true);
3573 	CU_ASSERT(rc == 0);
3574 
3575 	spdk_delay_us(1000);
3576 	poll_threads();
3577 
3578 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3579 	poll_threads();
3580 
3581 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3582 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3583 
3584 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3585 			      attach_ctrlr_done, NULL, NULL, true);
3586 	CU_ASSERT(rc == 0);
3587 
3588 	spdk_delay_us(1000);
3589 	poll_threads();
3590 
3591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3592 	poll_threads();
3593 
3594 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3595 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3596 
3597 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3598 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3599 
3600 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3601 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3602 
3603 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3604 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3605 
3606 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3607 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3608 
3609 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3610 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3611 
3612 	set_thread(0);
3613 
3614 	ch1 = spdk_get_io_channel(bdev);
3615 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3616 
3617 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3618 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3619 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3620 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3621 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3622 
3623 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3624 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3625 
3626 	set_thread(1);
3627 
3628 	ch2 = spdk_get_io_channel(bdev);
3629 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3630 
3631 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3632 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3633 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3634 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3635 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3636 
3637 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3638 
3639 	/* The first reset request from bdev_io is submitted on thread 0.
3640 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
3641 	 *
3642 	 * A few extra polls are necessary after resetting ctrlr1 to check
3643 	 * pending reset requests for ctrlr1.
3644 	 */
3645 	ctrlr1->is_failed = true;
3646 	curr_path1->is_failed = true;
3647 	ctrlr2->is_failed = true;
3648 	curr_path2->is_failed = true;
3649 
3650 	set_thread(0);
3651 
3652 	bdev_nvme_submit_request(ch1, first_bdev_io);
3653 	CU_ASSERT(first_bio->io_path == io_path11);
3654 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3655 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3656 
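	/* Step through the reset one poller iteration at a time: the qpair of
	 * io_path11 on thread 0 is deleted first, then the qpair of io_path21
	 * on thread 1, before ctrlr1 itself is reset and the qpairs are
	 * recreated in the same order.
	 */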
3657 	poll_thread_times(0, 1);
3658 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3659 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3660 
3661 	poll_thread_times(1, 1);
3662 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3663 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3664 	CU_ASSERT(ctrlr1->is_failed == true);
3665 
3666 	poll_thread_times(0, 1);
3667 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3668 	CU_ASSERT(ctrlr1->is_failed == false);
3669 	CU_ASSERT(curr_path1->is_failed == true);
3670 
3671 	poll_thread_times(0, 1);
3672 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3673 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3674 
3675 	poll_thread_times(1, 1);
3676 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3677 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3678 
3679 	poll_thread_times(0, 2);
3680 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3681 	poll_thread_times(1, 1);
3682 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3683 	poll_thread_times(0, 1);
3684 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3685 	CU_ASSERT(curr_path1->is_failed == false);
3686 	CU_ASSERT(first_bio->io_path == io_path12);
3687 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3688 
3689 	poll_thread_times(0, 1);
3690 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3691 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3692 
3693 	poll_thread_times(1, 1);
3694 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3695 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3696 	CU_ASSERT(ctrlr2->is_failed == true);
3697 
3698 	poll_thread_times(0, 2);
3699 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3700 	CU_ASSERT(ctrlr2->is_failed == false);
3701 	CU_ASSERT(curr_path2->is_failed == true);
3702 
3703 	poll_thread_times(0, 1);
3704 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3705 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3706 
3707 	poll_thread_times(1, 2);
3708 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3709 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3710 
3711 	poll_thread_times(0, 2);
3712 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3713 	poll_thread_times(1, 1);
3714 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3715 	poll_thread_times(0, 1);
3716 	CU_ASSERT(first_bio->io_path == NULL);
3717 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3718 	CU_ASSERT(curr_path2->is_failed == false);
3719 
3720 	poll_threads();
3721 
3722 	/* There is a race between two reset requests from bdev_io.
3723 	 *
3724 	 * The first reset request is submitted on thread 0, and the second reset
3725 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
3726 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
3727 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
3728 	 * The second is pending on ctrlr2 again. After the first completes resetting
3729 	 * ctrlr2, both complete successfully.
3730 	 */
3731 	ctrlr1->is_failed = true;
3732 	curr_path1->is_failed = true;
3733 	ctrlr2->is_failed = true;
3734 	curr_path2->is_failed = true;
3735 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3736 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3737 
3738 	set_thread(0);
3739 
3740 	bdev_nvme_submit_request(ch1, first_bdev_io);
3741 
3742 	set_thread(1);
3743 
3744 	bdev_nvme_submit_request(ch2, second_bdev_io);
3745 
3746 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3747 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3748 	CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);
3749 
3750 	poll_threads();
3751 
3752 	CU_ASSERT(ctrlr1->is_failed == false);
3753 	CU_ASSERT(curr_path1->is_failed == false);
3754 	CU_ASSERT(ctrlr2->is_failed == false);
3755 	CU_ASSERT(curr_path2->is_failed == false);
3756 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3757 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3758 
3759 	set_thread(0);
3760 
3761 	spdk_put_io_channel(ch1);
3762 
3763 	set_thread(1);
3764 
3765 	spdk_put_io_channel(ch2);
3766 
3767 	poll_threads();
3768 
3769 	set_thread(0);
3770 
3771 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3772 	CU_ASSERT(rc == 0);
3773 
3774 	poll_threads();
3775 	spdk_delay_us(1000);
3776 	poll_threads();
3777 
3778 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3779 
3780 	free(first_bdev_io);
3781 	free(second_bdev_io);
3782 }
3783 
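/* Exercise bdev_nvme_find_io_path() directly with stack-allocated channels:
 * paths whose ANA state is not accessible or whose qpair is gone must be
 * skipped, and an ANA optimized path is preferred over a non-optimized one.
 */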
3784 static void
3785 test_find_io_path(void)
3786 {
3787 	struct nvme_bdev_channel nbdev_ch = {
3788 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
3789 	};
3790 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
3791 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
3792 	struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
3793 	struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };
3794 
3795 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
3796 
3797 	/* Test if io_path whose ANA state is not accessible is excluded. */
3798 
3799 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3800 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3801 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3802 
3803 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3804 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3805 
3806 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3807 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3808 
3809 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3810 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3811 
3812 	nbdev_ch.current_io_path = NULL;
3813 
3814 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3815 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3816 
3817 	nbdev_ch.current_io_path = NULL;
3818 
3819 	/* Test if io_path whose qpair is resetting is excluded. */
3820 
3821 	ctrlr_ch1.qpair = NULL;
3822 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3823 
3824 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
3825 
3826 	/* Test if ANA optimized state or the first found ANA non-optimized state
3827 	 * is prioritized.
3828 	 */
3829 
3830 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3831 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3832 	ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
3833 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3834 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
3835 
3836 	nbdev_ch.current_io_path = NULL;
3837 
3838 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3839 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3840 
3841 	nbdev_ch.current_io_path = NULL;
3842 }
3843 
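/* Verify that I/Os submitted while the nvme_ctrlr is resetting, or while the
 * namespace is ANA inaccessible, are queued on nbdev_ch->retry_io_list and
 * resubmitted in FIFO order once the path becomes usable again.
 */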
3844 static void
3845 test_retry_io_if_ctrlr_is_resetting(void)
3846 {
3847 	struct nvme_path_id path = {};
3848 	struct spdk_nvme_ctrlr *ctrlr;
3849 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3850 	struct nvme_ctrlr *nvme_ctrlr;
3851 	const int STRING_SIZE = 32;
3852 	const char *attached_names[STRING_SIZE];
3853 	struct nvme_bdev *bdev;
3854 	struct nvme_ns *nvme_ns;
3855 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
3856 	struct spdk_io_channel *ch;
3857 	struct nvme_bdev_channel *nbdev_ch;
3858 	struct nvme_io_path *io_path;
3859 	struct nvme_ctrlr_channel *ctrlr_ch;
3860 	int rc;
3861 
3862 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3863 	ut_init_trid(&path.trid);
3864 
3865 	set_thread(0);
3866 
3867 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
3868 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3869 
3870 	g_ut_attach_ctrlr_status = 0;
3871 	g_ut_attach_bdev_count = 1;
3872 
3873 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
3874 			      attach_ctrlr_done, NULL, NULL, false);
3875 	CU_ASSERT(rc == 0);
3876 
3877 	spdk_delay_us(1000);
3878 	poll_threads();
3879 
3880 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3881 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3882 
3883 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
3884 	CU_ASSERT(nvme_ctrlr != NULL);
3885 
3886 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3887 	CU_ASSERT(bdev != NULL);
3888 
3889 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
3890 	CU_ASSERT(nvme_ns != NULL);
3891 
3892 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
3893 	ut_bdev_io_set_buf(bdev_io1);
3894 
3895 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
3896 	ut_bdev_io_set_buf(bdev_io2);
3897 
3898 	ch = spdk_get_io_channel(bdev);
3899 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3900 
3901 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3902 
3903 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
3904 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
3905 
3906 	ctrlr_ch = io_path->ctrlr_ch;
3907 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
3908 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
3909 
3910 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
3911 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
3912 
3913 	/* If qpair is connected, I/O should succeed. */
3914 	bdev_io1->internal.in_submit_request = true;
3915 
3916 	bdev_nvme_submit_request(ch, bdev_io1);
3917 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3918 
3919 	poll_threads();
3920 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
3921 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3922 
3923 	/* If qpair is disconnected, it is freed and then reconnected via resetting
3924 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
3925 	 * while resetting the nvme_ctrlr.
3926 	 */
3927 	ctrlr_ch->qpair->is_connected = false;
3928 	ctrlr->is_failed = true;
3929 
3930 	poll_thread_times(0, 3);
3931 
3932 	CU_ASSERT(ctrlr_ch->qpair == NULL);
3933 	CU_ASSERT(nvme_ctrlr->resetting == true);
3934 	CU_ASSERT(ctrlr->is_failed == false);
3935 
3936 	bdev_io1->internal.in_submit_request = true;
3937 
3938 	bdev_nvme_submit_request(ch, bdev_io1);
3939 
3940 	spdk_delay_us(1);
3941 
3942 	bdev_io2->internal.in_submit_request = true;
3943 
3944 	bdev_nvme_submit_request(ch, bdev_io2);
3945 
3946 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3947 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3948 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3949 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
3950 
3951 	poll_threads();
3952 
3953 	CU_ASSERT(ctrlr_ch->qpair != NULL);
3954 	CU_ASSERT(nvme_ctrlr->resetting == false);
3955 
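	/* Queued I/Os are scheduled for retry one second after submission.
	 * Advance the clock so that only bdev_io1's retry deadline has elapsed;
	 * bdev_io2 was submitted 1 us later and should stay on the retry list.
	 */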
3956 	spdk_delay_us(999999);
3957 
3958 	poll_thread_times(0, 1);
3959 
3960 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
3961 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3962 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3963 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3964 
3965 	poll_threads();
3966 
3967 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3968 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
3969 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3970 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3971 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3972 
3973 	spdk_delay_us(1);
3974 
3975 	poll_thread_times(0, 1);
3976 
3977 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
3978 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
3979 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
3980 
3981 	poll_threads();
3982 
3983 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3984 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
3985 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3986 
3987 	/* If ANA state of namespace is inaccessible, I/O should be queued. */
3988 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3989 	nbdev_ch->current_io_path = NULL;
3990 
3991 	bdev_io1->internal.in_submit_request = true;
3992 
3993 	bdev_nvme_submit_request(ch, bdev_io1);
3994 
3995 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
3996 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
3997 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
3998 
3999 	/* ANA state became accessible while I/O was queued. */
4000 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4001 
4002 	spdk_delay_us(1000000);
4003 
4004 	poll_thread_times(0, 1);
4005 
4006 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4007 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4008 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4009 
4010 	poll_threads();
4011 
4012 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4013 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4014 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4015 
4016 	free(bdev_io1);
4017 	free(bdev_io2);
4018 
4019 	spdk_put_io_channel(ch);
4020 
4021 	poll_threads();
4022 
4023 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4024 	CU_ASSERT(rc == 0);
4025 
4026 	poll_threads();
4027 	spdk_delay_us(1000);
4028 	poll_threads();
4029 
4030 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4031 }
4032 
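/* Verify retry on path-level completion errors: an I/O that fails with
 * SCT_PATH is retried unless DNR is set, and when its qpair disappears the
 * retry may complete on another io_path of the same bdev.
 */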
4033 static void
4034 test_retry_io_for_io_path_error(void)
4035 {
4036 	struct nvme_path_id path1 = {}, path2 = {};
4037 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4038 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4039 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4040 	const int STRING_SIZE = 32;
4041 	const char *attached_names[STRING_SIZE];
4042 	struct nvme_bdev *bdev;
4043 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4044 	struct spdk_bdev_io *bdev_io;
4045 	struct nvme_bdev_io *bio;
4046 	struct spdk_io_channel *ch;
4047 	struct nvme_bdev_channel *nbdev_ch;
4048 	struct nvme_io_path *io_path1, *io_path2;
4049 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
4050 	struct ut_nvme_req *req;
4051 	int rc;
4052 
4053 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4054 	ut_init_trid(&path1.trid);
4055 	ut_init_trid2(&path2.trid);
4056 
4057 	g_opts.bdev_retry_count = 1;
4058 
4059 	set_thread(0);
4060 
4061 	g_ut_attach_ctrlr_status = 0;
4062 	g_ut_attach_bdev_count = 1;
4063 
4064 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4065 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4066 
4067 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4068 
4069 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
4070 			      attach_ctrlr_done, NULL, NULL, true);
4071 	CU_ASSERT(rc == 0);
4072 
4073 	spdk_delay_us(1000);
4074 	poll_threads();
4075 
4076 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4077 	poll_threads();
4078 
4079 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4080 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4081 
4082 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4083 	CU_ASSERT(nvme_ctrlr1 != NULL);
4084 
4085 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4086 	CU_ASSERT(bdev != NULL);
4087 
4088 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4089 	CU_ASSERT(nvme_ns1 != NULL);
4090 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4091 
4092 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4093 	ut_bdev_io_set_buf(bdev_io);
4094 
4095 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4096 
4097 	ch = spdk_get_io_channel(bdev);
4098 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4099 
4100 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4101 
4102 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4103 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4104 
4105 	ctrlr_ch1 = io_path1->ctrlr_ch;
4106 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
4107 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1->qpair != NULL);
4108 
4109 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4110 
4111 	/* I/O got a temporary I/O path error, but it should not be retried because DNR is set. */
4112 	bdev_io->internal.in_submit_request = true;
4113 
4114 	bdev_nvme_submit_request(ch, bdev_io);
4115 
4116 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4117 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4118 
4119 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4120 	SPDK_CU_ASSERT_FATAL(req != NULL);
4121 
4122 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4123 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4124 	req->cpl.status.dnr = 1;
4125 
4126 	poll_thread_times(0, 1);
4127 
4128 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4129 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4130 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4131 
4132 	/* I/O got a temporary I/O path error, but it should succeed after a retry. */
4133 	bdev_io->internal.in_submit_request = true;
4134 
4135 	bdev_nvme_submit_request(ch, bdev_io);
4136 
4137 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4138 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4139 
4140 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4141 	SPDK_CU_ASSERT_FATAL(req != NULL);
4142 
4143 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4144 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4145 
4146 	poll_thread_times(0, 1);
4147 
4148 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4149 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4150 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4151 
4152 	poll_threads();
4153 
4154 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4155 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4156 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4157 
4158 	/* Add io_path2 dynamically, and create a multipath configuration. */
4159 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4160 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4161 
4162 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
4163 
4164 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
4165 			      attach_ctrlr_done, NULL, NULL, true);
4166 	CU_ASSERT(rc == 0);
4167 
4168 	spdk_delay_us(1000);
4169 	poll_threads();
4170 
4171 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4172 	poll_threads();
4173 
4174 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4175 	CU_ASSERT(nvme_ctrlr2 != NULL);
4176 
4177 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4178 	CU_ASSERT(nvme_ns2 != NULL);
4179 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4180 
4181 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4182 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4183 
4184 	ctrlr_ch2 = io_path2->ctrlr_ch;
4185 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
4186 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2->qpair != NULL);
4187 
4188 	/* The I/O is submitted to io_path1, but the qpair of io_path1 is
4189 	 * disconnected and deleted, so the I/O is aborted. io_path2 is still
4190 	 * available, so the retried I/O goes to io_path2 and should succeed.
4191 	 */
4192 	bdev_io->internal.in_submit_request = true;
4193 
4194 	bdev_nvme_submit_request(ch, bdev_io);
4195 
4196 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
4197 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4198 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4199 
4200 	req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
4201 	SPDK_CU_ASSERT_FATAL(req != NULL);
4202 
4203 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4204 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4205 
4206 	poll_thread_times(0, 1);
4207 
4208 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
4209 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4210 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4211 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4212 
4213 	bdev_nvme_destroy_qpair(ctrlr_ch1);
4214 
4215 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
4216 
4217 	poll_threads();
4218 
4219 	CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
4220 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4221 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4222 
4223 	free(bdev_io);
4224 
4225 	spdk_put_io_channel(ch);
4226 
4227 	poll_threads();
4228 
4229 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4230 	CU_ASSERT(rc == 0);
4231 
4232 	poll_threads();
4233 	spdk_delay_us(1000);
4234 	poll_threads();
4235 
4236 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4237 
4238 	g_opts.bdev_retry_count = 0;
4239 }
4240 
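/* Verify how g_opts.bdev_retry_count bounds retries: an aborted I/O is never
 * retried, a bio whose retry_count has reached the limit fails, -1 means
 * retry without limit, and a bio below the limit is retried.
 */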
4241 static void
4242 test_retry_io_count(void)
4243 {
4244 	struct nvme_path_id path = {};
4245 	struct spdk_nvme_ctrlr *ctrlr;
4246 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4247 	struct nvme_ctrlr *nvme_ctrlr;
4248 	const int STRING_SIZE = 32;
4249 	const char *attached_names[STRING_SIZE];
4250 	struct nvme_bdev *bdev;
4251 	struct nvme_ns *nvme_ns;
4252 	struct spdk_bdev_io *bdev_io;
4253 	struct nvme_bdev_io *bio;
4254 	struct spdk_io_channel *ch;
4255 	struct nvme_bdev_channel *nbdev_ch;
4256 	struct nvme_io_path *io_path;
4257 	struct nvme_ctrlr_channel *ctrlr_ch;
4258 	struct ut_nvme_req *req;
4259 	int rc;
4260 
4261 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4262 	ut_init_trid(&path.trid);
4263 
4264 	set_thread(0);
4265 
4266 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4267 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4268 
4269 	g_ut_attach_ctrlr_status = 0;
4270 	g_ut_attach_bdev_count = 1;
4271 
4272 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4273 			      attach_ctrlr_done, NULL, NULL, false);
4274 	CU_ASSERT(rc == 0);
4275 
4276 	spdk_delay_us(1000);
4277 	poll_threads();
4278 
4279 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4280 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4281 
4282 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4283 	CU_ASSERT(nvme_ctrlr != NULL);
4284 
4285 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4286 	CU_ASSERT(bdev != NULL);
4287 
4288 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4289 	CU_ASSERT(nvme_ns != NULL);
4290 
4291 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4292 	ut_bdev_io_set_buf(bdev_io);
4293 
4294 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4295 
4296 	ch = spdk_get_io_channel(bdev);
4297 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4298 
4299 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4300 
4301 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4302 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4303 
4304 	ctrlr_ch = io_path->ctrlr_ch;
4305 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4306 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4307 
4308 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4309 
4310 	/* If I/O is aborted by request, it should not be retried. */
4311 	g_opts.bdev_retry_count = 1;
4312 
4313 	bdev_io->internal.in_submit_request = true;
4314 
4315 	bdev_nvme_submit_request(ch, bdev_io);
4316 
4317 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4318 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4319 
4320 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4321 	SPDK_CU_ASSERT_FATAL(req != NULL);
4322 
4323 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4324 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4325 
4326 	poll_thread_times(0, 1);
4327 
4328 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4329 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4330 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4331 
4332 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4333 	 * the failed I/O should not be retried.
4334 	 */
4335 	g_opts.bdev_retry_count = 4;
4336 
4337 	bdev_io->internal.in_submit_request = true;
4338 
4339 	bdev_nvme_submit_request(ch, bdev_io);
4340 
4341 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4342 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4343 
4344 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4345 	SPDK_CU_ASSERT_FATAL(req != NULL);
4346 
4347 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4348 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4349 	bio->retry_count = 4;
4350 
4351 	poll_thread_times(0, 1);
4352 
4353 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4354 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4355 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4356 
4357 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4358 	g_opts.bdev_retry_count = -1;
4359 
4360 	bdev_io->internal.in_submit_request = true;
4361 
4362 	bdev_nvme_submit_request(ch, bdev_io);
4363 
4364 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4365 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4366 
4367 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4368 	SPDK_CU_ASSERT_FATAL(req != NULL);
4369 
4370 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4371 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4372 	bio->retry_count = 4;
4373 
4374 	poll_thread_times(0, 1);
4375 
4376 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4377 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4378 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4379 
4380 	poll_threads();
4381 
4382 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4383 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4384 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4385 
4386 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4387 	 * the failed I/O should be retried.
4388 	 */
4389 	g_opts.bdev_retry_count = 4;
4390 
4391 	bdev_io->internal.in_submit_request = true;
4392 
4393 	bdev_nvme_submit_request(ch, bdev_io);
4394 
4395 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4396 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4397 
4398 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4399 	SPDK_CU_ASSERT_FATAL(req != NULL);
4400 
4401 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4402 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4403 	bio->retry_count = 3;
4404 
4405 	poll_thread_times(0, 1);
4406 
4407 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4408 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4409 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4410 
4411 	poll_threads();
4412 
4413 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4414 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4415 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4416 
4417 	free(bdev_io);
4418 
4419 	spdk_put_io_channel(ch);
4420 
4421 	poll_threads();
4422 
4423 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4424 	CU_ASSERT(rc == 0);
4425 
4426 	poll_threads();
4427 	spdk_delay_us(1000);
4428 	poll_threads();
4429 
4430 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4431 
4432 	g_opts.bdev_retry_count = 0;
4433 }
4434 
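/* Verify that only one ANA log page read can be in flight per nvme_ctrlr:
 * concurrent reads from any thread are rejected while one is outstanding,
 * and reads are also rejected while the ctrlr is resetting.
 */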
4435 static void
4436 test_concurrent_read_ana_log_page(void)
4437 {
4438 	struct spdk_nvme_transport_id trid = {};
4439 	struct spdk_nvme_ctrlr *ctrlr;
4440 	struct nvme_ctrlr *nvme_ctrlr;
4441 	const int STRING_SIZE = 32;
4442 	const char *attached_names[STRING_SIZE];
4443 	int rc;
4444 
4445 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4446 	ut_init_trid(&trid);
4447 
4448 	set_thread(0);
4449 
4450 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4451 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4452 
4453 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4454 
4455 	g_ut_attach_ctrlr_status = 0;
4456 	g_ut_attach_bdev_count = 1;
4457 
4458 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
4459 			      attach_ctrlr_done, NULL, NULL, false);
4460 	CU_ASSERT(rc == 0);
4461 
4462 	spdk_delay_us(1000);
4463 	poll_threads();
4464 
4465 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4466 	poll_threads();
4467 
4468 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4469 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4470 
4471 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4472 
4473 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4474 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4475 
4476 	/* Any following read request should be rejected. */
4477 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4478 
4479 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4480 
4481 	set_thread(1);
4482 
4483 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4484 
4485 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4486 
4487 	/* A reset request issued while reading the ANA log page should not be rejected. */
4488 	rc = bdev_nvme_reset(nvme_ctrlr);
4489 	CU_ASSERT(rc == 0);
4490 
4491 	poll_threads();
4492 
4493 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4494 	poll_threads();
4495 
4496 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4497 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4498 
4499 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4500 	rc = bdev_nvme_reset(nvme_ctrlr);
4501 	CU_ASSERT(rc == 0);
4502 
4503 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4504 
4505 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4506 
4507 	set_thread(0);
4508 
4509 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4510 	CU_ASSERT(rc == 0);
4511 
4512 	poll_threads();
4513 	spdk_delay_us(1000);
4514 	poll_threads();
4515 
4516 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4517 }
4518 
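/* Verify the handling of ANA completion errors: the failed I/O is queued for
 * retry, the namespace is frozen while its ANA state is refreshed from the
 * log page, and the retry succeeds once the state becomes optimized.
 */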
4519 static void
4520 test_retry_io_for_ana_error(void)
4521 {
4522 	struct nvme_path_id path = {};
4523 	struct spdk_nvme_ctrlr *ctrlr;
4524 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4525 	struct nvme_ctrlr *nvme_ctrlr;
4526 	const int STRING_SIZE = 32;
4527 	const char *attached_names[STRING_SIZE];
4528 	struct nvme_bdev *bdev;
4529 	struct nvme_ns *nvme_ns;
4530 	struct spdk_bdev_io *bdev_io;
4531 	struct nvme_bdev_io *bio;
4532 	struct spdk_io_channel *ch;
4533 	struct nvme_bdev_channel *nbdev_ch;
4534 	struct nvme_io_path *io_path;
4535 	struct nvme_ctrlr_channel *ctrlr_ch;
4536 	struct ut_nvme_req *req;
4537 	uint64_t now;
4538 	int rc;
4539 
4540 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4541 	ut_init_trid(&path.trid);
4542 
4543 	g_opts.bdev_retry_count = 1;
4544 
4545 	set_thread(0);
4546 
4547 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4548 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4549 
4550 	g_ut_attach_ctrlr_status = 0;
4551 	g_ut_attach_bdev_count = 1;
4552 
4553 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 0,
4554 			      attach_ctrlr_done, NULL, NULL, false);
4555 	CU_ASSERT(rc == 0);
4556 
4557 	spdk_delay_us(1000);
4558 	poll_threads();
4559 
4560 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4561 	poll_threads();
4562 
4563 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4564 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4565 
4566 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4567 	CU_ASSERT(nvme_ctrlr != NULL);
4568 
4569 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4570 	CU_ASSERT(bdev != NULL);
4571 
4572 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4573 	CU_ASSERT(nvme_ns != NULL);
4574 
4575 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4576 	ut_bdev_io_set_buf(bdev_io);
4577 
4578 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4579 
4580 	ch = spdk_get_io_channel(bdev);
4581 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4582 
4583 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4584 
4585 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4586 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4587 
4588 	ctrlr_ch = io_path->ctrlr_ch;
4589 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
4590 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
4591 
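	/* Capture the current tick count so the retry deadlines asserted below
	 * can be compared against the submission time.
	 */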
4592 	now = spdk_get_ticks();
4593 
4594 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4595 
4596 	/* If an I/O gets an ANA error, it should be queued, the corresponding
4597 	 * namespace should be frozen, and its ANA state should be updated.
4598 	 */
4599 	bdev_io->internal.in_submit_request = true;
4600 
4601 	bdev_nvme_submit_request(ch, bdev_io);
4602 
4603 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
4604 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4605 
4606 	req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
4607 	SPDK_CU_ASSERT_FATAL(req != NULL);
4608 
4609 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4610 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4611 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4612 
4613 	poll_thread_times(0, 1);
4614 
4615 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4616 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4617 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4618 	/* I/O should be retried immediately. */
4619 	CU_ASSERT(bio->retry_ticks == now);
4620 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4621 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4622 
4623 	poll_threads();
4624 
4625 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4626 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4627 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4628 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4629 	/* If no I/O path was found but one may become available, the I/O
4630 	 * should be retried after a second.
4631 	 */
4632 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4633 
4634 	/* The namespace should be unfrozen once its ANA state update completes. */
4635 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4636 	poll_threads();
4637 
4638 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4639 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4640 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4641 
4642 	/* Retrying the queued I/O should succeed. */
4643 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4644 	poll_threads();
4645 
4646 	CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
4647 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4648 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4649 
4650 	free(bdev_io);
4651 
4652 	spdk_put_io_channel(ch);
4653 
4654 	poll_threads();
4655 
4656 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4657 	CU_ASSERT(rc == 0);
4658 
4659 	poll_threads();
4660 	spdk_delay_us(1000);
4661 	poll_threads();
4662 
4663 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4664 
4665 	g_opts.bdev_retry_count = 0;
4666 }
4667 
4668 int
4669 main(int argc, const char **argv)
4670 {
4671 	CU_pSuite	suite = NULL;
4672 	unsigned int	num_failures;
4673 
4674 	CU_set_error_action(CUEA_ABORT);
4675 	CU_initialize_registry();
4676 
4677 	suite = CU_add_suite("nvme", NULL, NULL);
4678 
4679 	CU_ADD_TEST(suite, test_create_ctrlr);
4680 	CU_ADD_TEST(suite, test_reset_ctrlr);
4681 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
4682 	CU_ADD_TEST(suite, test_failover_ctrlr);
4683 	CU_ADD_TEST(suite, test_pending_reset);
4684 	CU_ADD_TEST(suite, test_attach_ctrlr);
4685 	CU_ADD_TEST(suite, test_aer_cb);
4686 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
4687 	CU_ADD_TEST(suite, test_add_remove_trid);
4688 	CU_ADD_TEST(suite, test_abort);
4689 	CU_ADD_TEST(suite, test_get_io_qpair);
4690 	CU_ADD_TEST(suite, test_bdev_unregister);
4691 	CU_ADD_TEST(suite, test_compare_ns);
4692 	CU_ADD_TEST(suite, test_init_ana_log_page);
4693 	CU_ADD_TEST(suite, test_get_memory_domains);
4694 	CU_ADD_TEST(suite, test_reconnect_qpair);
4695 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
4696 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
4697 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
4698 	CU_ADD_TEST(suite, test_admin_path);
4699 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
4700 	CU_ADD_TEST(suite, test_find_io_path);
4701 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
4702 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
4703 	CU_ADD_TEST(suite, test_retry_io_count);
4704 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
4705 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
4706 
4707 	CU_basic_set_mode(CU_BRM_VERBOSE);
4708 
4709 	allocate_threads(3);
4710 	set_thread(0);
4711 	bdev_nvme_library_init();
4712 	init_accel();
4713 
4714 	CU_basic_run_tests();
4715 
4716 	set_thread(0);
4717 	bdev_nvme_library_fini();
4718 	fini_accel();
4719 	free_threads();
4720 
4721 	num_failures = CU_get_number_of_failures();
4722 	CU_cleanup_registry();
4723 
4724 	return num_failures;
4725 }
4726