/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

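/* The DEFINE_STUB()/DEFINE_STUB_V() macros below come from SPDK's unit test
 * mock framework. Each one generates a stand-in implementation with the given
 * signature that simply returns the supplied default value (or nothing, for
 * the _V variants), so bdev_nvme.c can be linked without the real NVMe driver
 * and accel libraries.
 */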
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

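/* Unlike the plain stubs above, this mock also fills the caller's array with
 * dummy domain pointers, up to the count primed in the
 * ut_spdk_nvme_ctrlr_get_memory_domains mock value. */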
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

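/* Minimal test doubles for the NVMe driver's opaque structures. Only the
 * fields this unit test reads or writes are defined; the real definitions
 * live in the driver and are deliberately not included here. */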
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

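/* The three trids below share the same subsystem NQN and differ only in the
 * target address, i.e. they model three paths to the same subsystem. */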
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

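/* Typical usage of ut_attach_ctrlr() in a test case (sketch, mirroring
 * test_pending_reset() below):
 *
 *	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
 *	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
 *	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
 *			      attach_ctrlr_done, NULL, NULL, NULL, false);
 *
 * The mocked spdk_nvme_probe_poll_async() below then moves the ctrlr from
 * g_ut_init_ctrlrs to g_ut_attached_ctrlrs.
 */
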
static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

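/* Allocate a bdev I/O with its nvme_bdev_io driver context co-located right
 * behind it, mirroring how the bdev layer lays out driver_ctx. */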
static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

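/* The mocked poll group tracks qpair state by moving entries between its
 * connected and disconnected tailqs; poll_group_tailq_head records which of
 * the two lists a qpair currently sits on. */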
static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

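/* Each mocked ANA group descriptor carries exactly one NSID (see
 * ut_create_ana_log_page() below), hence the descriptor size plus a single
 * uint32_t. */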
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

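/* Mocked abort: find the outstanding request whose cb_arg matches cmd_cb_arg,
 * mark it ABORTED BY REQUEST, and queue a successful ABORT completion on the
 * admin qpair. */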
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

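/* The mocked qpair completes every outstanding request in a single call,
 * invoking each callback with the completion status the test pre-loaded into
 * req->cpl. */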
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

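/* Test cases start here. bdev_nvme_delete() only marks a ctrlr for
 * destruction; the actual teardown happens asynchronously, so the tests
 * finish with a sequence like:
 *
 *	poll_threads();
 *	spdk_delay_us(1000);
 *	poll_threads();
 *
 * before asserting that the ctrlr is gone.
 */
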
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, bool success)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(success == false);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

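	/* Step through the reset sequence with poll_thread_times(): the qpair
	 * on thread 0 is deleted first, then the one on thread 1, then the
	 * admin qpair is disconnected and reconnected, and finally the I/O
	 * qpairs are re-created in the same order. */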
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->reset_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->reset_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->reset_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->reset_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

1508 
1509 static void
1510 test_race_between_reset_and_destruct_ctrlr(void)
1511 {
1512 	struct spdk_nvme_transport_id trid = {};
1513 	struct spdk_nvme_ctrlr ctrlr = {};
1514 	struct nvme_ctrlr *nvme_ctrlr;
1515 	struct spdk_io_channel *ch1, *ch2;
1516 	int rc;
1517 
1518 	ut_init_trid(&trid);
1519 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1520 
1521 	set_thread(0);
1522 
1523 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1524 	CU_ASSERT(rc == 0);
1525 
1526 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1527 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1528 
1529 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1530 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1531 
1532 	set_thread(1);
1533 
1534 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1535 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1536 
1537 	/* Reset starts from thread 1. */
1538 	set_thread(1);
1539 
1540 	rc = bdev_nvme_reset(nvme_ctrlr);
1541 	CU_ASSERT(rc == 0);
1542 	CU_ASSERT(nvme_ctrlr->resetting == true);
1543 
1544 	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
1545 	set_thread(0);
1546 
1547 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1548 	CU_ASSERT(rc == 0);
1549 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1550 	CU_ASSERT(nvme_ctrlr->destruct == true);
1551 	CU_ASSERT(nvme_ctrlr->resetting == true);
1552 
1553 	poll_threads();
1554 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1555 	poll_threads();
1556 
1557 	/* Reset completed but ctrlr is not still destructed yet. */
1558 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1559 	CU_ASSERT(nvme_ctrlr->destruct == true);
1560 	CU_ASSERT(nvme_ctrlr->resetting == false);
1561 
1562 	/* New reset request is rejected. */
1563 	rc = bdev_nvme_reset(nvme_ctrlr);
1564 	CU_ASSERT(rc == -ENXIO);
1565 
1566 	/* Additional polling called spdk_io_device_unregister() to ctrlr,
1567 	 * However there are two channels and destruct is not completed yet.
1568 	 */
1569 	poll_threads();
1570 
1571 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1572 
1573 	set_thread(0);
1574 
1575 	spdk_put_io_channel(ch1);
1576 
1577 	set_thread(1);
1578 
1579 	spdk_put_io_channel(ch2);
1580 
1581 	poll_threads();
1582 	spdk_delay_us(1000);
1583 	poll_threads();
1584 
1585 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1586 }
1587 
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the single trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test the two trid case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We hit a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr initially had trid1 and trid2, and trid1 was active. The connection
 * to trid1 was lost, and resetting the ctrlr failed repeatedly before failover from
 * trid1 to trid2 started. While processing the failed reset, trid3 was added. trid1
 * should have stayed active, i.e., at the head of the list, until the failover
 * completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a shorter polling period than the admin qpair. When a connection
 * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers a ctrlr reset, whereas an admin qpair error triggers a
 * failover. Hence a ctrlr reset may be executed repeatedly before the failover is
 * executed, which makes this bug real.
 *
 * The following test verifies the fix.
 */
1731 static void
1732 test_race_between_failover_and_add_secondary_trid(void)
1733 {
1734 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1735 	struct spdk_nvme_ctrlr ctrlr = {};
1736 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1737 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1738 	struct spdk_io_channel *ch1, *ch2;
1739 	int rc;
1740 
1741 	ut_init_trid(&trid1);
1742 	ut_init_trid2(&trid2);
1743 	ut_init_trid3(&trid3);
1744 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1745 
1746 	set_thread(0);
1747 
1748 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1749 	CU_ASSERT(rc == 0);
1750 
1751 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1752 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1753 
1754 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1755 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1756 
1757 	set_thread(1);
1758 
1759 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1760 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1761 
1762 	set_thread(0);
1763 
1764 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1765 	CU_ASSERT(rc == 0);
1766 
1767 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1768 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1769 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1770 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1771 	path_id2 = TAILQ_NEXT(path_id1, link);
1772 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1773 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1774 
1775 	ctrlr.fail_reset = true;
1776 
1777 	rc = bdev_nvme_reset(nvme_ctrlr);
1778 	CU_ASSERT(rc == 0);
1779 
1780 	poll_threads();
1781 
1782 	CU_ASSERT(path_id1->is_failed == true);
1783 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1784 
1785 	rc = bdev_nvme_reset(nvme_ctrlr);
1786 	CU_ASSERT(rc == 0);
1787 
1788 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1789 	CU_ASSERT(rc == 0);
1790 
1791 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1792 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1793 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1794 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1795 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1796 	path_id3 = TAILQ_NEXT(path_id2, link);
1797 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1798 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1799 
1800 	poll_threads();
1801 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1802 	poll_threads();
1803 
1804 	spdk_put_io_channel(ch1);
1805 
1806 	set_thread(1);
1807 
1808 	spdk_put_io_channel(ch2);
1809 
1810 	poll_threads();
1811 
1812 	set_thread(0);
1813 
1814 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1815 	CU_ASSERT(rc == 0);
1816 
1817 	poll_threads();
1818 	spdk_delay_us(1000);
1819 	poll_threads();
1820 
1821 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1822 }
1823 
1824 static void
1825 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1826 {
1827 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1828 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1829 }
1830 
1831 static void
1832 test_pending_reset(void)
1833 {
1834 	struct spdk_nvme_transport_id trid = {};
1835 	struct spdk_nvme_ctrlr *ctrlr;
1836 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1837 	const int STRING_SIZE = 32;
1838 	const char *attached_names[STRING_SIZE];
1839 	struct nvme_bdev *bdev;
1840 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1841 	struct spdk_io_channel *ch1, *ch2;
1842 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1843 	struct nvme_io_path *io_path1, *io_path2;
1844 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1845 	int rc;
1846 
1847 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1848 	ut_init_trid(&trid);
1849 
1850 	set_thread(0);
1851 
1852 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1853 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1854 
1855 	g_ut_attach_ctrlr_status = 0;
1856 	g_ut_attach_bdev_count = 1;
1857 
1858 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1859 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1860 	CU_ASSERT(rc == 0);
1861 
1862 	spdk_delay_us(1000);
1863 	poll_threads();
1864 
1865 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1866 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1867 
1868 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1869 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1870 
1871 	ch1 = spdk_get_io_channel(bdev);
1872 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1873 
1874 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1875 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1876 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1877 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1878 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1879 
1880 	set_thread(1);
1881 
1882 	ch2 = spdk_get_io_channel(bdev);
1883 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1884 
1885 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1886 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1887 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1888 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1889 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1890 
1891 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1892 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1893 
1894 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1895 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1896 
1897 	/* The first reset request is submitted on thread 1, and the second reset request
1898 	 * is submitted on thread 0 while processing the first request.
1899 	 */
1900 	bdev_nvme_submit_request(ch2, first_bdev_io);
1901 	CU_ASSERT(nvme_ctrlr->resetting == true);
1902 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1903 
1904 	set_thread(0);
1905 
1906 	bdev_nvme_submit_request(ch1, second_bdev_io);
1907 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1908 
1909 	poll_threads();
1910 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1911 	poll_threads();
1912 
1913 	CU_ASSERT(nvme_ctrlr->resetting == false);
1914 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1915 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1916 
1917 	/* The first reset request is submitted on thread 1, and the second reset request
1918 	 * is submitted on thread 0 while processing the first request.
1919 	 *
1920 	 * The difference from the above scenario is that the controller reset fails while
1921 	 * processing the first request. Hence both reset requests should fail.
1922 	 */
1923 	set_thread(1);
1924 
1925 	bdev_nvme_submit_request(ch2, first_bdev_io);
1926 	CU_ASSERT(nvme_ctrlr->resetting == true);
1927 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1928 
1929 	set_thread(0);
1930 
1931 	bdev_nvme_submit_request(ch1, second_bdev_io);
1932 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1933 
1934 	ctrlr->fail_reset = true;
1935 
1936 	poll_threads();
1937 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1938 	poll_threads();
1939 
1940 	CU_ASSERT(nvme_ctrlr->resetting == false);
1941 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1942 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1943 
1944 	spdk_put_io_channel(ch1);
1945 
1946 	set_thread(1);
1947 
1948 	spdk_put_io_channel(ch2);
1949 
1950 	poll_threads();
1951 
1952 	set_thread(0);
1953 
1954 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1955 	CU_ASSERT(rc == 0);
1956 
1957 	poll_threads();
1958 	spdk_delay_us(1000);
1959 	poll_threads();
1960 
1961 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1962 
1963 	free(first_bdev_io);
1964 	free(second_bdev_io);
1965 }
1966 
1967 static void
1968 test_attach_ctrlr(void)
1969 {
1970 	struct spdk_nvme_transport_id trid = {};
1971 	struct spdk_nvme_ctrlr *ctrlr;
1972 	struct nvme_ctrlr *nvme_ctrlr;
1973 	const int STRING_SIZE = 32;
1974 	const char *attached_names[STRING_SIZE];
1975 	struct nvme_bdev *nbdev;
1976 	int rc;
1977 
1978 	set_thread(0);
1979 
1980 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1981 	ut_init_trid(&trid);
1982 
1983 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is
1984 	 * removed by probe polling.
1985 	 */
1986 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1987 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1988 
1989 	ctrlr->is_failed = true;
1990 	g_ut_attach_ctrlr_status = -EIO;
1991 	g_ut_attach_bdev_count = 0;
1992 
1993 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1994 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1995 	CU_ASSERT(rc == 0);
1996 
1997 	spdk_delay_us(1000);
1998 	poll_threads();
1999 
2000 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2001 
2002 	/* If the ctrlr has no namespaces, an nvme_ctrlr with no namespaces is created */
2003 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2004 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2005 
2006 	g_ut_attach_ctrlr_status = 0;
2007 
2008 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2009 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2010 	CU_ASSERT(rc == 0);
2011 
2012 	spdk_delay_us(1000);
2013 	poll_threads();
2014 
2015 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2016 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2017 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2018 
2019 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2020 	CU_ASSERT(rc == 0);
2021 
2022 	poll_threads();
2023 	spdk_delay_us(1000);
2024 	poll_threads();
2025 
2026 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2027 
2028 	/* If the ctrlr has one namespace, an nvme_ctrlr with one namespace and
2029 	 * one nvme_bdev is created.
2030 	 */
2031 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2032 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2033 
2034 	g_ut_attach_bdev_count = 1;
2035 
2036 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2037 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2038 	CU_ASSERT(rc == 0);
2039 
2040 	spdk_delay_us(1000);
2041 	poll_threads();
2042 
2043 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2044 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2045 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2046 
2047 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2048 	attached_names[0] = NULL;
2049 
2050 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2051 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2052 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2053 
2054 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2055 	CU_ASSERT(rc == 0);
2056 
2057 	poll_threads();
2058 	spdk_delay_us(1000);
2059 	poll_threads();
2060 
2061 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2062 
2063 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
2064 	 * created because creating the nvme_bdev fails.
2065 	 */
2066 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2067 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2068 
2069 	g_ut_register_bdev_status = -EINVAL;
2070 	g_ut_attach_bdev_count = 0;
2071 
2072 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2073 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2074 	CU_ASSERT(rc == 0);
2075 
2076 	spdk_delay_us(1000);
2077 	poll_threads();
2078 
2079 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2080 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2081 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2082 
2083 	CU_ASSERT(attached_names[0] == NULL);
2084 
2085 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2086 	CU_ASSERT(rc == 0);
2087 
2088 	poll_threads();
2089 	spdk_delay_us(1000);
2090 	poll_threads();
2091 
2092 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2093 
2094 	g_ut_register_bdev_status = 0;
2095 }
2096 
2097 static void
2098 test_aer_cb(void)
2099 {
2100 	struct spdk_nvme_transport_id trid = {};
2101 	struct spdk_nvme_ctrlr *ctrlr;
2102 	struct nvme_ctrlr *nvme_ctrlr;
2103 	struct nvme_bdev *bdev;
2104 	const int STRING_SIZE = 32;
2105 	const char *attached_names[STRING_SIZE];
2106 	union spdk_nvme_async_event_completion event = {};
2107 	struct spdk_nvme_cpl cpl = {};
2108 	int rc;
2109 
2110 	set_thread(0);
2111 
2112 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2113 	ut_init_trid(&trid);
2114 
2115 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2116 	 * and 4th namespaces are populated.
2117 	 */
2118 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2119 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2120 
2121 	ctrlr->ns[0].is_active = false;
2122 
2123 	g_ut_attach_ctrlr_status = 0;
2124 	g_ut_attach_bdev_count = 3;
2125 
2126 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2127 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2128 	CU_ASSERT(rc == 0);
2129 
2130 	spdk_delay_us(1000);
2131 	poll_threads();
2132 
2133 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2134 	poll_threads();
2135 
2136 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2137 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2138 
2139 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2140 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2141 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2142 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2143 
2144 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2145 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2146 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2147 
2148 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
2149 	 * and change the size of the 4th namespace.
2150 	 */
2151 	ctrlr->ns[0].is_active = true;
2152 	ctrlr->ns[2].is_active = false;
2153 	ctrlr->nsdata[3].nsze = 2048;
2154 
2155 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2156 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2157 	cpl.cdw0 = event.raw;
2158 
2159 	aer_cb(nvme_ctrlr, &cpl);
2160 
2161 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2162 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2163 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2164 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2165 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2166 
2167 	/* Change ANA state of active namespaces. */
2168 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2169 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2170 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2171 
2172 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2173 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2174 	cpl.cdw0 = event.raw;
2175 
2176 	aer_cb(nvme_ctrlr, &cpl);
2177 
2178 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2179 	poll_threads();
2180 
2181 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2182 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2183 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2184 
2185 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2186 	CU_ASSERT(rc == 0);
2187 
2188 	poll_threads();
2189 	spdk_delay_us(1000);
2190 	poll_threads();
2191 
2192 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2193 }
2194 
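/* test_aer_cb() above builds each AER completion by packing an async event
 * type/info pair into cdw0. A small sketch of that packing for notice-type
 * events (ut_aer_notice_cpl() is a hypothetical helper, not part of the
 * original file):
 */
static struct spdk_nvme_cpl
ut_aer_notice_cpl(uint8_t aen_info)
{
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = aen_info;
	cpl.cdw0 = event.raw;	/* aer_cb() decodes the event from cdw0 */

	return cpl;
}
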
2195 static void
2196 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2197 			enum spdk_bdev_io_type io_type)
2198 {
2199 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2200 	struct nvme_io_path *io_path;
2201 	struct spdk_nvme_qpair *qpair;
2202 
2203 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2204 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2205 	qpair = io_path->qpair->qpair;
2206 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2207 
2208 	bdev_io->type = io_type;
2209 	bdev_io->internal.in_submit_request = true;
2210 
2211 	bdev_nvme_submit_request(ch, bdev_io);
2212 
2213 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2214 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2215 
2216 	poll_threads();
2217 
2218 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2219 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2220 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2221 }
2222 
2223 static void
2224 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2225 		   enum spdk_bdev_io_type io_type)
2226 {
2227 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2228 	struct nvme_io_path *io_path;
2229 	struct spdk_nvme_qpair *qpair;
2230 
2231 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2232 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2233 	qpair = io_path->qpair->qpair;
2234 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2235 
2236 	bdev_io->type = io_type;
2237 	bdev_io->internal.in_submit_request = true;
2238 
2239 	bdev_nvme_submit_request(ch, bdev_io);
2240 
2241 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2242 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2243 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2244 }
2245 
2246 static void
2247 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2248 {
2249 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2250 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2251 	struct ut_nvme_req *req;
2252 	struct nvme_io_path *io_path;
2253 	struct spdk_nvme_qpair *qpair;
2254 
2255 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2256 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2257 	qpair = io_path->qpair->qpair;
2258 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2259 
2260 	/* Only the fused compare-and-write command is supported for now. */
2261 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2262 	bdev_io->internal.in_submit_request = true;
2263 
2264 	bdev_nvme_submit_request(ch, bdev_io);
2265 
2266 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2267 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2268 	CU_ASSERT(bio->first_fused_submitted == true);
2269 
2270 	/* First outstanding request is compare operation. */
2271 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2272 	SPDK_CU_ASSERT_FATAL(req != NULL);
2273 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2274 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2275 
2276 	poll_threads();
2277 
2278 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2279 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2280 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2281 }
2282 
2283 static void
2284 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2285 			 struct spdk_nvme_ctrlr *ctrlr)
2286 {
2287 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2288 	bdev_io->internal.in_submit_request = true;
2289 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2290 
2291 	bdev_nvme_submit_request(ch, bdev_io);
2292 
2293 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2294 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2295 
2296 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2297 	poll_thread_times(1, 1);
2298 
2299 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2300 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2301 
2302 	poll_thread_times(0, 1);
2303 
2304 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2305 }
2306 
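/* ut_test_submit_admin_cmd() shows that an admin command completes in two
 * observable steps: the adminq poller on the ctrlr's thread finishes the NVMe
 * request, and a later poll of the submitter's thread delivers the bdev_io
 * completion message. A hedged sketch naming those steps (hypothetical helper,
 * not part of the original file):
 */
static void
ut_settle_admin_completion(uintptr_t ctrlr_thread, uintptr_t submitter_thread)
{
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(ctrlr_thread, 1);	/* adminq poller completes the request */
	poll_thread_times(submitter_thread, 1);	/* completion message reaches the bdev_io */
}
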
2307 static void
2308 test_submit_nvme_cmd(void)
2309 {
2310 	struct spdk_nvme_transport_id trid = {};
2311 	struct spdk_nvme_ctrlr *ctrlr;
2312 	struct nvme_ctrlr *nvme_ctrlr;
2313 	const int STRING_SIZE = 32;
2314 	const char *attached_names[STRING_SIZE];
2315 	struct nvme_bdev *bdev;
2316 	struct spdk_bdev_io *bdev_io;
2317 	struct spdk_io_channel *ch;
2318 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2319 	int rc;
2320 
2321 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2322 	ut_init_trid(&trid);
2323 
2324 	set_thread(1);
2325 
2326 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2327 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2328 
2329 	g_ut_attach_ctrlr_status = 0;
2330 	g_ut_attach_bdev_count = 1;
2331 
2332 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2333 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2334 	CU_ASSERT(rc == 0);
2335 
2336 	spdk_delay_us(1000);
2337 	poll_threads();
2338 
2339 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2340 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2341 
2342 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2343 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2344 
2345 	set_thread(0);
2346 
2347 	ch = spdk_get_io_channel(bdev);
2348 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2349 
2350 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2351 
2352 	bdev_io->u.bdev.iovs = NULL;
2353 
2354 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2355 
2356 	ut_bdev_io_set_buf(bdev_io);
2357 
2358 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2359 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2360 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2361 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2362 
2363 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2364 
2365 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2366 
2367 	/* Verify that the ext NVMe API is called if the bdev_io's ext_opts is set */
2368 	bdev_io->u.bdev.ext_opts = &ext_io_opts;
2369 	g_ut_readv_ext_called = false;
2370 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2371 	CU_ASSERT(g_ut_readv_ext_called == true);
2372 	g_ut_readv_ext_called = false;
2373 
2374 	g_ut_writev_ext_called = false;
2375 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2376 	CU_ASSERT(g_ut_writev_ext_called == true);
2377 	g_ut_writev_ext_called = false;
2378 	bdev_io->u.bdev.ext_opts = NULL;
2379 
2380 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2381 
2382 	free(bdev_io);
2383 
2384 	spdk_put_io_channel(ch);
2385 
2386 	poll_threads();
2387 
2388 	set_thread(1);
2389 
2390 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2391 	CU_ASSERT(rc == 0);
2392 
2393 	poll_threads();
2394 	spdk_delay_us(1000);
2395 	poll_threads();
2396 
2397 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2398 }
2399 
2400 static void
2401 test_add_remove_trid(void)
2402 {
2403 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2404 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2405 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2406 	const int STRING_SIZE = 32;
2407 	const char *attached_names[STRING_SIZE];
2408 	struct nvme_path_id *ctrid;
2409 	int rc;
2410 
2411 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2412 	ut_init_trid(&path1.trid);
2413 	ut_init_trid2(&path2.trid);
2414 	ut_init_trid3(&path3.trid);
2415 
2416 	set_thread(0);
2417 
2418 	g_ut_attach_ctrlr_status = 0;
2419 	g_ut_attach_bdev_count = 0;
2420 
2421 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2422 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2423 
2424 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2425 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2426 	CU_ASSERT(rc == 0);
2427 
2428 	spdk_delay_us(1000);
2429 	poll_threads();
2430 
2431 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2432 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2433 
2434 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2435 
2436 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2437 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2438 
2439 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2440 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2441 	CU_ASSERT(rc == 0);
2442 
2443 	spdk_delay_us(1000);
2444 	poll_threads();
2445 
2446 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2447 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2448 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2449 			break;
2450 		}
2451 	}
2452 	CU_ASSERT(ctrid != NULL);
2453 
2454 	/* trid3 is not in the registered list. */
2455 	rc = bdev_nvme_delete("nvme0", &path3);
2456 	CU_ASSERT(rc == -ENXIO);
2457 
2458 	/* trid2 is not in use, so it is simply removed. */
2459 	rc = bdev_nvme_delete("nvme0", &path2);
2460 	CU_ASSERT(rc == 0);
2461 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2462 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2463 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2464 	}
2465 
2466 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2467 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2468 
2469 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2470 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2471 	CU_ASSERT(rc == 0);
2472 
2473 	spdk_delay_us(1000);
2474 	poll_threads();
2475 
2476 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2477 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2478 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2479 			break;
2480 		}
2481 	}
2482 	CU_ASSERT(ctrid != NULL);
2483 
2484 	/* path1 is the path currently in use and path3 is an alternative path.
2485 	 * If we remove path1, the active path is changed to path3.
2486 	 */
2487 	rc = bdev_nvme_delete("nvme0", &path1);
2488 	CU_ASSERT(rc == 0);
2489 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2490 	CU_ASSERT(nvme_ctrlr->resetting == true);
2491 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2492 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2493 	}
2494 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2495 
2496 	poll_threads();
2497 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2498 	poll_threads();
2499 
2500 	CU_ASSERT(nvme_ctrlr->resetting == false);
2501 
2502 	/* path3 is the current and only path. If we remove path3, the corresponding
2503 	 * nvme_ctrlr is removed.
2504 	 */
2505 	rc = bdev_nvme_delete("nvme0", &path3);
2506 	CU_ASSERT(rc == 0);
2507 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2508 
2509 	poll_threads();
2510 	spdk_delay_us(1000);
2511 	poll_threads();
2512 
2513 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2514 
2515 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2516 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2517 
2518 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2519 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2520 	CU_ASSERT(rc == 0);
2521 
2522 	spdk_delay_us(1000);
2523 	poll_threads();
2524 
2525 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2526 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2527 
2528 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2529 
2530 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2531 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2532 
2533 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2534 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2535 	CU_ASSERT(rc == 0);
2536 
2537 	spdk_delay_us(1000);
2538 	poll_threads();
2539 
2540 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2541 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2542 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2543 			break;
2544 		}
2545 	}
2546 	CU_ASSERT(ctrid != NULL);
2547 
2548 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2549 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2550 	CU_ASSERT(rc == 0);
2551 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2552 
2553 	poll_threads();
2554 	spdk_delay_us(1000);
2555 	poll_threads();
2556 
2557 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2558 }
2559 
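/* test_add_remove_trid() above repeats the same TAILQ_FOREACH search over
 * nvme_ctrlr->trids. A sketch of a helper returning the matching path_id, or
 * NULL when the trid is not registered (hypothetical, not part of the original
 * file; it mirrors the loops above):
 */
static struct nvme_path_id *
ut_find_path_id(struct nvme_ctrlr *nvme_ctrlr, const struct spdk_nvme_transport_id *trid)
{
	struct nvme_path_id *ctrid;

	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, trid) == 0) {
			return ctrid;
		}
	}

	return NULL;
}
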
2560 static void
2561 test_abort(void)
2562 {
2563 	struct spdk_nvme_transport_id trid = {};
2564 	struct nvme_ctrlr_opts opts = {};
2565 	struct spdk_nvme_ctrlr *ctrlr;
2566 	struct nvme_ctrlr *nvme_ctrlr;
2567 	const int STRING_SIZE = 32;
2568 	const char *attached_names[STRING_SIZE];
2569 	struct nvme_bdev *bdev;
2570 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2571 	struct spdk_io_channel *ch1, *ch2;
2572 	struct nvme_bdev_channel *nbdev_ch1;
2573 	struct nvme_io_path *io_path1;
2574 	struct nvme_qpair *nvme_qpair1;
2575 	int rc;
2576 
2577 	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be
2578 	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
2579 	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
2580 	 */
2581 
2582 	ut_init_trid(&trid);
2583 
2584 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2585 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2586 
2587 	g_ut_attach_ctrlr_status = 0;
2588 	g_ut_attach_bdev_count = 1;
2589 
2590 	set_thread(1);
2591 
2592 	opts.ctrlr_loss_timeout_sec = -1;
2593 	opts.reconnect_delay_sec = 1;
2594 
2595 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2596 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2597 	CU_ASSERT(rc == 0);
2598 
2599 	spdk_delay_us(1000);
2600 	poll_threads();
2601 
2602 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2603 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2604 
2605 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2606 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2607 
2608 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2609 	ut_bdev_io_set_buf(write_io);
2610 
2611 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2612 	ut_bdev_io_set_buf(fuse_io);
2613 
2614 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2615 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2616 
2617 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2618 
2619 	set_thread(0);
2620 
2621 	ch1 = spdk_get_io_channel(bdev);
2622 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2623 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2624 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2625 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2626 	nvme_qpair1 = io_path1->qpair;
2627 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2628 
2629 	set_thread(1);
2630 
2631 	ch2 = spdk_get_io_channel(bdev);
2632 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2633 
2634 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2635 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2636 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2637 
2638 	/* Aborting an already completed request should fail. */
2639 	write_io->internal.in_submit_request = true;
2640 	bdev_nvme_submit_request(ch1, write_io);
2641 	poll_threads();
2642 
2643 	CU_ASSERT(write_io->internal.in_submit_request == false);
2644 
2645 	abort_io->u.abort.bio_to_abort = write_io;
2646 	abort_io->internal.in_submit_request = true;
2647 
2648 	bdev_nvme_submit_request(ch1, abort_io);
2649 
2650 	poll_threads();
2651 
2652 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2653 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2654 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2655 
2656 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2657 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2658 
2659 	admin_io->internal.in_submit_request = true;
2660 	bdev_nvme_submit_request(ch1, admin_io);
2661 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2662 	poll_threads();
2663 
2664 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2665 
2666 	abort_io->u.abort.bio_to_abort = admin_io;
2667 	abort_io->internal.in_submit_request = true;
2668 
2669 	bdev_nvme_submit_request(ch2, abort_io);
2670 
2671 	poll_threads();
2672 
2673 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2674 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2675 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2676 
2677 	/* Aborting the write request should succeed. */
2678 	write_io->internal.in_submit_request = true;
2679 	bdev_nvme_submit_request(ch1, write_io);
2680 
2681 	CU_ASSERT(write_io->internal.in_submit_request == true);
2682 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2683 
2684 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2685 	abort_io->u.abort.bio_to_abort = write_io;
2686 	abort_io->internal.in_submit_request = true;
2687 
2688 	bdev_nvme_submit_request(ch1, abort_io);
2689 
2690 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2691 	poll_threads();
2692 
2693 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2694 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2695 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2696 	CU_ASSERT(write_io->internal.in_submit_request == false);
2697 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2698 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2699 
2700 	/* Aborting the fuse request should succeed. */
2701 	fuse_io->internal.in_submit_request = true;
2702 	bdev_nvme_submit_request(ch1, fuse_io);
2703 
2704 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2705 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2706 
2707 	abort_io->u.abort.bio_to_abort = fuse_io;
2708 	abort_io->internal.in_submit_request = true;
2709 
2710 	bdev_nvme_submit_request(ch1, abort_io);
2711 
2712 	spdk_delay_us(10000);
2713 	poll_threads();
2714 
2715 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2716 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2717 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2718 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2719 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2720 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2721 
2722 	/* Aborting the admin request should succeed. */
2723 	admin_io->internal.in_submit_request = true;
2724 	bdev_nvme_submit_request(ch1, admin_io);
2725 
2726 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2727 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2728 
2729 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2730 	abort_io->u.abort.bio_to_abort = admin_io;
2731 	abort_io->internal.in_submit_request = true;
2732 
2733 	bdev_nvme_submit_request(ch2, abort_io);
2734 
2735 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2736 	poll_threads();
2737 
2738 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2739 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2740 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2741 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2742 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2743 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2744 
2745 	set_thread(0);
2746 
2747 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2748 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
2749 	 * resetting should be queued.
2750 	 */
2751 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2752 
2753 	poll_thread_times(0, 3);
2754 
2755 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2756 	CU_ASSERT(nvme_ctrlr->resetting == true);
2757 
2758 	write_io->internal.in_submit_request = true;
2759 
2760 	bdev_nvme_submit_request(ch1, write_io);
2761 
2762 	CU_ASSERT(write_io->internal.in_submit_request == true);
2763 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2764 
2765 	/* Aborting the queued write request should succeed immediately. */
2766 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2767 	abort_io->u.abort.bio_to_abort = write_io;
2768 	abort_io->internal.in_submit_request = true;
2769 
2770 	bdev_nvme_submit_request(ch1, abort_io);
2771 
2772 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2773 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2774 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2775 	CU_ASSERT(write_io->internal.in_submit_request == false);
2776 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2777 
2778 	poll_threads();
2779 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2780 	poll_threads();
2781 
2782 	spdk_put_io_channel(ch1);
2783 
2784 	set_thread(1);
2785 
2786 	spdk_put_io_channel(ch2);
2787 
2788 	poll_threads();
2789 
2790 	free(write_io);
2791 	free(fuse_io);
2792 	free(admin_io);
2793 	free(abort_io);
2794 
2795 	set_thread(1);
2796 
2797 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2798 	CU_ASSERT(rc == 0);
2799 
2800 	poll_threads();
2801 	spdk_delay_us(1000);
2802 	poll_threads();
2803 
2804 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2805 }
2806 
2807 static void
2808 test_get_io_qpair(void)
2809 {
2810 	struct spdk_nvme_transport_id trid = {};
2811 	struct spdk_nvme_ctrlr ctrlr = {};
2812 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2813 	struct spdk_io_channel *ch;
2814 	struct nvme_ctrlr_channel *ctrlr_ch;
2815 	struct spdk_nvme_qpair *qpair;
2816 	int rc;
2817 
2818 	ut_init_trid(&trid);
2819 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2820 
2821 	set_thread(0);
2822 
2823 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2824 	CU_ASSERT(rc == 0);
2825 
2826 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2827 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2828 
2829 	ch = spdk_get_io_channel(nvme_ctrlr);
2830 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2831 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2832 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2833 
2834 	qpair = bdev_nvme_get_io_qpair(ch);
2835 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2836 
2837 	spdk_put_io_channel(ch);
2838 
2839 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2840 	CU_ASSERT(rc == 0);
2841 
2842 	poll_threads();
2843 	spdk_delay_us(1000);
2844 	poll_threads();
2845 
2846 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2847 }
2848 
2849 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2850  * bdevs still exist. In this case spdk_bdev_unregister() is called first; since
2851  * it ultimately calls bdev_nvme_destruct(), this test calls bdev_nvme_destruct()
2852  * directly to guard against regressions in that scenario.
2853  */
2854 static void
2855 test_bdev_unregister(void)
2856 {
2857 	struct spdk_nvme_transport_id trid = {};
2858 	struct spdk_nvme_ctrlr *ctrlr;
2859 	struct nvme_ctrlr *nvme_ctrlr;
2860 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2861 	const int STRING_SIZE = 32;
2862 	const char *attached_names[STRING_SIZE];
2863 	struct nvme_bdev *bdev1, *bdev2;
2864 	int rc;
2865 
2866 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2867 	ut_init_trid(&trid);
2868 
2869 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2870 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2871 
2872 	g_ut_attach_ctrlr_status = 0;
2873 	g_ut_attach_bdev_count = 2;
2874 
2875 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2876 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2877 	CU_ASSERT(rc == 0);
2878 
2879 	spdk_delay_us(1000);
2880 	poll_threads();
2881 
2882 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2883 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2884 
2885 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2886 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2887 
2888 	bdev1 = nvme_ns1->bdev;
2889 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2890 
2891 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2892 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2893 
2894 	bdev2 = nvme_ns2->bdev;
2895 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2896 
2897 	bdev_nvme_destruct(&bdev1->disk);
2898 	bdev_nvme_destruct(&bdev2->disk);
2899 
2900 	poll_threads();
2901 
2902 	CU_ASSERT(nvme_ns1->bdev == NULL);
2903 	CU_ASSERT(nvme_ns2->bdev == NULL);
2904 
2905 	nvme_ctrlr->destruct = true;
2906 	_nvme_ctrlr_destruct(nvme_ctrlr);
2907 
2908 	poll_threads();
2909 	spdk_delay_us(1000);
2910 	poll_threads();
2911 
2912 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2913 }
2914 
2915 static void
2916 test_compare_ns(void)
2917 {
2918 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2919 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2920 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2921 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2922 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2923 
2924 	/* No IDs are defined. */
2925 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2926 
2927 	/* Only EUI64s are defined and they do not match. */
2928 	nsdata1.eui64 = 0xABCDEF0123456789;
2929 	nsdata2.eui64 = 0xBBCDEF0123456789;
2930 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2931 
2932 	/* Only EUI64s are defined and they match. */
2933 	nsdata2.eui64 = 0xABCDEF0123456789;
2934 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2935 
2936 	/* Only NGUIDs are defined and they do not match. */
2937 	nsdata1.eui64 = 0x0;
2938 	nsdata2.eui64 = 0x0;
2939 	nsdata1.nguid[0] = 0x12;
2940 	nsdata2.nguid[0] = 0x10;
2941 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2942 
2943 	/* Only NGUIDs are defined and they match. */
2944 	nsdata2.nguid[0] = 0x12;
2945 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2946 
2947 	/* Only UUIDs are defined and they do not match. */
2948 	nsdata1.nguid[0] = 0x0;
2949 	nsdata2.nguid[0] = 0x0;
2950 	ns1.uuid = &uuid1;
2951 	ns2.uuid = &uuid2;
2952 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2953 
2954 	/* Only one UUID is defined. */
2955 	ns1.uuid = NULL;
2956 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2957 
2958 	/* Only UUIDs are defined and they match. */
2959 	ns1.uuid = &uuid2;
2960 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2961 
2962 	/* All of EUI64, NGUID, and UUID are defined and match. */
2963 	nsdata1.eui64 = 0x123456789ABCDEF;
2964 	nsdata2.eui64 = 0x123456789ABCDEF;
2965 	nsdata1.nguid[15] = 0x34;
2966 	nsdata2.nguid[15] = 0x34;
2967 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2968 
2969 	/* The CSIs do not match. */
2970 	ns1.csi = SPDK_NVME_CSI_ZNS;
2971 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2972 }
2973 
2974 static void
2975 test_init_ana_log_page(void)
2976 {
2977 	struct spdk_nvme_transport_id trid = {};
2978 	struct spdk_nvme_ctrlr *ctrlr;
2979 	struct nvme_ctrlr *nvme_ctrlr;
2980 	const int STRING_SIZE = 32;
2981 	const char *attached_names[STRING_SIZE];
2982 	int rc;
2983 
2984 	set_thread(0);
2985 
2986 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2987 	ut_init_trid(&trid);
2988 
2989 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2990 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2991 
2992 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2993 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2994 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2995 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2996 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2997 
2998 	g_ut_attach_ctrlr_status = 0;
2999 	g_ut_attach_bdev_count = 5;
3000 
3001 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3002 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3003 	CU_ASSERT(rc == 0);
3004 
3005 	spdk_delay_us(1000);
3006 	poll_threads();
3007 
3008 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3009 	poll_threads();
3010 
3011 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3012 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3013 
3014 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3015 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3016 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3017 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3018 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3019 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3020 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3021 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3022 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3023 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3024 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3025 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3026 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3027 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3028 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3029 
3030 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3031 	CU_ASSERT(rc == 0);
3032 
3033 	poll_threads();
3034 	spdk_delay_us(1000);
3035 	poll_threads();
3036 
3037 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3038 }
3039 
3040 static void
3041 init_accel(void)
3042 {
3043 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3044 				sizeof(int), "accel_p");
3045 }
3046 
3047 static void
3048 fini_accel(void)
3049 {
3050 	spdk_io_device_unregister(g_accel_p, NULL);
3051 }
3052 
3053 static void
3054 test_get_memory_domains(void)
3055 {
3056 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3057 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3058 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3059 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3060 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3061 	struct spdk_memory_domain *domains[4] = {};
3062 	int rc = 0;
3063 
3064 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3065 
3066 	/* nvme controller doesn't have memory domains */
3067 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3068 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3069 	CU_ASSERT(rc == 0);
3070 	CU_ASSERT(domains[0] == NULL);
3071 	CU_ASSERT(domains[1] == NULL);
3072 
3073 	/* nvme controller has a memory domain */
3074 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3075 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3076 	CU_ASSERT(rc == 1);
3077 	CU_ASSERT(domains[0] != NULL);
3078 	memset(domains, 0, sizeof(domains));
3079 
3080 	/* multipath, 2 controllers report 1 memory domain each */
3081 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3082 
3083 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3084 	CU_ASSERT(rc == 2);
3085 	CU_ASSERT(domains[0] != NULL);
3086 	CU_ASSERT(domains[1] != NULL);
3087 	memset(domains, 0, sizeof(domains));
3088 
3089 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3090 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3091 	CU_ASSERT(rc == 2);
3092 
3093 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3094 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3095 	CU_ASSERT(rc == 2);
3096 	CU_ASSERT(domains[0] == NULL);
3097 	CU_ASSERT(domains[1] == NULL);
3098 
3099 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3100 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3101 	CU_ASSERT(rc == 2);
3102 	CU_ASSERT(domains[0] != NULL);
3103 	CU_ASSERT(domains[1] == NULL);
3104 	memset(domains, 0, sizeof(domains));
3105 
3106 	/* multipath, 2 controllers report 2 memory domains each (not possible in practice, just for test) */
3107 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3108 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3109 	CU_ASSERT(rc == 4);
3110 	CU_ASSERT(domains[0] != NULL);
3111 	CU_ASSERT(domains[1] != NULL);
3112 	CU_ASSERT(domains[2] != NULL);
3113 	CU_ASSERT(domains[3] != NULL);
3114 	memset(domains, 0, sizeof(domains));
3115 
3116 	/* multipath, 2 controllers report 2 memory domains each (not possible in practice,
3117 	 * just for test). The array size is less than the number of memory domains. */
3118 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3119 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3120 	CU_ASSERT(rc == 4);
3121 	CU_ASSERT(domains[0] != NULL);
3122 	CU_ASSERT(domains[1] != NULL);
3123 	CU_ASSERT(domains[2] != NULL);
3124 	CU_ASSERT(domains[3] == NULL);
3125 	memset(domains, 0, sizeof(domains));
3126 
3127 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3128 }
3129 
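/* The assertions above show that bdev_nvme_get_memory_domains() returns the
 * total domain count even when the array is NULL or too small, so a caller can
 * size the array in two passes. A hedged sketch (hypothetical helper, not part
 * of the original file; the NULL/0 first pass extrapolates from the NULL-array
 * and zero-size cases asserted above):
 */
static int
ut_fetch_memory_domains(struct nvme_bdev *nbdev, struct spdk_memory_domain ***_domains)
{
	struct spdk_memory_domain **domains;
	int count;

	/* First pass: query only the count. */
	count = bdev_nvme_get_memory_domains(nbdev, NULL, 0);
	if (count <= 0) {
		return count;
	}

	domains = calloc(count, sizeof(*domains));
	if (domains == NULL) {
		return -ENOMEM;
	}

	/* Second pass: fill the exactly-sized array. */
	count = bdev_nvme_get_memory_domains(nbdev, domains, count);
	*_domains = domains;

	return count;
}
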
3130 static void
3131 test_reconnect_qpair(void)
3132 {
3133 	struct spdk_nvme_transport_id trid = {};
3134 	struct spdk_nvme_ctrlr *ctrlr;
3135 	struct nvme_ctrlr *nvme_ctrlr;
3136 	const int STRING_SIZE = 32;
3137 	const char *attached_names[STRING_SIZE];
3138 	struct nvme_bdev *bdev;
3139 	struct spdk_io_channel *ch1, *ch2;
3140 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3141 	struct nvme_io_path *io_path1, *io_path2;
3142 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3143 	int rc;
3144 
3145 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3146 	ut_init_trid(&trid);
3147 
3148 	set_thread(0);
3149 
3150 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3151 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3152 
3153 	g_ut_attach_ctrlr_status = 0;
3154 	g_ut_attach_bdev_count = 1;
3155 
3156 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3157 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3158 	CU_ASSERT(rc == 0);
3159 
3160 	spdk_delay_us(1000);
3161 	poll_threads();
3162 
3163 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3164 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3165 
3166 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3167 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3168 
3169 	ch1 = spdk_get_io_channel(bdev);
3170 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3171 
3172 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3173 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3174 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3175 	nvme_qpair1 = io_path1->qpair;
3176 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3177 
3178 	set_thread(1);
3179 
3180 	ch2 = spdk_get_io_channel(bdev);
3181 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3182 
3183 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3184 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3185 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3186 	nvme_qpair2 = io_path2->qpair;
3187 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3188 
3189 	/* If a qpair is disconnected, it is freed and then reconnected via
3190 	 * resetting the corresponding nvme_ctrlr.
3191 	 */
3192 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3193 	ctrlr->is_failed = true;
3194 
3195 	poll_thread_times(1, 3);
3196 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3197 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3198 	CU_ASSERT(nvme_ctrlr->resetting == true);
3199 
3200 	poll_thread_times(0, 3);
3201 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3202 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3203 	CU_ASSERT(ctrlr->is_failed == true);
3204 
3205 	poll_thread_times(1, 2);
3206 	poll_thread_times(0, 1);
3207 	CU_ASSERT(ctrlr->is_failed == false);
3208 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3209 
3210 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3211 	poll_thread_times(0, 2);
3212 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3213 
3214 	poll_thread_times(0, 1);
3215 	poll_thread_times(1, 1);
3216 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3217 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3218 	CU_ASSERT(nvme_ctrlr->resetting == true);
3219 
3220 	poll_thread_times(0, 2);
3221 	poll_thread_times(1, 1);
3222 	poll_thread_times(0, 1);
3223 	CU_ASSERT(nvme_ctrlr->resetting == false);
3224 
3225 	poll_threads();
3226 
3227 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3228 	 * fails, the qpair is just freed.
3229 	 */
3230 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3231 	ctrlr->is_failed = true;
3232 	ctrlr->fail_reset = true;
3233 
3234 	poll_thread_times(1, 3);
3235 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3236 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3237 	CU_ASSERT(nvme_ctrlr->resetting == true);
3238 
3239 	poll_thread_times(0, 3);
3240 	poll_thread_times(1, 1);
3241 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3242 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3243 	CU_ASSERT(ctrlr->is_failed == true);
3244 
3245 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3246 	poll_thread_times(0, 3);
3247 	poll_thread_times(1, 1);
3248 	poll_thread_times(0, 1);
3249 	CU_ASSERT(ctrlr->is_failed == true);
3250 	CU_ASSERT(nvme_ctrlr->resetting == false);
3251 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3252 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3253 
3254 	poll_threads();
3255 
3256 	spdk_put_io_channel(ch2);
3257 
3258 	set_thread(0);
3259 
3260 	spdk_put_io_channel(ch1);
3261 
3262 	poll_threads();
3263 
3264 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3265 	CU_ASSERT(rc == 0);
3266 
3267 	poll_threads();
3268 	spdk_delay_us(1000);
3269 	poll_threads();
3270 
3271 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3272 }
3273 
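/* test_reconnect_qpair() above steps the pollers one call at a time to pin down
 * each intermediate state. When only the end state matters, the coarse settle
 * pattern is enough; a hedged sketch (hypothetical helper, not part of the
 * original file):
 */
static void
ut_fail_qpair_and_reconnect(struct nvme_qpair *nvme_qpair, struct spdk_nvme_ctrlr *ctrlr)
{
	/* Mark the qpair disconnected; the poller notices, frees the qpair, and
	 * resets the nvme_ctrlr, which reconnects the qpair on success.
	 */
	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
}
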
3274 static void
3275 test_create_bdev_ctrlr(void)
3276 {
3277 	struct nvme_path_id path1 = {}, path2 = {};
3278 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3279 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3280 	const int STRING_SIZE = 32;
3281 	const char *attached_names[STRING_SIZE];
3282 	int rc;
3283 
3284 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3285 	ut_init_trid(&path1.trid);
3286 	ut_init_trid2(&path2.trid);
3287 
3288 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3289 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3290 
3291 	g_ut_attach_ctrlr_status = 0;
3292 	g_ut_attach_bdev_count = 0;
3293 
3294 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3295 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3296 	CU_ASSERT(rc == 0);
3297 
3298 	spdk_delay_us(1000);
3299 	poll_threads();
3300 
3301 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3302 	poll_threads();
3303 
3304 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3305 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3306 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3307 
3308 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3309 	g_ut_attach_ctrlr_status = -EINVAL;
3310 
3311 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3312 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3313 
3314 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3315 
3316 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3317 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3318 	CU_ASSERT(rc == 0);
3319 
3320 	spdk_delay_us(1000);
3321 	poll_threads();
3322 
3323 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3324 	poll_threads();
3325 
3326 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3327 
3328 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3329 	g_ut_attach_ctrlr_status = 0;
3330 
3331 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3332 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3333 
3334 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3335 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3336 	CU_ASSERT(rc == 0);
3337 
3338 	spdk_delay_us(1000);
3339 	poll_threads();
3340 
3341 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3342 	poll_threads();
3343 
3344 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3345 
3346 	/* Delete two ctrlrs at once. */
3347 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3348 	CU_ASSERT(rc == 0);
3349 
3350 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3351 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3352 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3353 
3354 	poll_threads();
3355 	spdk_delay_us(1000);
3356 	poll_threads();
3357 
3358 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3359 
3360 	/* Add two ctrlrs and delete them one by one. */
3361 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3362 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3363 
3364 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3365 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3366 
3367 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3368 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3369 	CU_ASSERT(rc == 0);
3370 
3371 	spdk_delay_us(1000);
3372 	poll_threads();
3373 
3374 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3375 	poll_threads();
3376 
3377 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3378 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3379 	CU_ASSERT(rc == 0);
3380 
3381 	spdk_delay_us(1000);
3382 	poll_threads();
3383 
3384 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3385 	poll_threads();
3386 
3387 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3388 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3389 
3390 	rc = bdev_nvme_delete("nvme0", &path1);
3391 	CU_ASSERT(rc == 0);
3392 
3393 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3394 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3395 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3396 
3397 	poll_threads();
3398 	spdk_delay_us(1000);
3399 	poll_threads();
3400 
3401 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3402 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3403 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3404 
3405 	rc = bdev_nvme_delete("nvme0", &path2);
3406 	CU_ASSERT(rc == 0);
3407 
3408 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3409 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3410 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3411 
3412 	poll_threads();
3413 	spdk_delay_us(1000);
3414 	poll_threads();
3415 
3416 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3417 }
3418 
3419 static struct nvme_ns *
3420 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3421 {
3422 	struct nvme_ns *nvme_ns;
3423 
3424 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3425 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3426 			return nvme_ns;
3427 		}
3428 	}
3429 
3430 	return NULL;
3431 }
3432 
3433 static void
3434 test_add_multi_ns_to_bdev(void)
3435 {
3436 	struct nvme_path_id path1 = {}, path2 = {};
3437 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3438 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3439 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3440 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3441 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3442 	const int STRING_SIZE = 32;
3443 	const char *attached_names[STRING_SIZE];
3444 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3445 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3446 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3447 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3448 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3449 	int rc;
3450 
3451 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3452 	ut_init_trid(&path1.trid);
3453 	ut_init_trid2(&path2.trid);
3454 
3455 	/* Create nvme_bdevs, some of which are backed by namespaces shared between the two ctrlrs. */
3456 
3457 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd,
3458 	 * and 4th namespaces are populated.
3459 	 */
3460 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3461 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3462 
3463 	ctrlr1->ns[1].is_active = false;
3464 	ctrlr1->ns[4].is_active = false;
3465 	ctrlr1->ns[0].uuid = &uuid1;
3466 	ctrlr1->ns[2].uuid = &uuid3;
3467 	ctrlr1->ns[3].uuid = &uuid4;
3468 
3469 	g_ut_attach_ctrlr_status = 0;
3470 	g_ut_attach_bdev_count = 3;
3471 
3472 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3473 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3474 	CU_ASSERT(rc == 0);
3475 
3476 	spdk_delay_us(1000);
3477 	poll_threads();
3478 
3479 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3480 	poll_threads();
3481 
3482 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
3483 	 * and 4th namespaces are populated. The uuid of the 4th namespace is different,
3484 	 * and hence adding the 4th namespace to the existing bdev should fail.
3485 	 */
3486 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3487 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3488 
3489 	ctrlr2->ns[2].is_active = false;
3490 	ctrlr2->ns[4].is_active = false;
3491 	ctrlr2->ns[0].uuid = &uuid1;
3492 	ctrlr2->ns[1].uuid = &uuid2;
3493 	ctrlr2->ns[3].uuid = &uuid44;
3494 
3495 	g_ut_attach_ctrlr_status = 0;
3496 	g_ut_attach_bdev_count = 2;
3497 
3498 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3499 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3500 	CU_ASSERT(rc == 0);
3501 
3502 	spdk_delay_us(1000);
3503 	poll_threads();
3504 
3505 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3506 	poll_threads();
3507 
3508 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3509 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3510 
3511 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3512 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3513 
3514 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3515 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3516 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3517 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3518 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3519 
3520 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3521 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3522 
3523 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3524 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3525 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3526 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3527 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3528 
3529 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3530 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3531 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3532 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3533 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3534 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3535 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3536 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3537 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3538 
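	/* bdev1 is backed by a namespace from each ctrlr (both report uuid1), so
	 * its reference count is 2. The other bdevs each have a single backing
	 * namespace.
	 */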
3539 	CU_ASSERT(bdev1->ref == 2);
3540 	CU_ASSERT(bdev2->ref == 1);
3541 	CU_ASSERT(bdev3->ref == 1);
3542 	CU_ASSERT(bdev4->ref == 1);
3543 
3544 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3545 	rc = bdev_nvme_delete("nvme0", &path1);
3546 	CU_ASSERT(rc == 0);
3547 
3548 	poll_threads();
3549 	spdk_delay_us(1000);
3550 	poll_threads();
3551 
3552 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3553 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3554 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3555 
3556 	rc = bdev_nvme_delete("nvme0", &path2);
3557 	CU_ASSERT(rc == 0);
3558 
3559 	poll_threads();
3560 	spdk_delay_us(1000);
3561 	poll_threads();
3562 
3563 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3564 
3565 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3566 	 * can be deleted when the bdev subsystem shuts down.
3567 	 */
3568 	g_ut_attach_bdev_count = 1;
3569 
3570 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3571 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3572 
3573 	ctrlr1->ns[0].uuid = &uuid1;
3574 
3575 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3576 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3577 	CU_ASSERT(rc == 0);
3578 
3579 	spdk_delay_us(1000);
3580 	poll_threads();
3581 
3582 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3583 	poll_threads();
3584 
3585 	ut_init_trid2(&path2.trid);
3586 
3587 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3588 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3589 
3590 	ctrlr2->ns[0].uuid = &uuid1;
3591 
3592 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3593 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3594 	CU_ASSERT(rc == 0);
3595 
3596 	spdk_delay_us(1000);
3597 	poll_threads();
3598 
3599 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3600 	poll_threads();
3601 
3602 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3603 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3604 
3605 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3606 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3607 
3608 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3609 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3610 
3611 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3612 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3613 
3614 	/* Check that the nvme_bdev has two nvme_ns, one from each ctrlr. */
3615 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3616 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3617 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3618 
3619 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3620 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3621 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3622 
3623 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3624 	bdev_nvme_destruct(&bdev1->disk);
3625 
3626 	poll_threads();
3627 
3628 	CU_ASSERT(nvme_ns1->bdev == NULL);
3629 	CU_ASSERT(nvme_ns2->bdev == NULL);
3630 
3631 	nvme_ctrlr1->destruct = true;
3632 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3633 
3634 	poll_threads();
3635 	spdk_delay_us(1000);
3636 	poll_threads();
3637 
3638 	nvme_ctrlr2->destruct = true;
3639 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3640 
3641 	poll_threads();
3642 	spdk_delay_us(1000);
3643 	poll_threads();
3644 
3645 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3646 }
3647 
3648 static void
3649 test_add_multi_io_paths_to_nbdev_ch(void)
3650 {
3651 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3652 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3653 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3654 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3655 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3656 	const int STRING_SIZE = 32;
3657 	const char *attached_names[STRING_SIZE];
3658 	struct nvme_bdev *bdev;
3659 	struct spdk_io_channel *ch;
3660 	struct nvme_bdev_channel *nbdev_ch;
3661 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3662 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3663 	int rc;
3664 
3665 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3666 	ut_init_trid(&path1.trid);
3667 	ut_init_trid2(&path2.trid);
3668 	ut_init_trid3(&path3.trid);
3669 	g_ut_attach_ctrlr_status = 0;
3670 	g_ut_attach_bdev_count = 1;
3671 
3672 	set_thread(1);
3673 
3674 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3675 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3676 
3677 	ctrlr1->ns[0].uuid = &uuid1;
3678 
3679 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3680 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3681 	CU_ASSERT(rc == 0);
3682 
3683 	spdk_delay_us(1000);
3684 	poll_threads();
3685 
3686 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3687 	poll_threads();
3688 
3689 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3690 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3691 
3692 	ctrlr2->ns[0].uuid = &uuid1;
3693 
3694 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3695 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3696 	CU_ASSERT(rc == 0);
3697 
3698 	spdk_delay_us(1000);
3699 	poll_threads();
3700 
3701 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3702 	poll_threads();
3703 
3704 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3705 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3706 
3707 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3708 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3709 
3710 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3711 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3712 
3713 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3714 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3715 
3716 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3717 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3718 
3719 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3720 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3721 
3722 	set_thread(0);
3723 
3724 	ch = spdk_get_io_channel(bdev);
3725 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3726 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3727 
3728 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3729 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3730 
3731 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3732 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3733 
3734 	set_thread(1);
3735 
3736 	/* Check if an I/O path is dynamically added to the existing nvme_bdev_channel. */
3737 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3738 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3739 
3740 	ctrlr3->ns[0].uuid = &uuid1;
3741 
3742 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3743 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3744 	CU_ASSERT(rc == 0);
3745 
3746 	spdk_delay_us(1000);
3747 	poll_threads();
3748 
3749 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3750 	poll_threads();
3751 
3752 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3753 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3754 
3755 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3756 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3757 
3758 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3759 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3760 
3761 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3762 	rc = bdev_nvme_delete("nvme0", &path2);
3763 	CU_ASSERT(rc == 0);
3764 
3765 	poll_threads();
3766 	spdk_delay_us(1000);
3767 	poll_threads();
3768 
3769 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3770 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3771 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3772 
3773 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3774 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3775 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3776 
3777 	set_thread(0);
3778 
3779 	spdk_put_io_channel(ch);
3780 
3781 	poll_threads();
3782 
3783 	set_thread(1);
3784 
3785 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3786 	CU_ASSERT(rc == 0);
3787 
3788 	poll_threads();
3789 	spdk_delay_us(1000);
3790 	poll_threads();
3791 
3792 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3793 }
3794 
3795 static void
3796 test_admin_path(void)
3797 {
3798 	struct nvme_path_id path1 = {}, path2 = {};
3799 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3800 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3801 	const int STRING_SIZE = 32;
3802 	const char *attached_names[STRING_SIZE];
3803 	struct nvme_bdev *bdev;
3804 	struct spdk_io_channel *ch;
3805 	struct spdk_bdev_io *bdev_io;
3806 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3807 	int rc;
3808 
3809 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3810 	ut_init_trid(&path1.trid);
3811 	ut_init_trid2(&path2.trid);
3812 	g_ut_attach_ctrlr_status = 0;
3813 	g_ut_attach_bdev_count = 1;
3814 
3815 	set_thread(0);
3816 
3817 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3818 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3819 
3820 	ctrlr1->ns[0].uuid = &uuid1;
3821 
3822 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3823 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3824 	CU_ASSERT(rc == 0);
3825 
3826 	spdk_delay_us(1000);
3827 	poll_threads();
3828 
3829 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3830 	poll_threads();
3831 
3832 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3833 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3834 
3835 	ctrlr2->ns[0].uuid = &uuid1;
3836 
3837 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3838 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3839 	CU_ASSERT(rc == 0);
3840 
3841 	spdk_delay_us(1000);
3842 	poll_threads();
3843 
3844 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3845 	poll_threads();
3846 
3847 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3848 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3849 
3850 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3851 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3852 
3853 	ch = spdk_get_io_channel(bdev);
3854 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3855 
3856 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3857 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3858 
3859 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3860 	 * submitted to ctrlr2.
3861 	 */
3862 	ctrlr1->is_failed = true;
3863 	bdev_io->internal.in_submit_request = true;
3864 
3865 	bdev_nvme_submit_request(ch, bdev_io);
3866 
3867 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3868 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3869 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3870 
3871 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3872 	poll_threads();
3873 
3874 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3875 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3876 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3877 
3878 	/* Both ctrlr1 and ctrlr2 are failed. Submission of the admin command should fail. */
3879 	ctrlr2->is_failed = true;
3880 	bdev_io->internal.in_submit_request = true;
3881 
3882 	bdev_nvme_submit_request(ch, bdev_io);
3883 
3884 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3885 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3886 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3887 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3888 
3889 	free(bdev_io);
3890 
3891 	spdk_put_io_channel(ch);
3892 
3893 	poll_threads();
3894 
3895 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3896 	CU_ASSERT(rc == 0);
3897 
3898 	poll_threads();
3899 	spdk_delay_us(1000);
3900 	poll_threads();
3901 
3902 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3903 }
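
/* Illustrative sketch only: one way to express the admin-path fallback that
 * test_admin_path() exercises. This is an assumption for illustration, not
 * the actual submission logic (see bdev_nvme_submit_request() in bdev_nvme.c),
 * and the helper name ut_example_find_admin_path is hypothetical.
 */
#if 0
static struct nvme_ctrlr *
ut_example_find_admin_path(struct nvme_bdev_ctrlr *nbdev_ctrlr)
{
	struct nvme_ctrlr *nvme_ctrlr;

	/* Walk the ctrlr list and pick the first ctrlr that is not failed. */
	TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
		if (!spdk_nvme_ctrlr_is_failed(nvme_ctrlr->ctrlr)) {
			return nvme_ctrlr;
		}
	}

	return NULL;
}
#endif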
3904 
3905 static struct nvme_io_path *
3906 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3907 			struct nvme_ctrlr *nvme_ctrlr)
3908 {
3909 	struct nvme_io_path *io_path;
3910 
3911 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3912 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3913 			return io_path;
3914 		}
3915 	}
3916 
3917 	return NULL;
3918 }
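
/* Note: ut_get_io_path_by_ctrlr() resolves the nvme_io_path that a given
 * nvme_ctrlr contributes to an nvme_bdev_channel, letting the tests follow
 * one specific path through a multipath channel.
 */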
3919 
3920 static void
3921 test_reset_bdev_ctrlr(void)
3922 {
3923 	struct nvme_path_id path1 = {}, path2 = {};
3924 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3925 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3926 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3927 	struct nvme_path_id *curr_path1, *curr_path2;
3928 	const int STRING_SIZE = 32;
3929 	const char *attached_names[STRING_SIZE];
3930 	struct nvme_bdev *bdev;
3931 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3932 	struct nvme_bdev_io *first_bio;
3933 	struct spdk_io_channel *ch1, *ch2;
3934 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3935 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3936 	int rc;
3937 
3938 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3939 	ut_init_trid(&path1.trid);
3940 	ut_init_trid2(&path2.trid);
3941 	g_ut_attach_ctrlr_status = 0;
3942 	g_ut_attach_bdev_count = 1;
3943 
3944 	set_thread(0);
3945 
3946 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3947 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3948 
3949 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3950 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3951 	CU_ASSERT(rc == 0);
3952 
3953 	spdk_delay_us(1000);
3954 	poll_threads();
3955 
3956 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3957 	poll_threads();
3958 
3959 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3960 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3961 
3962 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3963 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3964 	CU_ASSERT(rc == 0);
3965 
3966 	spdk_delay_us(1000);
3967 	poll_threads();
3968 
3969 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3970 	poll_threads();
3971 
3972 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3973 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3974 
3975 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3976 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3977 
3978 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3979 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3980 
3981 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3982 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3983 
3984 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3985 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3986 
3987 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3988 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3989 
3990 	set_thread(0);
3991 
3992 	ch1 = spdk_get_io_channel(bdev);
3993 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3994 
3995 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3996 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3997 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3998 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3999 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4000 
4001 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4002 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4003 
4004 	set_thread(1);
4005 
4006 	ch2 = spdk_get_io_channel(bdev);
4007 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4008 
4009 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4010 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4011 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4012 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4013 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4014 
4015 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4016 
4017 	/* The first reset request from bdev_io is submitted on thread 0.
4018 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4019 	 *
4020 	 * A few extra polls are necessary after resetting ctrlr1 to check
4021 	 * pending reset requests for ctrlr1.
4022 	 */
4023 	ctrlr1->is_failed = true;
4024 	curr_path1->is_failed = true;
4025 	ctrlr2->is_failed = true;
4026 	curr_path2->is_failed = true;
4027 
4028 	set_thread(0);
4029 
4030 	bdev_nvme_submit_request(ch1, first_bdev_io);
4031 	CU_ASSERT(first_bio->io_path == io_path11);
4032 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4033 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4034 
4035 	poll_thread_times(0, 3);
4036 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4037 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4038 
4039 	poll_thread_times(1, 2);
4040 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4041 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4042 	CU_ASSERT(ctrlr1->is_failed == true);
4043 
4044 	poll_thread_times(0, 1);
4045 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4046 	CU_ASSERT(ctrlr1->is_failed == false);
4047 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4048 	CU_ASSERT(curr_path1->is_failed == true);
4049 
4050 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4051 	poll_thread_times(0, 2);
4052 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4053 
4054 	poll_thread_times(0, 1);
4055 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4056 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4057 
4058 	poll_thread_times(1, 1);
4059 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4060 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4061 
4062 	poll_thread_times(0, 2);
4063 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4064 	poll_thread_times(1, 1);
4065 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4066 	poll_thread_times(0, 2);
4067 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4068 	CU_ASSERT(curr_path1->is_failed == false);
4069 	CU_ASSERT(first_bio->io_path == io_path12);
4070 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4071 
4072 	poll_thread_times(0, 3);
4073 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4074 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4075 
4076 	poll_thread_times(1, 2);
4077 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4078 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4079 	CU_ASSERT(ctrlr2->is_failed == true);
4080 
4081 	poll_thread_times(0, 1);
4082 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4083 	CU_ASSERT(ctrlr2->is_failed == false);
4084 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4085 	CU_ASSERT(curr_path2->is_failed == true);
4086 
4087 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4088 	poll_thread_times(0, 2);
4089 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4090 
4091 	poll_thread_times(0, 1);
4092 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4093 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4094 
4095 	poll_thread_times(1, 2);
4096 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4097 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4098 
4099 	poll_thread_times(0, 2);
4100 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4101 	poll_thread_times(1, 1);
4102 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4103 	poll_thread_times(0, 2);
4104 	CU_ASSERT(first_bio->io_path == NULL);
4105 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4106 	CU_ASSERT(curr_path2->is_failed == false);
4107 
4108 	poll_threads();
4109 
4110 	/* There is a race between two reset requests from bdev_io.
4111 	 *
4112 	 * The first reset request is submitted on thread 0, and the second reset
4113 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4114 	 * The second is pended on ctrlr1. After the first completes resetting ctrlr1,
4115 	 * both reset requests proceed to ctrlr2, the first ahead of the second.
4116 	 * The second is pended on ctrlr2 again. After the first completes resetting
4117 	 * ctrlr2, both complete successfully.
4118 	 */
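	/* Rough timeline of the race, as described above:
	 *   t0: the first reset starts resetting ctrlr1; the second is queued on
	 *       ctrlr1's pending_resets.
	 *   t1: the first reset finishes ctrlr1 and moves on to ctrlr2; the
	 *       dequeued second reset follows and is queued on ctrlr2.
	 *   t2: the first reset finishes ctrlr2; the second then runs and both
	 *       bdev_ios complete with success.
	 */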
4119 	ctrlr1->is_failed = true;
4120 	curr_path1->is_failed = true;
4121 	ctrlr2->is_failed = true;
4122 	curr_path2->is_failed = true;
4123 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4124 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4125 
4126 	set_thread(0);
4127 
4128 	bdev_nvme_submit_request(ch1, first_bdev_io);
4129 
4130 	set_thread(1);
4131 
4132 	bdev_nvme_submit_request(ch2, second_bdev_io);
4133 
4134 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4135 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4136 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4137 
4138 	poll_threads();
4139 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4140 	poll_threads();
4141 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4142 	poll_threads();
4143 
4144 	CU_ASSERT(ctrlr1->is_failed == false);
4145 	CU_ASSERT(curr_path1->is_failed == false);
4146 	CU_ASSERT(ctrlr2->is_failed == false);
4147 	CU_ASSERT(curr_path2->is_failed == false);
4148 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4149 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4150 
4151 	set_thread(0);
4152 
4153 	spdk_put_io_channel(ch1);
4154 
4155 	set_thread(1);
4156 
4157 	spdk_put_io_channel(ch2);
4158 
4159 	poll_threads();
4160 
4161 	set_thread(0);
4162 
4163 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4164 	CU_ASSERT(rc == 0);
4165 
4166 	poll_threads();
4167 	spdk_delay_us(1000);
4168 	poll_threads();
4169 
4170 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4171 
4172 	free(first_bdev_io);
4173 	free(second_bdev_io);
4174 }
4175 
4176 static void
4177 test_find_io_path(void)
4178 {
4179 	struct nvme_bdev_channel nbdev_ch = {
4180 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4181 	};
4182 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4183 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4184 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4185 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4186 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4187 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4188 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4189 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4190 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4191 
4192 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4193 
4194 	/* Test that an io_path whose ANA state is not accessible is excluded. */
4195 
4196 	nvme_qpair1.qpair = &qpair1;
4197 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4198 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4199 
4200 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4201 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4202 
4203 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4204 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4205 
4206 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4207 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4208 
4209 	nbdev_ch.current_io_path = NULL;
4210 
4211 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4212 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4213 
4214 	nbdev_ch.current_io_path = NULL;
4215 
4216 	/* Test that an io_path whose qpair is resetting is excluded. */
4217 
4218 	nvme_qpair1.qpair = NULL;
4219 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4220 
4221 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4222 
4223 	/* Test that an ANA optimized io_path is preferred, and that otherwise the
4224 	 * first found ANA non-optimized io_path is chosen.
4225 	 */
4226 
4227 	nvme_qpair1.qpair = &qpair1;
4228 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4229 	nvme_qpair2.qpair = &qpair2;
4230 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4231 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4232 
4233 	nbdev_ch.current_io_path = NULL;
4234 
4235 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4236 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4237 
4238 	nbdev_ch.current_io_path = NULL;
4239 }
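
/* Illustrative sketch only: the selection policy that the assertions in
 * test_find_io_path() encode, written out as standalone code. This is not
 * the actual implementation (see bdev_nvme_find_io_path() in bdev_nvme.c),
 * and the helper name ut_example_find_io_path is hypothetical.
 */
#if 0
static struct nvme_io_path *
ut_example_find_io_path(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_io_path *io_path, *non_optimized = NULL;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		if (io_path->qpair->qpair == NULL) {
			/* The qpair is being reset and cannot carry I/O. */
			continue;
		}

		switch (io_path->nvme_ns->ana_state) {
		case SPDK_NVME_ANA_OPTIMIZED_STATE:
			/* An optimized path wins immediately. */
			return io_path;
		case SPDK_NVME_ANA_NON_OPTIMIZED_STATE:
			/* Remember the first non-optimized path as a fallback. */
			if (non_optimized == NULL) {
				non_optimized = io_path;
			}
			break;
		default:
			/* Inaccessible, persistent loss, and change states are excluded. */
			break;
		}
	}

	return non_optimized;
}
#endif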
4240 
4241 static void
4242 test_retry_io_if_ana_state_is_updating(void)
4243 {
4244 	struct nvme_path_id path = {};
4245 	struct nvme_ctrlr_opts opts = {};
4246 	struct spdk_nvme_ctrlr *ctrlr;
4247 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4248 	struct nvme_ctrlr *nvme_ctrlr;
4249 	const int STRING_SIZE = 32;
4250 	const char *attached_names[STRING_SIZE];
4251 	struct nvme_bdev *bdev;
4252 	struct nvme_ns *nvme_ns;
4253 	struct spdk_bdev_io *bdev_io1;
4254 	struct spdk_io_channel *ch;
4255 	struct nvme_bdev_channel *nbdev_ch;
4256 	struct nvme_io_path *io_path;
4257 	struct nvme_qpair *nvme_qpair;
4258 	int rc;
4259 
4260 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4261 	ut_init_trid(&path.trid);
4262 
4263 	set_thread(0);
4264 
4265 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4266 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4267 
4268 	g_ut_attach_ctrlr_status = 0;
4269 	g_ut_attach_bdev_count = 1;
4270 
4271 	opts.ctrlr_loss_timeout_sec = -1;
4272 	opts.reconnect_delay_sec = 1;
4273 
4274 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4275 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4276 	CU_ASSERT(rc == 0);
4277 
4278 	spdk_delay_us(1000);
4279 	poll_threads();
4280 
4281 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4282 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4283 
4284 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4285 	CU_ASSERT(nvme_ctrlr != NULL);
4286 
4287 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4288 	CU_ASSERT(bdev != NULL);
4289 
4290 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4291 	CU_ASSERT(nvme_ns != NULL);
4292 
4293 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4294 	ut_bdev_io_set_buf(bdev_io1);
4295 
4296 	ch = spdk_get_io_channel(bdev);
4297 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4298 
4299 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4300 
4301 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4302 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4303 
4304 	nvme_qpair = io_path->qpair;
4305 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4306 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4307 
4308 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4309 
4310 	/* If qpair is connected, I/O should succeed. */
4311 	bdev_io1->internal.in_submit_request = true;
4312 
4313 	bdev_nvme_submit_request(ch, bdev_io1);
4314 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4315 
4316 	poll_threads();
4317 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4318 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4319 
4320 	/* If the ANA state of the namespace is inaccessible, I/O should be queued. */
4321 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4322 	nbdev_ch->current_io_path = NULL;
4323 
4324 	bdev_io1->internal.in_submit_request = true;
4325 
4326 	bdev_nvme_submit_request(ch, bdev_io1);
4327 
4328 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4329 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4330 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4331 
4332 	/* ANA state became accessible while I/O was queued. */
4333 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4334 
4335 	spdk_delay_us(1000000);
4336 
4337 	poll_thread_times(0, 1);
4338 
4339 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4340 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4341 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4342 
4343 	poll_threads();
4344 
4345 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4346 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4347 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4348 
4349 	free(bdev_io1);
4350 
4351 	spdk_put_io_channel(ch);
4352 
4353 	poll_threads();
4354 
4355 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4356 	CU_ASSERT(rc == 0);
4357 
4358 	poll_threads();
4359 	spdk_delay_us(1000);
4360 	poll_threads();
4361 
4362 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4363 }
4364 
4365 static void
4366 test_retry_io_for_io_path_error(void)
4367 {
4368 	struct nvme_path_id path1 = {}, path2 = {};
4369 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4370 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4371 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4372 	const int STRING_SIZE = 32;
4373 	const char *attached_names[STRING_SIZE];
4374 	struct nvme_bdev *bdev;
4375 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4376 	struct spdk_bdev_io *bdev_io;
4377 	struct nvme_bdev_io *bio;
4378 	struct spdk_io_channel *ch;
4379 	struct nvme_bdev_channel *nbdev_ch;
4380 	struct nvme_io_path *io_path1, *io_path2;
4381 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4382 	struct ut_nvme_req *req;
4383 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4384 	int rc;
4385 
4386 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4387 	ut_init_trid(&path1.trid);
4388 	ut_init_trid2(&path2.trid);
4389 
4390 	g_opts.bdev_retry_count = 1;
4391 
4392 	set_thread(0);
4393 
4394 	g_ut_attach_ctrlr_status = 0;
4395 	g_ut_attach_bdev_count = 1;
4396 
4397 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4398 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4399 
4400 	ctrlr1->ns[0].uuid = &uuid1;
4401 
4402 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4403 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4404 	CU_ASSERT(rc == 0);
4405 
4406 	spdk_delay_us(1000);
4407 	poll_threads();
4408 
4409 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4410 	poll_threads();
4411 
4412 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4413 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4414 
4415 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4416 	CU_ASSERT(nvme_ctrlr1 != NULL);
4417 
4418 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4419 	CU_ASSERT(bdev != NULL);
4420 
4421 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4422 	CU_ASSERT(nvme_ns1 != NULL);
4423 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4424 
4425 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4426 	ut_bdev_io_set_buf(bdev_io);
4427 
4428 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4429 
4430 	ch = spdk_get_io_channel(bdev);
4431 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4432 
4433 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4434 
4435 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4436 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4437 
4438 	nvme_qpair1 = io_path1->qpair;
4439 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4440 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4441 
4442 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4443 
4444 	/* I/O got a temporary I/O path error, but it should not be retried because DNR is set. */
4445 	bdev_io->internal.in_submit_request = true;
4446 
4447 	bdev_nvme_submit_request(ch, bdev_io);
4448 
4449 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4450 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4451 
4452 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4453 	SPDK_CU_ASSERT_FATAL(req != NULL);
4454 
4455 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4456 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4457 	req->cpl.status.dnr = 1;
4458 
4459 	poll_thread_times(0, 1);
4460 
4461 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4462 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4463 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4464 
4465 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4466 	bdev_io->internal.in_submit_request = true;
4467 
4468 	bdev_nvme_submit_request(ch, bdev_io);
4469 
4470 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4471 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4472 
4473 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4474 	SPDK_CU_ASSERT_FATAL(req != NULL);
4475 
4476 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4477 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4478 
4479 	poll_thread_times(0, 1);
4480 
4481 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4482 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4483 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4484 
4485 	poll_threads();
4486 
4487 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4488 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4489 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4490 
4491 	/* Add io_path2 dynamically, and create a multipath configuration. */
4492 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4493 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4494 
4495 	ctrlr2->ns[0].uuid = &uuid1;
4496 
4497 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4498 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4499 	CU_ASSERT(rc == 0);
4500 
4501 	spdk_delay_us(1000);
4502 	poll_threads();
4503 
4504 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4505 	poll_threads();
4506 
4507 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4508 	CU_ASSERT(nvme_ctrlr2 != NULL);
4509 
4510 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4511 	CU_ASSERT(nvme_ns2 != NULL);
4512 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4513 
4514 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4515 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4516 
4517 	nvme_qpair2 = io_path2->qpair;
4518 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4519 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4520 
4521 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4522 	 * and deleted, so the I/O is aborted. However, io_path2 is available.
4523 	 * Hence, after a retry, the I/O is submitted to io_path2 and should succeed.
4524 	 */
4525 	bdev_io->internal.in_submit_request = true;
4526 
4527 	bdev_nvme_submit_request(ch, bdev_io);
4528 
4529 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4530 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4531 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4532 
4533 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4534 	SPDK_CU_ASSERT_FATAL(req != NULL);
4535 
4536 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4537 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4538 
4539 	poll_thread_times(0, 1);
4540 
4541 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4542 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4543 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4544 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4545 
4546 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4547 	nvme_qpair1->qpair = NULL;
4548 
4549 	poll_threads();
4550 
4551 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4552 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4553 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4554 
4555 	free(bdev_io);
4556 
4557 	spdk_put_io_channel(ch);
4558 
4559 	poll_threads();
4560 
4561 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4562 	CU_ASSERT(rc == 0);
4563 
4564 	poll_threads();
4565 	spdk_delay_us(1000);
4566 	poll_threads();
4567 
4568 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4569 
4570 	g_opts.bdev_retry_count = 0;
4571 }
4572 
4573 static void
4574 test_retry_io_count(void)
4575 {
4576 	struct nvme_path_id path = {};
4577 	struct spdk_nvme_ctrlr *ctrlr;
4578 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4579 	struct nvme_ctrlr *nvme_ctrlr;
4580 	const int STRING_SIZE = 32;
4581 	const char *attached_names[STRING_SIZE];
4582 	struct nvme_bdev *bdev;
4583 	struct nvme_ns *nvme_ns;
4584 	struct spdk_bdev_io *bdev_io;
4585 	struct nvme_bdev_io *bio;
4586 	struct spdk_io_channel *ch;
4587 	struct nvme_bdev_channel *nbdev_ch;
4588 	struct nvme_io_path *io_path;
4589 	struct nvme_qpair *nvme_qpair;
4590 	struct ut_nvme_req *req;
4591 	int rc;
4592 
4593 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4594 	ut_init_trid(&path.trid);
4595 
4596 	set_thread(0);
4597 
4598 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4599 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4600 
4601 	g_ut_attach_ctrlr_status = 0;
4602 	g_ut_attach_bdev_count = 1;
4603 
4604 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4605 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4606 	CU_ASSERT(rc == 0);
4607 
4608 	spdk_delay_us(1000);
4609 	poll_threads();
4610 
4611 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4612 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4613 
4614 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4615 	CU_ASSERT(nvme_ctrlr != NULL);
4616 
4617 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4618 	CU_ASSERT(bdev != NULL);
4619 
4620 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4621 	CU_ASSERT(nvme_ns != NULL);
4622 
4623 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4624 	ut_bdev_io_set_buf(bdev_io);
4625 
4626 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4627 
4628 	ch = spdk_get_io_channel(bdev);
4629 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4630 
4631 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4632 
4633 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4634 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4635 
4636 	nvme_qpair = io_path->qpair;
4637 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4638 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4639 
4640 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4641 
4642 	/* If I/O is aborted by request, it should not be retried. */
4643 	g_opts.bdev_retry_count = 1;
4644 
4645 	bdev_io->internal.in_submit_request = true;
4646 
4647 	bdev_nvme_submit_request(ch, bdev_io);
4648 
4649 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4650 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4651 
4652 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4653 	SPDK_CU_ASSERT_FATAL(req != NULL);
4654 
4655 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4656 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4657 
4658 	poll_thread_times(0, 1);
4659 
4660 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4661 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4662 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4663 
4664 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4665 	 * the failed I/O should not be retried.
4666 	 */
4667 	g_opts.bdev_retry_count = 4;
4668 
4669 	bdev_io->internal.in_submit_request = true;
4670 
4671 	bdev_nvme_submit_request(ch, bdev_io);
4672 
4673 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4674 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4675 
4676 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4677 	SPDK_CU_ASSERT_FATAL(req != NULL);
4678 
4679 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4680 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4681 	bio->retry_count = 4;
4682 
4683 	poll_thread_times(0, 1);
4684 
4685 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4686 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4687 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4688 
4689 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4690 	g_opts.bdev_retry_count = -1;
4691 
4692 	bdev_io->internal.in_submit_request = true;
4693 
4694 	bdev_nvme_submit_request(ch, bdev_io);
4695 
4696 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4697 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4698 
4699 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4700 	SPDK_CU_ASSERT_FATAL(req != NULL);
4701 
4702 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4703 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4704 	bio->retry_count = 4;
4705 
4706 	poll_thread_times(0, 1);
4707 
4708 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4709 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4710 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4711 
4712 	poll_threads();
4713 
4714 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4715 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4716 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4717 
4718 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4719 	 * the failed I/O should be retried.
4720 	 */
4721 	g_opts.bdev_retry_count = 4;
4722 
4723 	bdev_io->internal.in_submit_request = true;
4724 
4725 	bdev_nvme_submit_request(ch, bdev_io);
4726 
4727 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4728 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4729 
4730 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4731 	SPDK_CU_ASSERT_FATAL(req != NULL);
4732 
4733 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4734 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4735 	bio->retry_count = 3;
4736 
4737 	poll_thread_times(0, 1);
4738 
4739 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4740 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4741 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4742 
4743 	poll_threads();
4744 
4745 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4746 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4747 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4748 
4749 	free(bdev_io);
4750 
4751 	spdk_put_io_channel(ch);
4752 
4753 	poll_threads();
4754 
4755 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4756 	CU_ASSERT(rc == 0);
4757 
4758 	poll_threads();
4759 	spdk_delay_us(1000);
4760 	poll_threads();
4761 
4762 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4763 
4764 	g_opts.bdev_retry_count = 0;
4765 }
4766 
4767 static void
4768 test_concurrent_read_ana_log_page(void)
4769 {
4770 	struct spdk_nvme_transport_id trid = {};
4771 	struct spdk_nvme_ctrlr *ctrlr;
4772 	struct nvme_ctrlr *nvme_ctrlr;
4773 	const int STRING_SIZE = 32;
4774 	const char *attached_names[STRING_SIZE];
4775 	int rc;
4776 
4777 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4778 	ut_init_trid(&trid);
4779 
4780 	set_thread(0);
4781 
4782 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4783 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4784 
4785 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4786 
4787 	g_ut_attach_ctrlr_status = 0;
4788 	g_ut_attach_bdev_count = 1;
4789 
4790 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4791 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4792 	CU_ASSERT(rc == 0);
4793 
4794 	spdk_delay_us(1000);
4795 	poll_threads();
4796 
4797 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4798 	poll_threads();
4799 
4800 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4801 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4802 
4803 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4804 
4805 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4806 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4807 
4808 	/* Subsequent read requests should be rejected. */
4809 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4810 
4811 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4812 
4813 	set_thread(1);
4814 
4815 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4816 
4817 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4818 
4819 	/* A reset request issued while the ANA log page is being read should not be rejected. */
4820 	rc = bdev_nvme_reset(nvme_ctrlr);
4821 	CU_ASSERT(rc == 0);
4822 
4823 	poll_threads();
4824 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4825 	poll_threads();
4826 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4827 	poll_threads();
4828 
4829 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4830 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4831 
4832 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4833 	rc = bdev_nvme_reset(nvme_ctrlr);
4834 	CU_ASSERT(rc == 0);
4835 
4836 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4837 
4838 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4839 
4840 	poll_threads();
4841 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4842 	poll_threads();
4843 
4844 	set_thread(0);
4845 
4846 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4847 	CU_ASSERT(rc == 0);
4848 
4849 	poll_threads();
4850 	spdk_delay_us(1000);
4851 	poll_threads();
4852 
4853 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4854 }
4855 
4856 static void
4857 test_retry_io_for_ana_error(void)
4858 {
4859 	struct nvme_path_id path = {};
4860 	struct spdk_nvme_ctrlr *ctrlr;
4861 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4862 	struct nvme_ctrlr *nvme_ctrlr;
4863 	const int STRING_SIZE = 32;
4864 	const char *attached_names[STRING_SIZE];
4865 	struct nvme_bdev *bdev;
4866 	struct nvme_ns *nvme_ns;
4867 	struct spdk_bdev_io *bdev_io;
4868 	struct nvme_bdev_io *bio;
4869 	struct spdk_io_channel *ch;
4870 	struct nvme_bdev_channel *nbdev_ch;
4871 	struct nvme_io_path *io_path;
4872 	struct nvme_qpair *nvme_qpair;
4873 	struct ut_nvme_req *req;
4874 	uint64_t now;
4875 	int rc;
4876 
4877 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4878 	ut_init_trid(&path.trid);
4879 
4880 	g_opts.bdev_retry_count = 1;
4881 
4882 	set_thread(0);
4883 
4884 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4885 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4886 
4887 	g_ut_attach_ctrlr_status = 0;
4888 	g_ut_attach_bdev_count = 1;
4889 
4890 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4891 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4892 	CU_ASSERT(rc == 0);
4893 
4894 	spdk_delay_us(1000);
4895 	poll_threads();
4896 
4897 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4898 	poll_threads();
4899 
4900 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4901 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4902 
4903 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4904 	CU_ASSERT(nvme_ctrlr != NULL);
4905 
4906 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4907 	CU_ASSERT(bdev != NULL);
4908 
4909 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4910 	CU_ASSERT(nvme_ns != NULL);
4911 
4912 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4913 	ut_bdev_io_set_buf(bdev_io);
4914 
4915 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4916 
4917 	ch = spdk_get_io_channel(bdev);
4918 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4919 
4920 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4921 
4922 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4923 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4924 
4925 	nvme_qpair = io_path->qpair;
4926 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4927 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4928 
4929 	now = spdk_get_ticks();
4930 
4931 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4932 
4933 	/* If I/O gets an ANA error, it should be queued, the corresponding namespace
4934 	 * should be frozen, and its ANA state should be updated.
4935 	 */
4936 	bdev_io->internal.in_submit_request = true;
4937 
4938 	bdev_nvme_submit_request(ch, bdev_io);
4939 
4940 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4941 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4942 
4943 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4944 	SPDK_CU_ASSERT_FATAL(req != NULL);
4945 
4946 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4947 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4948 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4949 
4950 	poll_thread_times(0, 1);
4951 
4952 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4953 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4954 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4955 	/* I/O should be retried immediately. */
4956 	CU_ASSERT(bio->retry_ticks == now);
4957 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4958 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4959 
4960 	poll_threads();
4961 
4962 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4963 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4964 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4965 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4966 	/* I/O should be retried after one second if no I/O path was found but
4967 	 * one may become available later.
4968 	 */
4969 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4970 
4971 	/* The namespace should be unfrozen once its ANA state update completes. */
4972 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4973 	poll_threads();
4974 
4975 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4976 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4977 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4978 
4979 	/* Retrying the queued I/O should succeed. */
4980 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4981 	poll_threads();
4982 
4983 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4984 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4985 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4986 
4987 	free(bdev_io);
4988 
4989 	spdk_put_io_channel(ch);
4990 
4991 	poll_threads();
4992 
4993 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4994 	CU_ASSERT(rc == 0);
4995 
4996 	poll_threads();
4997 	spdk_delay_us(1000);
4998 	poll_threads();
4999 
5000 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5001 
5002 	g_opts.bdev_retry_count = 0;
5003 }
5004 
5005 static void
5006 test_check_io_error_resiliency_params(void)
5007 {
5008 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5009 	 * 3rd parameter is fast_io_fail_timeout_sec.
5010 	 */
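	/* The expected results below imply the following validity rules (inferred
	 * from the test cases; bdev_nvme_check_io_error_resiliency_params() is
	 * authoritative):
	 * - ctrlr_loss_timeout_sec < -1 is invalid.
	 * - ctrlr_loss_timeout_sec == 0 requires reconnect_delay_sec == 0 and
	 *   fast_io_fail_timeout_sec == 0.
	 * - ctrlr_loss_timeout_sec == -1 (retry forever) or > 0 requires
	 *   reconnect_delay_sec > 0, and if ctrlr_loss_timeout_sec > 0,
	 *   reconnect_delay_sec must not exceed it.
	 * - If fast_io_fail_timeout_sec != 0, it must be at least
	 *   reconnect_delay_sec and, when ctrlr_loss_timeout_sec > 0, at most
	 *   ctrlr_loss_timeout_sec.
	 */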
5011 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5012 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5013 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5014 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5015 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5016 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5017 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5018 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5019 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5020 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5021 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5022 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5023 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5024 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5025 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5026 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5027 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5028 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5029 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5030 }
5031 
5032 static void
5033 test_retry_io_if_ctrlr_is_resetting(void)
5034 {
5035 	struct nvme_path_id path = {};
5036 	struct nvme_ctrlr_opts opts = {};
5037 	struct spdk_nvme_ctrlr *ctrlr;
5038 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5039 	struct nvme_ctrlr *nvme_ctrlr;
5040 	const int STRING_SIZE = 32;
5041 	const char *attached_names[STRING_SIZE];
5042 	struct nvme_bdev *bdev;
5043 	struct nvme_ns *nvme_ns;
5044 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5045 	struct spdk_io_channel *ch;
5046 	struct nvme_bdev_channel *nbdev_ch;
5047 	struct nvme_io_path *io_path;
5048 	struct nvme_qpair *nvme_qpair;
5049 	int rc;
5050 
5051 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5052 	ut_init_trid(&path.trid);
5053 
5054 	set_thread(0);
5055 
5056 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5057 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5058 
5059 	g_ut_attach_ctrlr_status = 0;
5060 	g_ut_attach_bdev_count = 1;
5061 
5062 	opts.ctrlr_loss_timeout_sec = -1;
5063 	opts.reconnect_delay_sec = 1;
5064 
5065 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5066 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5067 	CU_ASSERT(rc == 0);
5068 
5069 	spdk_delay_us(1000);
5070 	poll_threads();
5071 
5072 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5073 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5074 
5075 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5076 	CU_ASSERT(nvme_ctrlr != NULL);
5077 
5078 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5079 	CU_ASSERT(bdev != NULL);
5080 
5081 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5082 	CU_ASSERT(nvme_ns != NULL);
5083 
5084 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5085 	ut_bdev_io_set_buf(bdev_io1);
5086 
5087 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5088 	ut_bdev_io_set_buf(bdev_io2);
5089 
5090 	ch = spdk_get_io_channel(bdev);
5091 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5092 
5093 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5094 
5095 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5096 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5097 
5098 	nvme_qpair = io_path->qpair;
5099 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5100 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5101 
5102 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5103 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5104 
5105 	/* If qpair is connected, I/O should succeed. */
5106 	bdev_io1->internal.in_submit_request = true;
5107 
5108 	bdev_nvme_submit_request(ch, bdev_io1);
5109 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5110 
5111 	poll_threads();
5112 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5113 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5114 
5115 	/* If the qpair is disconnected, it is freed and then reconnected by resetting
5116 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
5117 	 * resetting should be queued.
5118 	 */
5119 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5120 	ctrlr->is_failed = true;
5121 
5122 	poll_thread_times(0, 5);
5123 
5124 	CU_ASSERT(nvme_qpair->qpair == NULL);
5125 	CU_ASSERT(nvme_ctrlr->resetting == true);
5126 	CU_ASSERT(ctrlr->is_failed == false);
5127 
5128 	bdev_io1->internal.in_submit_request = true;
5129 
5130 	bdev_nvme_submit_request(ch, bdev_io1);
5131 
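	/* Delay 1 us so the two I/Os get distinct submit ticks and their order
	 * on the retry list is deterministic.
	 */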
5132 	spdk_delay_us(1);
5133 
5134 	bdev_io2->internal.in_submit_request = true;
5135 
5136 	bdev_nvme_submit_request(ch, bdev_io2);
5137 
5138 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5139 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5140 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5141 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5142 
5143 	poll_threads();
5144 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5145 	poll_threads();
5146 
5147 	CU_ASSERT(nvme_qpair->qpair != NULL);
5148 	CU_ASSERT(nvme_ctrlr->resetting == false);
5149 
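	/* Advance time so that only bdev_io1's retry becomes due; bdev_io2 was
	 * queued 1 us later and remains on the retry list.
	 */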
5150 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5151 
5152 	poll_thread_times(0, 1);
5153 
5154 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5155 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5156 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5157 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5158 
5159 	poll_threads();
5160 
5161 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5162 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5163 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5164 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5165 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5166 
5167 	spdk_delay_us(1);
5168 
5169 	poll_thread_times(0, 1);
5170 
5171 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5172 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5173 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5174 
5175 	poll_threads();
5176 
5177 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5178 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5179 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5180 
5181 	free(bdev_io1);
5182 	free(bdev_io2);
5183 
5184 	spdk_put_io_channel(ch);
5185 
5186 	poll_threads();
5187 
5188 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5189 	CU_ASSERT(rc == 0);
5190 
5191 	poll_threads();
5192 	spdk_delay_us(1000);
5193 	poll_threads();
5194 
5195 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5196 }
5197 
5198 static void
5199 test_reconnect_ctrlr(void)
5200 {
5201 	struct spdk_nvme_transport_id trid = {};
5202 	struct spdk_nvme_ctrlr ctrlr = {};
5203 	struct nvme_ctrlr *nvme_ctrlr;
5204 	struct spdk_io_channel *ch1, *ch2;
5205 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5206 	int rc;
5207 
5208 	ut_init_trid(&trid);
5209 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5210 
5211 	set_thread(0);
5212 
5213 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5214 	CU_ASSERT(rc == 0);
5215 
5216 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5217 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5218 
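	/* With a 2 second loss timeout and 1 second reconnect delay, reconnects
	 * are retried until the loss timeout elapses.
	 */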
5219 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5220 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5221 
5222 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5223 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5224 
5225 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5226 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5227 
5228 	set_thread(1);
5229 
5230 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5231 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5232 
5233 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5234 
5235 	/* Reset starts from thread 1. */
5236 	set_thread(1);
5237 
5238 	/* The reset should fail and a reconnect timer should be registered. */
5239 	ctrlr.fail_reset = true;
5240 	ctrlr.is_failed = true;
5241 
5242 	rc = bdev_nvme_reset(nvme_ctrlr);
5243 	CU_ASSERT(rc == 0);
5244 	CU_ASSERT(nvme_ctrlr->resetting == true);
5245 	CU_ASSERT(ctrlr.is_failed == true);
5246 
5247 	poll_threads();
5248 
5249 	CU_ASSERT(nvme_ctrlr->resetting == false);
5250 	CU_ASSERT(ctrlr.is_failed == false);
5251 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5252 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5253 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5254 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5255 
5256 	/* Then a reconnect retry should succeed. */
5257 	ctrlr.fail_reset = false;
5258 
5259 	spdk_delay_us(SPDK_SEC_TO_USEC);
5260 	poll_thread_times(0, 1);
5261 
5262 	CU_ASSERT(nvme_ctrlr->resetting == true);
5263 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5264 
5265 	poll_threads();
5266 
5267 	CU_ASSERT(nvme_ctrlr->resetting == false);
5268 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5269 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5270 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5271 
5272 	/* The reset should fail and a reconnect timer should be registered. */
5273 	ctrlr.fail_reset = true;
5274 	ctrlr.is_failed = true;
5275 
5276 	rc = bdev_nvme_reset(nvme_ctrlr);
5277 	CU_ASSERT(rc == 0);
5278 	CU_ASSERT(nvme_ctrlr->resetting == true);
5279 	CU_ASSERT(ctrlr.is_failed == true);
5280 
5281 	poll_threads();
5282 
5283 	CU_ASSERT(nvme_ctrlr->resetting == false);
5284 	CU_ASSERT(ctrlr.is_failed == false);
5285 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5286 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5287 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5288 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5289 
5290 	/* Then a reconnect retry should still fail. */
5291 	spdk_delay_us(SPDK_SEC_TO_USEC);
5292 	poll_thread_times(0, 1);
5293 
5294 	CU_ASSERT(nvme_ctrlr->resetting == true);
5295 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5296 
5297 	poll_threads();
5298 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5299 	poll_threads();
5300 
5301 	CU_ASSERT(nvme_ctrlr->resetting == false);
5302 	CU_ASSERT(ctrlr.is_failed == false);
5303 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5304 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5305 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5306 
5307 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5308 	spdk_delay_us(SPDK_SEC_TO_USEC);
5309 	poll_threads();
5310 
5311 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5312 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5313 	CU_ASSERT(nvme_ctrlr->destruct == true);
5314 
5315 	spdk_put_io_channel(ch2);
5316 
5317 	set_thread(0);
5318 
5319 	spdk_put_io_channel(ch1);
5320 
5321 	poll_threads();
5322 	spdk_delay_us(1000);
5323 	poll_threads();
5324 
5325 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5326 }
5327 
5328 static struct nvme_path_id *
5329 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5330 		       const struct spdk_nvme_transport_id *trid)
5331 {
5332 	struct nvme_path_id *p;
5333 
5334 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5335 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5336 			break;
5337 		}
5338 	}
5339 
5340 	return p;
5341 }
5342 
5343 static void
5344 test_retry_failover_ctrlr(void)
5345 {
5346 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5347 	struct spdk_nvme_ctrlr ctrlr = {};
5348 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5349 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5350 	struct spdk_io_channel *ch;
5351 	struct nvme_ctrlr_channel *ctrlr_ch;
5352 	int rc;
5353 
5354 	ut_init_trid(&trid1);
5355 	ut_init_trid2(&trid2);
5356 	ut_init_trid3(&trid3);
5357 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5358 
5359 	set_thread(0);
5360 
5361 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5362 	CU_ASSERT(rc == 0);
5363 
5364 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5365 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5366 
5367 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5368 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5369 
5370 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5371 	CU_ASSERT(rc == 0);
5372 
5373 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5374 	CU_ASSERT(rc == 0);
5375 
5376 	ch = spdk_get_io_channel(nvme_ctrlr);
5377 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5378 
5379 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5380 
5381 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5382 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5383 	CU_ASSERT(path_id1->is_failed == false);
5384 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5385 
5386 	/* If reset fails and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5387 	ctrlr.fail_reset = true;
5388 	ctrlr.is_failed = true;
5389 
5390 	rc = bdev_nvme_reset(nvme_ctrlr);
5391 	CU_ASSERT(rc == 0);
5392 
5393 	poll_threads();
5394 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5395 	poll_threads();
5396 
5397 	CU_ASSERT(nvme_ctrlr->resetting == false);
5398 	CU_ASSERT(ctrlr.is_failed == false);
5399 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5400 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5401 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5402 	CU_ASSERT(path_id1->is_failed == true);
5406 
5407 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5408 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5409 	CU_ASSERT(path_id2->is_failed == false);
5410 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5411 
5412 	/* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is
5413 	 * switched to trid3 but reset is not started.
5414 	 */
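	/* The second argument of bdev_nvme_failover() set to true requests removal
	 * of the current path_id.
	 */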
5415 	rc = bdev_nvme_failover(nvme_ctrlr, true);
5416 	CU_ASSERT(rc == 0);
5417 
5418 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5419 
5420 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5421 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5422 	CU_ASSERT(path_id3->is_failed == false);
5423 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5424 
5425 	CU_ASSERT(nvme_ctrlr->resetting == false);
5426 
5427 	/* If reconnect succeeds, trid3 should be the active path_id. */
5428 	ctrlr.fail_reset = false;
5429 
5430 	spdk_delay_us(SPDK_SEC_TO_USEC);
5431 	poll_thread_times(0, 1);
5432 
5433 	CU_ASSERT(nvme_ctrlr->resetting == true);
5434 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5435 
5436 	poll_threads();
5437 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5438 	poll_threads();
5439 
5440 	CU_ASSERT(path_id3->is_failed == false);
5441 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5442 	CU_ASSERT(nvme_ctrlr->resetting == false);
5443 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5444 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5445 
5446 	spdk_put_io_channel(ch);
5447 
5448 	poll_threads();
5449 
5450 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5451 	CU_ASSERT(rc == 0);
5452 
5453 	poll_threads();
5454 	spdk_delay_us(1000);
5455 	poll_threads();
5456 
5457 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5458 }
5459 
5460 static void
5461 test_fail_path(void)
5462 {
5463 	struct nvme_path_id path = {};
5464 	struct nvme_ctrlr_opts opts = {};
5465 	struct spdk_nvme_ctrlr *ctrlr;
5466 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5467 	struct nvme_ctrlr *nvme_ctrlr;
5468 	const int STRING_SIZE = 32;
5469 	const char *attached_names[STRING_SIZE];
5470 	struct nvme_bdev *bdev;
5471 	struct nvme_ns *nvme_ns;
5472 	struct spdk_bdev_io *bdev_io;
5473 	struct spdk_io_channel *ch;
5474 	struct nvme_bdev_channel *nbdev_ch;
5475 	struct nvme_io_path *io_path;
5476 	struct nvme_ctrlr_channel *ctrlr_ch;
5477 	int rc;
5478 
5479 	/* The test scenario is the following.
5480 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5481 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5482 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5483 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5484 	 *   comes first. The queued I/O is failed.
5485 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5486 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5487 	 */
5488 
5489 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5490 	ut_init_trid(&path.trid);
5491 
5492 	set_thread(0);
5493 
5494 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5495 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5496 
5497 	g_ut_attach_ctrlr_status = 0;
5498 	g_ut_attach_bdev_count = 1;
5499 
5500 	opts.ctrlr_loss_timeout_sec = 4;
5501 	opts.reconnect_delay_sec = 1;
5502 	opts.fast_io_fail_timeout_sec = 2;
5503 
5504 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5505 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5506 	CU_ASSERT(rc == 0);
5507 
5508 	spdk_delay_us(1000);
5509 	poll_threads();
5510 
5511 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5512 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5513 
5514 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5515 	CU_ASSERT(nvme_ctrlr != NULL);
5516 
5517 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5518 	CU_ASSERT(bdev != NULL);
5519 
5520 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5521 	CU_ASSERT(nvme_ns != NULL);
5522 
5523 	ch = spdk_get_io_channel(bdev);
5524 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5525 
5526 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5527 
5528 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5529 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5530 
5531 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5532 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5533 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5534 
5535 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5536 	ut_bdev_io_set_buf(bdev_io);
5537 
5538 
5540 	ctrlr->fail_reset = true;
5541 	ctrlr->is_failed = true;
5542 
5543 	rc = bdev_nvme_reset(nvme_ctrlr);
5544 	CU_ASSERT(rc == 0);
5545 	CU_ASSERT(nvme_ctrlr->resetting == true);
5546 	CU_ASSERT(ctrlr->is_failed == true);
5547 
5548 	poll_threads();
5549 
5550 	CU_ASSERT(nvme_ctrlr->resetting == false);
5551 	CU_ASSERT(ctrlr->is_failed == false);
5552 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5553 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5554 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5555 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5556 
5557 	/* I/O should be queued. */
5558 	bdev_io->internal.in_submit_request = true;
5559 
5560 	bdev_nvme_submit_request(ch, bdev_io);
5561 
5562 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5563 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5564 
5565 	/* After a second, the I/O should still be queued and the ctrlr should
5566 	 * still be recovering.
5567 	 */
5568 	spdk_delay_us(SPDK_SEC_TO_USEC);
5569 	poll_threads();
5570 
5571 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5572 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5573 
5574 	CU_ASSERT(nvme_ctrlr->resetting == false);
5575 	CU_ASSERT(ctrlr->is_failed == false);
5576 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5577 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5578 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5579 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5582 
5583 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5584 	spdk_delay_us(SPDK_SEC_TO_USEC);
5585 	poll_threads();
5586 
5587 	CU_ASSERT(nvme_ctrlr->resetting == false);
5588 	CU_ASSERT(ctrlr->is_failed == false);
5589 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5590 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5591 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5592 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5593 
5594 	/* Then within a second, pending I/O should be failed. */
5595 	spdk_delay_us(SPDK_SEC_TO_USEC);
5596 	poll_threads();
5597 
5598 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5599 	poll_threads();
5600 
5601 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5602 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5603 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5604 
5605 	/* Another I/O submission should be failed immediately. */
5606 	bdev_io->internal.in_submit_request = true;
5607 
5608 	bdev_nvme_submit_request(ch, bdev_io);
5609 
5610 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5611 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5612 
5613 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
5614 	 * should be deleted.
5615 	 */
5616 	spdk_delay_us(SPDK_SEC_TO_USEC);
5617 	poll_threads();
5618 
5619 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5620 	poll_threads();
5621 
5622 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5623 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5624 	CU_ASSERT(nvme_ctrlr->destruct == true);
5625 
5626 	spdk_put_io_channel(ch);
5627 
5628 	poll_threads();
5629 	spdk_delay_us(1000);
5630 	poll_threads();
5631 
5632 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5633 
5634 	free(bdev_io);
5635 }
5636 
5637 static void
5638 test_nvme_ns_cmp(void)
5639 {
5640 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5641 
5642 	nvme_ns1.id = 0;
5643 	nvme_ns2.id = UINT32_MAX;
5644 
5645 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5646 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5647 }
5648 
5649 static void
5650 test_ana_transition(void)
5651 {
5652 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5653 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5654 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5655 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5656 
5657 	/* case 1: ANA transition timedout is canceled. */
5658 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5659 	nvme_ns.ana_transition_timedout = true;
5660 
5661 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5662 
5663 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5664 
5665 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5666 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5667 
5668 	/* case 2: ANATT timer is kept. */
5669 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
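	/* Register the ANATT timer manually to simulate an ANA transition already
	 * in progress.
	 */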
5670 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5671 			      &nvme_ns,
5672 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5673 
5674 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5675 
5676 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5677 
5678 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5679 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5680 
5681 	/* case 3: ANATT timer is stopped. */
5682 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5683 
5684 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5685 
5686 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5687 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5688 
5689 	/* case 4: ANATT timer is started. */
5690 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5691 
5692 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5693 
5694 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5695 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5696 
5697 	/* ANATT timer is expired. */
5698 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5699 
5700 	poll_threads();
5701 
5702 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5703 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5704 }
5705 
5706 static void
5707 _set_preferred_path_cb(void *cb_arg, int rc)
5708 {
5709 	bool *done = cb_arg;
5710 
5711 	*done = true;
5712 }
5713 
5714 static void
5715 test_set_preferred_path(void)
5716 {
5717 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5718 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5719 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5720 	const int STRING_SIZE = 32;
5721 	const char *attached_names[STRING_SIZE];
5722 	struct nvme_bdev *bdev;
5723 	struct spdk_io_channel *ch;
5724 	struct nvme_bdev_channel *nbdev_ch;
5725 	struct nvme_io_path *io_path;
5726 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5727 	const struct spdk_nvme_ctrlr_data *cdata;
5728 	bool done;
5729 	int rc;
5730 
5731 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5732 	ut_init_trid(&path1.trid);
5733 	ut_init_trid2(&path2.trid);
5734 	ut_init_trid3(&path3.trid);
5735 	g_ut_attach_ctrlr_status = 0;
5736 	g_ut_attach_bdev_count = 1;
5737 
5738 	set_thread(0);
5739 
5740 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5741 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5742 
5743 	ctrlr1->ns[0].uuid = &uuid1;
5744 
5745 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5746 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5747 	CU_ASSERT(rc == 0);
5748 
5749 	spdk_delay_us(1000);
5750 	poll_threads();
5751 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5752 	poll_threads();
5753 
5754 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5755 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5756 
5757 	ctrlr2->ns[0].uuid = &uuid1;
5758 
5759 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5760 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5761 	CU_ASSERT(rc == 0);
5762 
5763 	spdk_delay_us(1000);
5764 	poll_threads();
5765 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5766 	poll_threads();
5767 
5768 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5769 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5770 
5771 	ctrlr3->ns[0].uuid = &uuid1;
5772 
5773 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5774 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5775 	CU_ASSERT(rc == 0);
5776 
5777 	spdk_delay_us(1000);
5778 	poll_threads();
5779 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5780 	poll_threads();
5781 
5782 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5783 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5784 
5785 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5786 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5787 
5788 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5789 
5790 	ch = spdk_get_io_channel(bdev);
5791 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5792 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5793 
5794 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5795 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5796 
5797 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5798 
5799 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
5800 	 * should return io_path to ctrlr2.
5801 	 */
5802 
5803 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5804 	done = false;
5805 
5806 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5807 
5808 	poll_threads();
5809 	CU_ASSERT(done == true);
5810 
5811 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5812 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5813 
5814 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5815 
5816 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
5817 	 * acquired, find_io_path() should return io_path to ctrlr3.
5818 	 */
5819 
5820 	spdk_put_io_channel(ch);
5821 
5822 	poll_threads();
5823 
5824 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5825 	done = false;
5826 
5827 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5828 
5829 	poll_threads();
5830 	CU_ASSERT(done == true);
5831 
5832 	ch = spdk_get_io_channel(bdev);
5833 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5834 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5835 
5836 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5837 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5838 
5839 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
5840 
5841 	spdk_put_io_channel(ch);
5842 
5843 	poll_threads();
5844 
5845 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5846 	CU_ASSERT(rc == 0);
5847 
5848 	poll_threads();
5849 	spdk_delay_us(1000);
5850 	poll_threads();
5851 
5852 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5853 }
5854 
5855 static void
5856 test_find_next_io_path(void)
5857 {
5858 	struct nvme_bdev_channel nbdev_ch = {
5859 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5860 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5861 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
5862 	};
5863 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5864 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5865 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5866 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5867 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5868 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5869 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5870 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5871 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5872 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5873 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5874 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5875 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5876 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5877 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5878 
5879 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5880 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5881 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5882 
5883 	/* Test the case where nbdev_ch->current_io_path is filled. The case where
5884 	 * current_io_path == NULL is covered in test_find_io_path.
5885 	 */
5886 
5887 	nbdev_ch.current_io_path = &io_path2;
5888 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5889 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5890 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5891 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5892 
5893 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5894 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5895 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5896 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5897 
5898 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5899 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5900 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5901 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5902 
5903 	nbdev_ch.current_io_path = &io_path3;
5904 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5905 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5906 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5907 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5908 
5909 	/* Test if the next io_path is selected according to rr_min_io. */
5910 
5911 	nbdev_ch.current_io_path = NULL;
5912 	nbdev_ch.rr_min_io = 2;
5913 	nbdev_ch.rr_counter = 0;
5914 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5915 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5916 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5917 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5918 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5919 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5920 
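	/* io_path3 is non-optimized, so the round-robin selector skips it and
	 * wraps from io_path2 back to io_path1.
	 */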
5921 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5922 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5923 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5924 }
5925 
5926 static void
5927 test_find_io_path_min_qd(void)
5928 {
5929 	struct nvme_bdev_channel nbdev_ch = {
5930 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5931 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5932 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
5933 	};
5934 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5935 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5936 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5937 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5938 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5939 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5940 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5941 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5942 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5943 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5944 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5945 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5946 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5947 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5948 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5949 
5950 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5951 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5952 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5953 
5954 	/* Test that the ANA optimized state and the minimum number of outstanding
5955 	 * requests are prioritized when using the least queue depth selector.
5956 	 */
5957 	qpair1.num_outstanding_reqs = 2;
5958 	qpair2.num_outstanding_reqs = 1;
5959 	qpair3.num_outstanding_reqs = 0;
5960 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5961 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5962 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5963 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5964 
5965 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5966 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5967 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5968 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5969 
5970 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5971 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5972 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5973 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5974 
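	/* Once qpair2 becomes the most loaded, io_path1 is the optimized path with
	 * the fewest outstanding requests.
	 */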
5975 	qpair2.num_outstanding_reqs = 4;
5976 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5977 }
5978 
5979 static void
5980 test_disable_auto_failback(void)
5981 {
5982 	struct nvme_path_id path1 = {}, path2 = {};
5983 	struct nvme_ctrlr_opts opts = {};
5984 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
5985 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5986 	struct nvme_ctrlr *nvme_ctrlr1;
5987 	const int STRING_SIZE = 32;
5988 	const char *attached_names[STRING_SIZE];
5989 	struct nvme_bdev *bdev;
5990 	struct spdk_io_channel *ch;
5991 	struct nvme_bdev_channel *nbdev_ch;
5992 	struct nvme_io_path *io_path;
5993 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5994 	const struct spdk_nvme_ctrlr_data *cdata;
5995 	bool done;
5996 	int rc;
5997 
5998 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5999 	ut_init_trid(&path1.trid);
6000 	ut_init_trid2(&path2.trid);
6001 	g_ut_attach_ctrlr_status = 0;
6002 	g_ut_attach_bdev_count = 1;
6003 
6004 	g_opts.disable_auto_failback = true;
6005 
6006 	opts.ctrlr_loss_timeout_sec = -1;
6007 	opts.reconnect_delay_sec = 1;
6008 
6009 	set_thread(0);
6010 
6011 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6012 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6013 
6014 	ctrlr1->ns[0].uuid = &uuid1;
6015 
6016 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6017 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6018 	CU_ASSERT(rc == 0);
6019 
6020 	spdk_delay_us(1000);
6021 	poll_threads();
6022 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6023 	poll_threads();
6024 
6025 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6026 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6027 
6028 	ctrlr2->ns[0].uuid = &uuid1;
6029 
6030 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6031 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6032 	CU_ASSERT(rc == 0);
6033 
6034 	spdk_delay_us(1000);
6035 	poll_threads();
6036 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6037 	poll_threads();
6038 
6039 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6040 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6041 
6042 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6043 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6044 
6045 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6046 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6047 
6048 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6049 
6050 	ch = spdk_get_io_channel(bdev);
6051 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6052 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6053 
6054 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6055 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6056 
6057 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6058 
6059 	/* If resetting ctrlr1 fails, io_path to ctrlr2 should be used. */
6060 	ctrlr1->fail_reset = true;
6061 	ctrlr1->is_failed = true;
6062 
6063 	bdev_nvme_reset(nvme_ctrlr1);
6064 
6065 	poll_threads();
6066 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6067 	poll_threads();
6068 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6069 	poll_threads();
6070 
6071 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6072 
6073 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6074 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6075 
6076 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6077 
6078 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6079 	 * Hence, io_path to ctrlr2 should still be used.
6080 	 */
6081 	ctrlr1->fail_reset = false;
6082 
6083 	spdk_delay_us(SPDK_SEC_TO_USEC);
6084 	poll_threads();
6085 
6086 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6087 
6088 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6089 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6090 
6091 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6092 
6093 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
6094 	 * ctrlr1 should be used again.
6095 	 */
6096 
6097 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6098 	done = false;
6099 
6100 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6101 
6102 	poll_threads();
6103 	CU_ASSERT(done == true);
6104 
6105 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6106 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6107 
6108 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6109 
6110 	spdk_put_io_channel(ch);
6111 
6112 	poll_threads();
6113 
6114 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6115 	CU_ASSERT(rc == 0);
6116 
6117 	poll_threads();
6118 	spdk_delay_us(1000);
6119 	poll_threads();
6120 
6121 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6122 
6123 	g_opts.disable_auto_failback = false;
6124 }
6125 
6126 static void
6127 ut_set_multipath_policy_done(void *cb_arg, int rc)
6128 {
6129 	int *done = cb_arg;
6130 
6131 	SPDK_CU_ASSERT_FATAL(done != NULL);
6132 	*done = rc;
6133 }
6134 
6135 static void
6136 test_set_multipath_policy(void)
6137 {
6138 	struct nvme_path_id path1 = {}, path2 = {};
6139 	struct nvme_ctrlr_opts opts = {};
6140 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6141 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6142 	const int STRING_SIZE = 32;
6143 	const char *attached_names[STRING_SIZE];
6144 	struct nvme_bdev *bdev;
6145 	struct spdk_io_channel *ch;
6146 	struct nvme_bdev_channel *nbdev_ch;
6147 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6148 	int done;
6149 	int rc;
6150 
6151 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6152 	ut_init_trid(&path1.trid);
6153 	ut_init_trid2(&path2.trid);
6154 	g_ut_attach_ctrlr_status = 0;
6155 	g_ut_attach_bdev_count = 1;
6156 
6157 	g_opts.disable_auto_failback = true;
6158 
6159 	opts.ctrlr_loss_timeout_sec = -1;
6160 	opts.reconnect_delay_sec = 1;
6161 
6162 	set_thread(0);
6163 
6164 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6165 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6166 
6167 	ctrlr1->ns[0].uuid = &uuid1;
6168 
6169 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6170 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6171 	CU_ASSERT(rc == 0);
6172 
6173 	spdk_delay_us(1000);
6174 	poll_threads();
6175 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6176 	poll_threads();
6177 
6178 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6179 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6180 
6181 	ctrlr2->ns[0].uuid = &uuid1;
6182 
6183 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6184 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6185 	CU_ASSERT(rc == 0);
6186 
6187 	spdk_delay_us(1000);
6188 	poll_threads();
6189 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6190 	poll_threads();
6191 
6192 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6193 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6194 
6195 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6196 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6197 
6198 	/* If the multipath policy is updated before getting any I/O channel,
6199 	 * a newly acquired I/O channel should reflect the update.
6200 	 */
6201 	done = -1;
6202 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6203 				       BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6204 				       ut_set_multipath_policy_done, &done);
6205 	poll_threads();
6206 	CU_ASSERT(done == 0);
6207 
6208 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6209 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6210 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6211 
6212 	ch = spdk_get_io_channel(bdev);
6213 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6214 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6215 
6216 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6217 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6218 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6219 
6220 	/* If the multipath policy is updated while an I/O channel is active,
6221 	 * the update should be applied to the I/O channel immediately.
6222 	 */
6223 	done = -1;
6224 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6225 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6226 				       ut_set_multipath_policy_done, &done);
6227 	poll_threads();
6228 	CU_ASSERT(done == 0);
6229 
6230 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6231 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6232 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6233 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6234 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6235 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6236 
6237 	spdk_put_io_channel(ch);
6238 
6239 	poll_threads();
6240 
6241 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6242 	CU_ASSERT(rc == 0);
6243 
6244 	poll_threads();
6245 	spdk_delay_us(1000);
6246 	poll_threads();
6247 
6248 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6249 }
6250 
6251 static void
6252 test_uuid_generation(void)
6253 {
6254 	uint32_t nsid1 = 1, nsid2 = 2;
6255 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6256 	char sn3[21] = "                    ";
6257 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6258 	struct spdk_uuid uuid1, uuid2;
6259 
6260 	/* Test case 1:
6261 	 * Serial numbers are the same, nsids are different.
6262 	 * Compare two generated UUID - they should be different. */
6263 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6264 	uuid2 = nvme_generate_uuid(sn1, nsid2);
6265 
6266 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6267 
6268 	/* Test case 2:
6269 	 * Serial numbers differ only by one character, nsids are the same.
6270 	 * Compare two generated UUID - they should be different. */
6271 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6272 	uuid2 = nvme_generate_uuid(sn2, nsid1);
6273 
6274 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6275 
6276 	/* Test case 3:
6277 	 * Serial number consists only of space characters.
6278 	 * Validate the generated UUID. */
6279 	uuid1 = nvme_generate_uuid(sn3, nsid1);
6280 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6281 }
6282 
6283 static void
6284 test_retry_io_to_same_path(void)
6285 {
6286 	struct nvme_path_id path1 = {}, path2 = {};
6287 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6288 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6289 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6290 	const int STRING_SIZE = 32;
6291 	const char *attached_names[STRING_SIZE];
6292 	struct nvme_bdev *bdev;
6293 	struct spdk_bdev_io *bdev_io;
6294 	struct nvme_bdev_io *bio;
6295 	struct spdk_io_channel *ch;
6296 	struct nvme_bdev_channel *nbdev_ch;
6297 	struct nvme_io_path *io_path1, *io_path2;
6298 	struct ut_nvme_req *req;
6299 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6300 	int done;
6301 	int rc;
6302 
6303 	g_opts.nvme_ioq_poll_period_us = 1;
6304 
6305 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6306 	ut_init_trid(&path1.trid);
6307 	ut_init_trid2(&path2.trid);
6308 	g_ut_attach_ctrlr_status = 0;
6309 	g_ut_attach_bdev_count = 1;
6310 
6311 	set_thread(0);
6312 
6313 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6314 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6315 
6316 	ctrlr1->ns[0].uuid = &uuid1;
6317 
6318 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6319 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6320 	CU_ASSERT(rc == 0);
6321 
6322 	spdk_delay_us(1000);
6323 	poll_threads();
6324 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6325 	poll_threads();
6326 
6327 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6328 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6329 
6330 	ctrlr2->ns[0].uuid = &uuid1;
6331 
6332 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6333 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6334 	CU_ASSERT(rc == 0);
6335 
6336 	spdk_delay_us(1000);
6337 	poll_threads();
6338 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6339 	poll_threads();
6340 
6341 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6342 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6343 
6344 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6345 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6346 
6347 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
6348 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6349 
6350 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6351 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6352 
6353 	done = -1;
6354 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6355 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6356 	poll_threads();
6357 	CU_ASSERT(done == 0);
6358 
6359 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6360 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6361 	CU_ASSERT(bdev->rr_min_io == 1);
6362 
6363 	ch = spdk_get_io_channel(bdev);
6364 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6365 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6366 
6367 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6368 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6369 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6370 
6371 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6372 	ut_bdev_io_set_buf(bdev_io);
6373 
6374 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6375 
6376 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6377 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6378 
6379 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6380 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6381 
6382 	/* The 1st I/O should be submitted to io_path1. */
6383 	bdev_io->internal.in_submit_request = true;
6384 
6385 	bdev_nvme_submit_request(ch, bdev_io);
6386 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6387 	CU_ASSERT(bio->io_path == io_path1);
6388 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6389 
6390 	spdk_delay_us(1);
6391 
6392 	poll_threads();
6393 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
6394 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6395 
6396 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6397 	 * policy is round-robin.
6398 	 */
6399 	bdev_io->internal.in_submit_request = true;
6400 
6401 	bdev_nvme_submit_request(ch, bdev_io);
6402 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6403 	CU_ASSERT(bio->io_path == io_path2);
6404 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6405 
6406 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6407 	SPDK_CU_ASSERT_FATAL(req != NULL);
6408 
6409 	/* Set retry count to non-zero. */
6410 	g_opts.bdev_retry_count = 1;
6411 
6412 	/* Inject an I/O error. */
6413 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6414 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
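	/* NAMESPACE_NOT_READY is a transient status, so the completion path queues
	 * the I/O for retry instead of failing it.
	 */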
6415 
6416 	/* The 2nd I/O should be queued to nbdev_ch. */
6417 	spdk_delay_us(1);
6418 	poll_thread_times(0, 1);
6419 
6420 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6421 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6422 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6423 
6424 	/* The 2nd I/O should keep caching io_path2. */
6425 	CU_ASSERT(bio->io_path == io_path2);
6426 
6427 	/* The 2nd I/O should be submitted to io_path2 again. */
6428 	poll_thread_times(0, 1);
6429 
6430 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6431 	CU_ASSERT(bio->io_path == io_path2);
6432 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6433 
6434 	spdk_delay_us(1);
6435 	poll_threads();
6436 
6437 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6438 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
6439 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6440 
6441 	free(bdev_io);
6442 
6443 	spdk_put_io_channel(ch);
6444 
6445 	poll_threads();
6446 	spdk_delay_us(1);
6447 	poll_threads();
6448 
6449 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6450 	CU_ASSERT(rc == 0);
6451 
6452 	poll_threads();
6453 	spdk_delay_us(1000);
6454 	poll_threads();
6455 
6456 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6457 
6458 	g_opts.nvme_ioq_poll_period_us = 0;
6459 	g_opts.bdev_retry_count = 0;
6460 }
6461 
6462 int
6463 main(int argc, const char **argv)
6464 {
6465 	CU_pSuite	suite = NULL;
6466 	unsigned int	num_failures;
6467 
6468 	CU_set_error_action(CUEA_ABORT);
6469 	CU_initialize_registry();
6470 
6471 	suite = CU_add_suite("nvme", NULL, NULL);
6472 
6473 	CU_ADD_TEST(suite, test_create_ctrlr);
6474 	CU_ADD_TEST(suite, test_reset_ctrlr);
6475 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
6476 	CU_ADD_TEST(suite, test_failover_ctrlr);
6477 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
6478 	CU_ADD_TEST(suite, test_pending_reset);
6479 	CU_ADD_TEST(suite, test_attach_ctrlr);
6480 	CU_ADD_TEST(suite, test_aer_cb);
6481 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
6482 	CU_ADD_TEST(suite, test_add_remove_trid);
6483 	CU_ADD_TEST(suite, test_abort);
6484 	CU_ADD_TEST(suite, test_get_io_qpair);
6485 	CU_ADD_TEST(suite, test_bdev_unregister);
6486 	CU_ADD_TEST(suite, test_compare_ns);
6487 	CU_ADD_TEST(suite, test_init_ana_log_page);
6488 	CU_ADD_TEST(suite, test_get_memory_domains);
6489 	CU_ADD_TEST(suite, test_reconnect_qpair);
6490 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
6491 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
6492 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
6493 	CU_ADD_TEST(suite, test_admin_path);
6494 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
6495 	CU_ADD_TEST(suite, test_find_io_path);
6496 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
6497 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
6498 	CU_ADD_TEST(suite, test_retry_io_count);
6499 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
6500 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
6501 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
6502 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
6503 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
6504 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
6505 	CU_ADD_TEST(suite, test_fail_path);
6506 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
6507 	CU_ADD_TEST(suite, test_ana_transition);
6508 	CU_ADD_TEST(suite, test_set_preferred_path);
6509 	CU_ADD_TEST(suite, test_find_next_io_path);
6510 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
6511 	CU_ADD_TEST(suite, test_disable_auto_failback);
6512 	CU_ADD_TEST(suite, test_set_multipath_policy);
6513 	CU_ADD_TEST(suite, test_uuid_generation);
6514 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
6515 
6516 	CU_basic_set_mode(CU_BRM_VERBOSE);
6517 
6518 	allocate_threads(3);
6519 	set_thread(0);
6520 	bdev_nvme_library_init();
6521 	init_accel();
6522 
6523 	CU_basic_run_tests();
6524 
6525 	set_thread(0);
6526 	bdev_nvme_library_fini();
6527 	fini_accel();
6528 	free_threads();
6529 
6530 	num_failures = CU_get_number_of_failures();
6531 	CU_cleanup_registry();
6532 
6533 	return num_failures;
6534 }
6535