/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

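/*
 * The bdev_nvme module sources are compiled into this test directly (see the
 * includes above) so that its static functions are reachable. Every external
 * dependency the module calls is replaced by the DEFINE_STUB() entries and
 * hand-written fakes that follow.
 */
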
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);

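/*
 * Hand-written mock: fills the caller's array with dummy memory domain
 * pointers, using ut_spdk_nvme_ctrlr_get_memory_domains (primed via
 * MOCK_SET()) both as the number of domains to report and as the mocked
 * return value.
 */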
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

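/*
 * The real NVMe driver structures are opaque to consumers, so the test
 * defines minimal stand-ins that carry just enough state (namespaces,
 * qpairs, outstanding requests, failure flags) to drive bdev_nvme's state
 * machine.
 */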
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

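/*
 * Create a fake controller and queue it on g_ut_init_ctrlrs. It stays there
 * until a probe poll with a matching trid "attaches" it (see
 * spdk_nvme_probe_poll_async() below). Returns NULL if a controller with the
 * same trid is already pending.
 */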
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

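/*
 * Queue a fake request as outstanding. It completes, with the status stored
 * in req->cpl, only when the test polls the qpair via
 * spdk_nvme_qpair_process_completions().
 */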
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

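/*
 * Probe emulation: each poll moves the fake controllers whose trid matches
 * the probe context from g_ut_init_ctrlrs to g_ut_attached_ctrlrs and
 * invokes the attach callback; controllers marked failed are freed instead.
 */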
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

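/*
 * Build a fake ANA log page: a header followed by one group descriptor
 * (each listing a single NSID) per active namespace of the fake controller.
 */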
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

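/*
 * Abort emulation: the target request is marked aborted in place and an
 * ABORT command is queued on the admin qpair; each completes on its qpair's
 * next completion poll.
 */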
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

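/*
 * Completion emulation: drain every outstanding request on the qpair and
 * invoke its callback with the prepared completion status. A free that was
 * deferred from within the completion context is performed afterwards.
 */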
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

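/*
 * Test cases begin here. bdev_nvme_delete() completes asynchronously, so the
 * tests poll the threads (and advance the clock where timers are pending)
 * before asserting that the ctrlr is gone.
 */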
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

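/*
 * Step through a successful reset with poll_thread_times(): the I/O qpairs
 * are destroyed thread by thread, the ctrlr is disconnected, the admin
 * qpair reconnects once the poll period elapses, and the I/O qpairs are
 * recreated in the same order.
 */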
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset has completed, but the ctrlr has not been destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, two channels are still open, so the destruct is not
	 * completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was lost and resetting the ctrlr failed repeatedly before failover from
 * trid1 to trid2 started. While the failed reset was being processed, trid3 was
 * added. trid1 should have stayed active, i.e., at the head of the list, until the
 * failover completed, but trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection
 * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers a ctrlr reset, whereas an admin qpair error triggers a ctrlr
 * failover. Hence a reset may be executed repeatedly before a failover is executed,
 * which makes this bug real.
 *
 * The following test verifies the fix.
 */
1793 static void
1794 test_race_between_failover_and_add_secondary_trid(void)
1795 {
1796 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1797 	struct spdk_nvme_ctrlr ctrlr = {};
1798 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1799 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1800 	struct spdk_io_channel *ch1, *ch2;
1801 	int rc;
1802 
1803 	ut_init_trid(&trid1);
1804 	ut_init_trid2(&trid2);
1805 	ut_init_trid3(&trid3);
1806 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1807 
1808 	set_thread(0);
1809 
1810 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1811 	CU_ASSERT(rc == 0);
1812 
1813 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1814 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1815 
1816 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1817 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1818 
1819 	set_thread(1);
1820 
1821 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1822 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1823 
1824 	set_thread(0);
1825 
1826 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1827 	CU_ASSERT(rc == 0);
1828 
1829 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1830 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1831 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1832 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1833 	path_id2 = TAILQ_NEXT(path_id1, link);
1834 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1835 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1836 
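	/* Make the reset fail so that path_id1 is marked as failed (last_failed_tsc != 0)
	 * but remains at the head of the list.
	 */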
1837 	ctrlr.fail_reset = true;
1838 
1839 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1840 	CU_ASSERT(rc == 0);
1841 
1842 	poll_threads();
1843 
1844 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1845 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1846 
1847 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1848 	CU_ASSERT(rc == 0);
1849 
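	/* Add trid3 while the second reset is in progress. With the fix, trid3 must be
	 * appended after trid2 instead of being inserted at the head.
	 */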
1850 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1851 	CU_ASSERT(rc == 0);
1852 
1853 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1854 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1855 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1856 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1857 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1858 	path_id3 = TAILQ_NEXT(path_id2, link);
1859 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1860 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1861 
1862 	poll_threads();
1863 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1864 	poll_threads();
1865 
1866 	spdk_put_io_channel(ch1);
1867 
1868 	set_thread(1);
1869 
1870 	spdk_put_io_channel(ch2);
1871 
1872 	poll_threads();
1873 
1874 	set_thread(0);
1875 
1876 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1877 	CU_ASSERT(rc == 0);
1878 
1879 	poll_threads();
1880 	spdk_delay_us(1000);
1881 	poll_threads();
1882 
1883 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1884 }
1885 
1886 static void
1887 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1888 {
1889 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1890 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1891 }
1892 
1893 static void
1894 test_pending_reset(void)
1895 {
1896 	struct spdk_nvme_transport_id trid = {};
1897 	struct spdk_nvme_ctrlr *ctrlr;
1898 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1899 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1900 	const int STRING_SIZE = 32;
1901 	const char *attached_names[STRING_SIZE];
1902 	struct nvme_bdev *bdev;
1903 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1904 	struct spdk_io_channel *ch1, *ch2;
1905 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1906 	struct nvme_io_path *io_path1, *io_path2;
1907 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1908 	int rc;
1909 
1910 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1911 	ut_init_trid(&trid);
1912 
1913 	set_thread(0);
1914 
1915 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1916 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1917 
1918 	g_ut_attach_ctrlr_status = 0;
1919 	g_ut_attach_bdev_count = 1;
1920 
1921 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1922 			      attach_ctrlr_done, NULL, &opts, NULL, false);
1923 	CU_ASSERT(rc == 0);
1924 
1925 	spdk_delay_us(1000);
1926 	poll_threads();
1927 
1928 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1929 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1930 
1931 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1932 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1933 
1934 	ch1 = spdk_get_io_channel(bdev);
1935 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1936 
1937 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1938 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1939 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1940 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1941 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1942 
1943 	set_thread(1);
1944 
1945 	ch2 = spdk_get_io_channel(bdev);
1946 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1947 
1948 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1949 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1950 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1951 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1952 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1953 
1954 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1955 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1956 
1957 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1958 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1959 
1960 	/* The first reset request is submitted on thread 1, and the second reset request
1961 	 * is submitted on thread 0 while processing the first request.
1962 	 */
1963 	bdev_nvme_submit_request(ch2, first_bdev_io);
1964 	CU_ASSERT(nvme_ctrlr->resetting == true);
1965 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1966 
1967 	set_thread(0);
1968 
1969 	bdev_nvme_submit_request(ch1, second_bdev_io);
1970 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
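	/* The second reset arrives while the first is in progress, so it is queued on
	 * ctrlr_ch1's pending_resets list and completes together with the first reset.
	 */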
1971 
1972 	poll_threads();
1973 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1974 	poll_threads();
1975 
1976 	CU_ASSERT(nvme_ctrlr->resetting == false);
1977 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1978 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1979 
1980 	/* The first reset request is submitted on thread 1, and the second reset request
1981 	 * is submitted on thread 0 while processing the first request.
1982 	 *
1983 	 * The difference from the above scenario is that the reset of the controller fails
1984 	 * while processing the first request. Hence both reset requests should fail.
1985 	 */
1986 	set_thread(1);
1987 
1988 	bdev_nvme_submit_request(ch2, first_bdev_io);
1989 	CU_ASSERT(nvme_ctrlr->resetting == true);
1990 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1991 
1992 	set_thread(0);
1993 
1994 	bdev_nvme_submit_request(ch1, second_bdev_io);
1995 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1996 
1997 	ctrlr->fail_reset = true;
1998 
1999 	poll_threads();
2000 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2001 	poll_threads();
2002 
2003 	CU_ASSERT(nvme_ctrlr->resetting == false);
2004 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2005 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2006 
2007 	spdk_put_io_channel(ch1);
2008 
2009 	set_thread(1);
2010 
2011 	spdk_put_io_channel(ch2);
2012 
2013 	poll_threads();
2014 
2015 	set_thread(0);
2016 
2017 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2018 	CU_ASSERT(rc == 0);
2019 
2020 	poll_threads();
2021 	spdk_delay_us(1000);
2022 	poll_threads();
2023 
2024 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2025 
2026 	free(first_bdev_io);
2027 	free(second_bdev_io);
2028 }
2029 
2030 static void
2031 test_attach_ctrlr(void)
2032 {
2033 	struct spdk_nvme_transport_id trid = {};
2034 	struct spdk_nvme_ctrlr *ctrlr;
2035 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2036 	struct nvme_ctrlr *nvme_ctrlr;
2037 	const int STRING_SIZE = 32;
2038 	const char *attached_names[STRING_SIZE];
2039 	struct nvme_bdev *nbdev;
2040 	int rc;
2041 
2042 	set_thread(0);
2043 
2044 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2045 	ut_init_trid(&trid);
2046 
2047 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
2048 	 * by probe polling.
2049 	 */
2050 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2051 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2052 
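	/* attach_ctrlr_done() asserts against g_ut_attach_ctrlr_status and
	 * g_ut_attach_bdev_count, so set the expected values before each
	 * bdev_nvme_create() call.
	 */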
2053 	ctrlr->is_failed = true;
2054 	g_ut_attach_ctrlr_status = -EIO;
2055 	g_ut_attach_bdev_count = 0;
2056 
2057 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2058 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2059 	CU_ASSERT(rc == 0);
2060 
2061 	spdk_delay_us(1000);
2062 	poll_threads();
2063 
2064 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2065 
2066 	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created. */
2067 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2068 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2069 
2070 	g_ut_attach_ctrlr_status = 0;
2071 
2072 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2073 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2074 	CU_ASSERT(rc == 0);
2075 
2076 	spdk_delay_us(1000);
2077 	poll_threads();
2078 
2079 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2080 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2081 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2082 
2083 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2084 	CU_ASSERT(rc == 0);
2085 
2086 	poll_threads();
2087 	spdk_delay_us(1000);
2088 	poll_threads();
2089 
2090 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2091 
2092 	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
2093 	 * one nvme_bdev are created.
2094 	 */
2095 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2096 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2097 
2098 	g_ut_attach_bdev_count = 1;
2099 
2100 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2101 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2102 	CU_ASSERT(rc == 0);
2103 
2104 	spdk_delay_us(1000);
2105 	poll_threads();
2106 
2107 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2108 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2109 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2110 
2111 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2112 	attached_names[0] = NULL;
2113 
2114 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2115 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2116 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2117 
2118 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2119 	CU_ASSERT(rc == 0);
2120 
2121 	poll_threads();
2122 	spdk_delay_us(1000);
2123 	poll_threads();
2124 
2125 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2126 
2127 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
2128 	 * created because creating the nvme_bdev failed.
2129 	 */
2130 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2131 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2132 
2133 	g_ut_register_bdev_status = -EINVAL;
2134 	g_ut_attach_bdev_count = 0;
2135 
2136 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2137 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2138 	CU_ASSERT(rc == 0);
2139 
2140 	spdk_delay_us(1000);
2141 	poll_threads();
2142 
2143 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2144 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2145 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2146 
2147 	CU_ASSERT(attached_names[0] == NULL);
2148 
2149 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2150 	CU_ASSERT(rc == 0);
2151 
2152 	poll_threads();
2153 	spdk_delay_us(1000);
2154 	poll_threads();
2155 
2156 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2157 
2158 	g_ut_register_bdev_status = 0;
2159 }
2160 
2161 static void
2162 test_aer_cb(void)
2163 {
2164 	struct spdk_nvme_transport_id trid = {};
2165 	struct spdk_nvme_ctrlr *ctrlr;
2166 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2167 	struct nvme_ctrlr *nvme_ctrlr;
2168 	struct nvme_bdev *bdev;
2169 	const int STRING_SIZE = 32;
2170 	const char *attached_names[STRING_SIZE];
2171 	union spdk_nvme_async_event_completion event = {};
2172 	struct spdk_nvme_cpl cpl = {};
2173 	int rc;
2174 
2175 	set_thread(0);
2176 
2177 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2178 	ut_init_trid(&trid);
2179 
2180 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2181 	 * and 4th namespaces are populated.
2182 	 */
2183 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2184 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2185 
2186 	ctrlr->ns[0].is_active = false;
2187 
2188 	g_ut_attach_ctrlr_status = 0;
2189 	g_ut_attach_bdev_count = 3;
2190 
2191 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2192 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2193 	CU_ASSERT(rc == 0);
2194 
2195 	spdk_delay_us(1000);
2196 	poll_threads();
2197 
2198 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2199 	poll_threads();
2200 
2201 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2202 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2203 
2204 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2205 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2206 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2207 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2208 
2209 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2210 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2211 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2212 
2213 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
2214 	 * change the size of the 4th namespace.
2215 	 */
2216 	ctrlr->ns[0].is_active = true;
2217 	ctrlr->ns[2].is_active = false;
2218 	ctrlr->nsdata[3].nsze = 2048;
2219 
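	/* Build a Namespace Attribute Changed notice AER completion and deliver it
	 * directly to aer_cb(), which should rescan the namespaces.
	 */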
2220 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2221 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2222 	cpl.cdw0 = event.raw;
2223 
2224 	aer_cb(nvme_ctrlr, &cpl);
2225 
2226 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2227 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2228 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2229 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2230 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2231 
2232 	/* Change ANA state of active namespaces. */
2233 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2234 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2235 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2236 
2237 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2238 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2239 	cpl.cdw0 = event.raw;
2240 
2241 	aer_cb(nvme_ctrlr, &cpl);
2242 
2243 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2244 	poll_threads();
2245 
2246 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2247 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2248 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2249 
2250 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2251 	CU_ASSERT(rc == 0);
2252 
2253 	poll_threads();
2254 	spdk_delay_us(1000);
2255 	poll_threads();
2256 
2257 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2258 }
2259 
2260 static void
2261 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2262 			enum spdk_bdev_io_type io_type)
2263 {
2264 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2265 	struct nvme_io_path *io_path;
2266 	struct spdk_nvme_qpair *qpair;
2267 
2268 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2269 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2270 	qpair = io_path->qpair->qpair;
2271 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2272 
2273 	bdev_io->type = io_type;
2274 	bdev_io->internal.in_submit_request = true;
2275 
2276 	bdev_nvme_submit_request(ch, bdev_io);
2277 
2278 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2279 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2280 
2281 	poll_threads();
2282 
2283 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2284 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2285 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2286 }
2287 
2288 static void
2289 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2290 		   enum spdk_bdev_io_type io_type)
2291 {
2292 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2293 	struct nvme_io_path *io_path;
2294 	struct spdk_nvme_qpair *qpair;
2295 
2296 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2297 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2298 	qpair = io_path->qpair->qpair;
2299 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2300 
2301 	bdev_io->type = io_type;
2302 	bdev_io->internal.in_submit_request = true;
2303 
2304 	bdev_nvme_submit_request(ch, bdev_io);
2305 
2306 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2307 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2308 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2309 }
2310 
2311 static void
2312 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2313 {
2314 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2315 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2316 	struct ut_nvme_req *req;
2317 	struct nvme_io_path *io_path;
2318 	struct spdk_nvme_qpair *qpair;
2319 
2320 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2321 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2322 	qpair = io_path->qpair->qpair;
2323 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2324 
2325 	/* Only the compare-and-write fused command is tested for now. */
2326 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2327 	bdev_io->internal.in_submit_request = true;
2328 
2329 	bdev_nvme_submit_request(ch, bdev_io);
2330 
2331 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2332 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2333 	CU_ASSERT(bio->first_fused_submitted == true);
2334 
2335 	/* First outstanding request is compare operation. */
2336 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2337 	SPDK_CU_ASSERT_FATAL(req != NULL);
2338 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2339 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
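	/* Setting cdw0 lets the fused completion callback tell the compare completion
	 * apart from the write completion; polling then completes the whole fused command.
	 */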
2340 
2341 	poll_threads();
2342 
2343 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2344 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2345 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2346 }
2347 
2348 static void
2349 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2350 			 struct spdk_nvme_ctrlr *ctrlr)
2351 {
2352 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2353 	bdev_io->internal.in_submit_request = true;
2354 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2355 
2356 	bdev_nvme_submit_request(ch, bdev_io);
2357 
2358 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2359 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2360 
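	/* Admin commands are processed on the thread that created the ctrlr (thread 1);
	 * the completion is then passed back to the submitting thread (thread 0).
	 */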
2361 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2362 	poll_thread_times(1, 1);
2363 
2364 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2365 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2366 
2367 	poll_thread_times(0, 1);
2368 
2369 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2370 }
2371 
2372 static void
2373 test_submit_nvme_cmd(void)
2374 {
2375 	struct spdk_nvme_transport_id trid = {};
2376 	struct spdk_nvme_ctrlr *ctrlr;
2377 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2378 	struct nvme_ctrlr *nvme_ctrlr;
2379 	const int STRING_SIZE = 32;
2380 	const char *attached_names[STRING_SIZE];
2381 	struct nvme_bdev *bdev;
2382 	struct spdk_bdev_io *bdev_io;
2383 	struct spdk_io_channel *ch;
2384 	int rc;
2385 
2386 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2387 	ut_init_trid(&trid);
2388 
2389 	set_thread(1);
2390 
2391 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2392 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2393 
2394 	g_ut_attach_ctrlr_status = 0;
2395 	g_ut_attach_bdev_count = 1;
2396 
2397 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2398 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2399 	CU_ASSERT(rc == 0);
2400 
2401 	spdk_delay_us(1000);
2402 	poll_threads();
2403 
2404 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2405 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2406 
2407 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2408 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2409 
2410 	set_thread(0);
2411 
2412 	ch = spdk_get_io_channel(bdev);
2413 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2414 
2415 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2416 
2417 	bdev_io->u.bdev.iovs = NULL;
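	/* Submitting a read without iovs exercises the buffer allocation path first;
	 * ut_bdev_io_set_buf() below then provides a buffer for the remaining cases.
	 */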
2418 
2419 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2420 
2421 	ut_bdev_io_set_buf(bdev_io);
2422 
2423 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2424 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2425 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2426 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2427 
2428 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2429 
2430 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2431 
2432 	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2433 	g_ut_read_ext_called = false;
2434 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2435 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2436 	CU_ASSERT(g_ut_read_ext_called == true);
2437 	g_ut_read_ext_called = false;
2438 	bdev_io->u.bdev.memory_domain = NULL;
2439 
2440 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2441 
2442 	free(bdev_io);
2443 
2444 	spdk_put_io_channel(ch);
2445 
2446 	poll_threads();
2447 
2448 	set_thread(1);
2449 
2450 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2451 	CU_ASSERT(rc == 0);
2452 
2453 	poll_threads();
2454 	spdk_delay_us(1000);
2455 	poll_threads();
2456 
2457 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2458 }
2459 
2460 static void
2461 test_add_remove_trid(void)
2462 {
2463 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2464 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2465 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2466 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2467 	const int STRING_SIZE = 32;
2468 	const char *attached_names[STRING_SIZE];
2469 	struct nvme_path_id *ctrid;
2470 	int rc;
2471 
2472 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2473 	ut_init_trid(&path1.trid);
2474 	ut_init_trid2(&path2.trid);
2475 	ut_init_trid3(&path3.trid);
2476 
2477 	set_thread(0);
2478 
2479 	g_ut_attach_ctrlr_status = 0;
2480 	g_ut_attach_bdev_count = 0;
2481 
2482 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2483 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2484 
2485 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2486 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2487 	CU_ASSERT(rc == 0);
2488 
2489 	spdk_delay_us(1000);
2490 	poll_threads();
2491 
2492 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2493 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2494 
2495 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2496 
2497 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2498 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2499 
2500 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2501 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2502 	CU_ASSERT(rc == 0);
2503 
2504 	spdk_delay_us(1000);
2505 	poll_threads();
2506 
2507 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2508 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2509 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2510 			break;
2511 		}
2512 	}
2513 	CU_ASSERT(ctrid != NULL);
2514 
2515 	/* trid3 is not in the registered list. */
2516 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2517 	CU_ASSERT(rc == -ENXIO);
2518 
2519 	/* trid2 is not in use, so it is simply removed. */
2520 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2521 	CU_ASSERT(rc == 0);
2522 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2523 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2524 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2525 	}
2526 
2527 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2528 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2529 
2530 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2531 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2532 	CU_ASSERT(rc == 0);
2533 
2534 	spdk_delay_us(1000);
2535 	poll_threads();
2536 
2537 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2538 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2539 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2540 			break;
2541 		}
2542 	}
2543 	CU_ASSERT(ctrid != NULL);
2544 
2545 	/* Forcefully mark path3 as failed by setting its last_failed_tsc to a non-zero value.
2546 	 * If we add path2 again, it should be inserted between path1 and path3.
2547 	 * Then we remove path2; it is not in use, so it is simply removed.
2548 	 */
2549 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2550 
2551 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2552 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2553 
2554 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2555 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2556 	CU_ASSERT(rc == 0);
2557 
2558 	spdk_delay_us(1000);
2559 	poll_threads();
2560 
2561 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2562 
2563 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2564 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2565 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2566 
2567 	ctrid = TAILQ_NEXT(ctrid, link);
2568 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2569 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2570 
2571 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2572 	CU_ASSERT(rc == 0);
2573 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2574 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2575 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2576 	}
2577 
2578 	/* path1 is currently in use and path3 is an alternative path.
2579 	 * If we remove path1, the active path changes to path3.
2580 	 */
2581 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2582 	CU_ASSERT(rc == 0);
2583 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2584 	CU_ASSERT(nvme_ctrlr->resetting == true);
2585 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2586 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2587 	}
2588 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2589 
2590 	poll_threads();
2591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2592 	poll_threads();
2593 
2594 	CU_ASSERT(nvme_ctrlr->resetting == false);
2595 
2596 	/* path3 is the current and only path. If we remove path3, the corresponding
2597 	 * nvme_ctrlr is removed.
2598 	 */
2599 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2600 	CU_ASSERT(rc == 0);
2601 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2602 
2603 	poll_threads();
2604 	spdk_delay_us(1000);
2605 	poll_threads();
2606 
2607 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2608 
2609 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2610 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2611 
2612 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2613 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2614 	CU_ASSERT(rc == 0);
2615 
2616 	spdk_delay_us(1000);
2617 	poll_threads();
2618 
2619 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2620 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2621 
2622 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2623 
2624 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2625 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2626 
2627 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2628 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2629 	CU_ASSERT(rc == 0);
2630 
2631 	spdk_delay_us(1000);
2632 	poll_threads();
2633 
2634 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2635 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2636 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2637 			break;
2638 		}
2639 	}
2640 	CU_ASSERT(ctrid != NULL);
2641 
2642 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2643 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2644 	CU_ASSERT(rc == 0);
2645 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2646 
2647 	poll_threads();
2648 	spdk_delay_us(1000);
2649 	poll_threads();
2650 
2651 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2652 }
2653 
2654 static void
2655 test_abort(void)
2656 {
2657 	struct spdk_nvme_transport_id trid = {};
2658 	struct nvme_ctrlr_opts opts = {};
2659 	struct spdk_nvme_ctrlr *ctrlr;
2660 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2661 	struct nvme_ctrlr *nvme_ctrlr;
2662 	const int STRING_SIZE = 32;
2663 	const char *attached_names[STRING_SIZE];
2664 	struct nvme_bdev *bdev;
2665 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2666 	struct spdk_io_channel *ch1, *ch2;
2667 	struct nvme_bdev_channel *nbdev_ch1;
2668 	struct nvme_io_path *io_path1;
2669 	struct nvme_qpair *nvme_qpair1;
2670 	int rc;
2671 
2672 	/* Create the ctrlr on thread 1, and submit the I/O and admin requests to be
2673 	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
2674 	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
2675 	 */
2676 
2677 	ut_init_trid(&trid);
2678 
2679 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2680 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2681 
2682 	g_ut_attach_ctrlr_status = 0;
2683 	g_ut_attach_bdev_count = 1;
2684 
2685 	set_thread(1);
2686 
2687 	opts.ctrlr_loss_timeout_sec = -1;
2688 	opts.reconnect_delay_sec = 1;
2689 
2690 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2691 			      attach_ctrlr_done, NULL, &dopts, &opts, false);
2692 	CU_ASSERT(rc == 0);
2693 
2694 	spdk_delay_us(1000);
2695 	poll_threads();
2696 
2697 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2698 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2699 
2700 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2701 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2702 
2703 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2704 	ut_bdev_io_set_buf(write_io);
2705 
2706 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2707 	ut_bdev_io_set_buf(fuse_io);
2708 
2709 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2710 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2711 
2712 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2713 
2714 	set_thread(0);
2715 
2716 	ch1 = spdk_get_io_channel(bdev);
2717 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2718 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2719 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2720 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2721 	nvme_qpair1 = io_path1->qpair;
2722 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2723 
2724 	set_thread(1);
2725 
2726 	ch2 = spdk_get_io_channel(bdev);
2727 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2728 
2729 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2730 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2731 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2732 
2733 	/* Aborting the already completed request should fail. */
2734 	write_io->internal.in_submit_request = true;
2735 	bdev_nvme_submit_request(ch1, write_io);
2736 	poll_threads();
2737 
2738 	CU_ASSERT(write_io->internal.in_submit_request == false);
2739 
2740 	abort_io->u.abort.bio_to_abort = write_io;
2741 	abort_io->internal.in_submit_request = true;
2742 
2743 	bdev_nvme_submit_request(ch1, abort_io);
2744 
2745 	poll_threads();
2746 
2747 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2748 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2749 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2750 
2751 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2752 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2753 
2754 	admin_io->internal.in_submit_request = true;
2755 	bdev_nvme_submit_request(ch1, admin_io);
2756 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2757 	poll_threads();
2758 
2759 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2760 
2761 	abort_io->u.abort.bio_to_abort = admin_io;
2762 	abort_io->internal.in_submit_request = true;
2763 
2764 	bdev_nvme_submit_request(ch2, abort_io);
2765 
2766 	poll_threads();
2767 
2768 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2769 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2770 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2771 
2772 	/* Aborting the write request should succeed. */
2773 	write_io->internal.in_submit_request = true;
2774 	bdev_nvme_submit_request(ch1, write_io);
2775 
2776 	CU_ASSERT(write_io->internal.in_submit_request == true);
2777 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2778 
2779 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2780 	abort_io->u.abort.bio_to_abort = write_io;
2781 	abort_io->internal.in_submit_request = true;
2782 
2783 	bdev_nvme_submit_request(ch1, abort_io);
2784 
2785 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2786 	poll_threads();
2787 
2788 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2789 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2790 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2791 	CU_ASSERT(write_io->internal.in_submit_request == false);
2792 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2793 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2794 
2795 	/* Aborting the fused request should succeed. */
2796 	fuse_io->internal.in_submit_request = true;
2797 	bdev_nvme_submit_request(ch1, fuse_io);
2798 
2799 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2800 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2801 
2802 	abort_io->u.abort.bio_to_abort = fuse_io;
2803 	abort_io->internal.in_submit_request = true;
2804 
2805 	bdev_nvme_submit_request(ch1, abort_io);
2806 
2807 	spdk_delay_us(10000);
2808 	poll_threads();
2809 
2810 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2811 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2812 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2813 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2814 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2815 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2816 
2817 	/* Aborting the admin request should succeed. */
2818 	admin_io->internal.in_submit_request = true;
2819 	bdev_nvme_submit_request(ch1, admin_io);
2820 
2821 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2822 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2823 
2824 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2825 	abort_io->u.abort.bio_to_abort = admin_io;
2826 	abort_io->internal.in_submit_request = true;
2827 
2828 	bdev_nvme_submit_request(ch2, abort_io);
2829 
2830 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2831 	poll_threads();
2832 
2833 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2834 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2835 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2836 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2837 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2838 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2839 
2840 	set_thread(0);
2841 
2842 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2843 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
2844 	 * resetting should be queued.
2845 	 */
2846 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2847 
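	/* A few poller iterations are enough to detect the qpair failure, free the
	 * qpair, and start resetting the nvme_ctrlr.
	 */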
2848 	poll_thread_times(0, 3);
2849 
2850 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2851 	CU_ASSERT(nvme_ctrlr->resetting == true);
2852 
2853 	write_io->internal.in_submit_request = true;
2854 
2855 	bdev_nvme_submit_request(ch1, write_io);
2856 
2857 	CU_ASSERT(write_io->internal.in_submit_request == true);
2858 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2859 
2860 	/* Aborting the queued write request should succeed immediately. */
2861 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2862 	abort_io->u.abort.bio_to_abort = write_io;
2863 	abort_io->internal.in_submit_request = true;
2864 
2865 	bdev_nvme_submit_request(ch1, abort_io);
2866 
2867 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2868 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2869 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2870 	CU_ASSERT(write_io->internal.in_submit_request == false);
2871 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2872 
2873 	poll_threads();
2874 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2875 	poll_threads();
2876 
2877 	spdk_put_io_channel(ch1);
2878 
2879 	set_thread(1);
2880 
2881 	spdk_put_io_channel(ch2);
2882 
2883 	poll_threads();
2884 
2885 	free(write_io);
2886 	free(fuse_io);
2887 	free(admin_io);
2888 	free(abort_io);
2889 
2890 	set_thread(1);
2891 
2892 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2893 	CU_ASSERT(rc == 0);
2894 
2895 	poll_threads();
2896 	spdk_delay_us(1000);
2897 	poll_threads();
2898 
2899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2900 }
2901 
2902 static void
2903 test_get_io_qpair(void)
2904 {
2905 	struct spdk_nvme_transport_id trid = {};
2906 	struct spdk_nvme_ctrlr ctrlr = {};
2907 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2908 	struct spdk_io_channel *ch;
2909 	struct nvme_ctrlr_channel *ctrlr_ch;
2910 	struct spdk_nvme_qpair *qpair;
2911 	int rc;
2912 
2913 	ut_init_trid(&trid);
2914 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2915 
2916 	set_thread(0);
2917 
2918 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2919 	CU_ASSERT(rc == 0);
2920 
2921 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2922 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2923 
2924 	ch = spdk_get_io_channel(nvme_ctrlr);
2925 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2926 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2927 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2928 
2929 	qpair = bdev_nvme_get_io_qpair(ch);
2930 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2931 
2932 	spdk_put_io_channel(ch);
2933 
2934 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2935 	CU_ASSERT(rc == 0);
2936 
2937 	poll_threads();
2938 	spdk_delay_us(1000);
2939 	poll_threads();
2940 
2941 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2942 }
2943 
2944 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2945  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first. Add a
2946  * test case to avoid regressions for this scenario. spdk_bdev_unregister() calls
2947  * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
2948  */
2949 static void
2950 test_bdev_unregister(void)
2951 {
2952 	struct spdk_nvme_transport_id trid = {};
2953 	struct spdk_nvme_ctrlr *ctrlr;
2954 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2955 	struct nvme_ctrlr *nvme_ctrlr;
2956 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2957 	const int STRING_SIZE = 32;
2958 	const char *attached_names[STRING_SIZE];
2959 	struct nvme_bdev *bdev1, *bdev2;
2960 	int rc;
2961 
2962 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2963 	ut_init_trid(&trid);
2964 
2965 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2966 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2967 
2968 	g_ut_attach_ctrlr_status = 0;
2969 	g_ut_attach_bdev_count = 2;
2970 
2971 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2972 			      attach_ctrlr_done, NULL, &opts, NULL, false);
2973 	CU_ASSERT(rc == 0);
2974 
2975 	spdk_delay_us(1000);
2976 	poll_threads();
2977 
2978 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2979 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2980 
2981 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2982 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2983 
2984 	bdev1 = nvme_ns1->bdev;
2985 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2986 
2987 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2988 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2989 
2990 	bdev2 = nvme_ns2->bdev;
2991 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2992 
2993 	bdev_nvme_destruct(&bdev1->disk);
2994 	bdev_nvme_destruct(&bdev2->disk);
2995 
2996 	poll_threads();
2997 
2998 	CU_ASSERT(nvme_ns1->bdev == NULL);
2999 	CU_ASSERT(nvme_ns2->bdev == NULL);
3000 
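	/* With all bdevs destructed, mark the ctrlr for destruction and call
	 * _nvme_ctrlr_destruct() directly to finish the teardown.
	 */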
3001 	nvme_ctrlr->destruct = true;
3002 	_nvme_ctrlr_destruct(nvme_ctrlr);
3003 
3004 	poll_threads();
3005 	spdk_delay_us(1000);
3006 	poll_threads();
3007 
3008 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3009 }
3010 
3011 static void
3012 test_compare_ns(void)
3013 {
3014 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3015 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3016 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3017 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3018 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3019 
3020 	/* No IDs are defined. */
3021 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3022 
3023 	/* Only EUI64 are defined and not matched. */
3024 	nsdata1.eui64 = 0xABCDEF0123456789;
3025 	nsdata2.eui64 = 0xBBCDEF0123456789;
3026 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3027 
3028 	/* Only EUI64 are defined and matched. */
3029 	nsdata2.eui64 = 0xABCDEF0123456789;
3030 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3031 
3032 	/* Only NGUID are defined and not matched. */
3033 	nsdata1.eui64 = 0x0;
3034 	nsdata2.eui64 = 0x0;
3035 	nsdata1.nguid[0] = 0x12;
3036 	nsdata2.nguid[0] = 0x10;
3037 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3038 
3039 	/* Only NGUID are defined and matched. */
3040 	nsdata2.nguid[0] = 0x12;
3041 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3042 
3043 	/* Only UUID are defined and not matched. */
3044 	nsdata1.nguid[0] = 0x0;
3045 	nsdata2.nguid[0] = 0x0;
3046 	ns1.uuid = &uuid1;
3047 	ns2.uuid = &uuid2;
3048 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3049 
3050 	/* Only one UUID is defined. */
3051 	ns1.uuid = NULL;
3052 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3053 
3054 	/* Only UUID are defined and matched. */
3055 	ns1.uuid = &uuid2;
3056 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3057 
3058 	/* All EUI64, NGUID, and UUID are defined and matched. */
3059 	nsdata1.eui64 = 0x123456789ABCDEF;
3060 	nsdata2.eui64 = 0x123456789ABCDEF;
3061 	nsdata1.nguid[15] = 0x34;
3062 	nsdata2.nguid[15] = 0x34;
3063 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3064 
3065 	/* CSI does not match. */
3066 	ns1.csi = SPDK_NVME_CSI_ZNS;
3067 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3068 }
3069 
3070 static void
3071 test_init_ana_log_page(void)
3072 {
3073 	struct spdk_nvme_transport_id trid = {};
3074 	struct spdk_nvme_ctrlr *ctrlr;
3075 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3076 	struct nvme_ctrlr *nvme_ctrlr;
3077 	const int STRING_SIZE = 32;
3078 	const char *attached_names[STRING_SIZE];
3079 	int rc;
3080 
3081 	set_thread(0);
3082 
3083 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3084 	ut_init_trid(&trid);
3085 
3086 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3087 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3088 
3089 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3090 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3091 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3092 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3093 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
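	/* Give each of the five namespaces a distinct ANA state; reading the ANA log
	 * page during attach should propagate these states to the nvme_ns structs.
	 */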
3094 
3095 	g_ut_attach_ctrlr_status = 0;
3096 	g_ut_attach_bdev_count = 5;
3097 
3098 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3099 			      attach_ctrlr_done, NULL, &opts, NULL, false);
3100 	CU_ASSERT(rc == 0);
3101 
3102 	spdk_delay_us(1000);
3103 	poll_threads();
3104 
3105 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3106 	poll_threads();
3107 
3108 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3109 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3110 
3111 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3112 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3113 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3114 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3115 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3116 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3117 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3118 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3119 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3120 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3121 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3122 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3123 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3124 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3125 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3126 
3127 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3128 	CU_ASSERT(rc == 0);
3129 
3130 	poll_threads();
3131 	spdk_delay_us(1000);
3132 	poll_threads();
3133 
3134 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3135 }
3136 
3137 static void
3138 init_accel(void)
3139 {
3140 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3141 				sizeof(int), "accel_p");
3142 }
3143 
3144 static void
3145 fini_accel(void)
3146 {
3147 	spdk_io_device_unregister(g_accel_p, NULL);
3148 }
3149 
3150 static void
3151 test_get_memory_domains(void)
3152 {
3153 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3154 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3155 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3156 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3157 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3158 	struct spdk_memory_domain *domains[4] = {};
3159 	int rc = 0;
3160 
3161 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3162 
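	/* spdk_nvme_ctrlr_get_memory_domains() is mocked; MOCK_SET() controls the
	 * number of domains each controller reports.
	 */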
3163 	/* nvme controller doesn't have memory domains */
3164 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3165 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3166 	CU_ASSERT(rc == 0);
3167 	CU_ASSERT(domains[0] == NULL);
3168 	CU_ASSERT(domains[1] == NULL);
3169 
3170 	/* nvme controller has a memory domain */
3171 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3172 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3173 	CU_ASSERT(rc == 1);
3174 	CU_ASSERT(domains[0] != NULL);
3175 	memset(domains, 0, sizeof(domains));
3176 
3177 	/* multipath, 2 controllers report 1 memory domain each */
3178 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3179 
3180 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3181 	CU_ASSERT(rc == 2);
3182 	CU_ASSERT(domains[0] != NULL);
3183 	CU_ASSERT(domains[1] != NULL);
3184 	memset(domains, 0, sizeof(domains));
3185 
3186 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3187 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3188 	CU_ASSERT(rc == 2);
3189 
3190 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3191 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3192 	CU_ASSERT(rc == 2);
3193 	CU_ASSERT(domains[0] == NULL);
3194 	CU_ASSERT(domains[1] == NULL);
3195 
3196 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3197 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3198 	CU_ASSERT(rc == 2);
3199 	CU_ASSERT(domains[0] != NULL);
3200 	CU_ASSERT(domains[1] == NULL);
3201 	memset(domains, 0, sizeof(domains));
3202 
3203 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3204 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3205 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3206 	CU_ASSERT(rc == 4);
3207 	CU_ASSERT(domains[0] != NULL);
3208 	CU_ASSERT(domains[1] != NULL);
3209 	CU_ASSERT(domains[2] != NULL);
3210 	CU_ASSERT(domains[3] != NULL);
3211 	memset(domains, 0, sizeof(domains));
3212 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3213 	 * Array size is less than the number of memory domains. */
3214 	 * Array size is less than the number of memory domains */
3215 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3216 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3217 	CU_ASSERT(rc == 4);
3218 	CU_ASSERT(domains[0] != NULL);
3219 	CU_ASSERT(domains[1] != NULL);
3220 	CU_ASSERT(domains[2] != NULL);
3221 	CU_ASSERT(domains[3] == NULL);
3222 	memset(domains, 0, sizeof(domains));
3223 
3224 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3225 }
3226 
3227 static void
3228 test_reconnect_qpair(void)
3229 {
3230 	struct spdk_nvme_transport_id trid = {};
3231 	struct spdk_nvme_ctrlr *ctrlr;
3232 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3233 	struct nvme_ctrlr *nvme_ctrlr;
3234 	const int STRING_SIZE = 32;
3235 	const char *attached_names[STRING_SIZE];
3236 	struct nvme_bdev *bdev;
3237 	struct spdk_io_channel *ch1, *ch2;
3238 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3239 	struct nvme_io_path *io_path1, *io_path2;
3240 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3241 	int rc;
3242 
3243 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3244 	ut_init_trid(&trid);
3245 
3246 	set_thread(0);
3247 
3248 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3249 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3250 
3251 	g_ut_attach_ctrlr_status = 0;
3252 	g_ut_attach_bdev_count = 1;
3253 
3254 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3255 			      attach_ctrlr_done, NULL, &opts, NULL, false);
3256 	CU_ASSERT(rc == 0);
3257 
3258 	spdk_delay_us(1000);
3259 	poll_threads();
3260 
3261 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3262 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3263 
3264 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3265 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3266 
3267 	ch1 = spdk_get_io_channel(bdev);
3268 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3269 
3270 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3271 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3272 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3273 	nvme_qpair1 = io_path1->qpair;
3274 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3275 
3276 	set_thread(1);
3277 
3278 	ch2 = spdk_get_io_channel(bdev);
3279 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3280 
3281 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3282 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3283 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3284 	nvme_qpair2 = io_path2->qpair;
3285 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3286 
3287 	/* If a qpair is disconnected, it is freed and then reconnected via
3288 	 * resetting the corresponding nvme_ctrlr.
3289 	 */
3290 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3291 	ctrlr->is_failed = true;
3292 
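	/* Step through the reconnect one poller iteration at a time to verify the
	 * intermediate state on each thread.
	 */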
3293 	poll_thread_times(1, 3);
3294 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3295 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3296 	CU_ASSERT(nvme_ctrlr->resetting == true);
3297 
3298 	poll_thread_times(0, 3);
3299 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3300 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3301 	CU_ASSERT(ctrlr->is_failed == true);
3302 
3303 	poll_thread_times(1, 2);
3304 	poll_thread_times(0, 1);
3305 	CU_ASSERT(ctrlr->is_failed == false);
3306 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3307 
3308 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3309 	poll_thread_times(0, 2);
3310 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3311 
3312 	poll_thread_times(0, 1);
3313 	poll_thread_times(1, 1);
3314 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3315 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3316 	CU_ASSERT(nvme_ctrlr->resetting == true);
3317 
3318 	poll_thread_times(0, 2);
3319 	poll_thread_times(1, 1);
3320 	poll_thread_times(0, 1);
3321 	CU_ASSERT(nvme_ctrlr->resetting == false);
3322 
3323 	poll_threads();
3324 
3325 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3326 	 * fails, the qpair is just freed.
3327 	 */
3328 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3329 	ctrlr->is_failed = true;
3330 	ctrlr->fail_reset = true;
3331 
3332 	poll_thread_times(1, 3);
3333 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3334 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3335 	CU_ASSERT(nvme_ctrlr->resetting == true);
3336 
3337 	poll_thread_times(0, 3);
3338 	poll_thread_times(1, 1);
3339 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3340 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3341 	CU_ASSERT(ctrlr->is_failed == true);
3342 
3343 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3344 	poll_thread_times(0, 3);
3345 	poll_thread_times(1, 1);
3346 	poll_thread_times(0, 1);
3347 	CU_ASSERT(ctrlr->is_failed == true);
3348 	CU_ASSERT(nvme_ctrlr->resetting == false);
3349 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3350 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3351 
3352 	poll_threads();
3353 
3354 	spdk_put_io_channel(ch2);
3355 
3356 	set_thread(0);
3357 
3358 	spdk_put_io_channel(ch1);
3359 
3360 	poll_threads();
3361 
3362 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3363 	CU_ASSERT(rc == 0);
3364 
3365 	poll_threads();
3366 	spdk_delay_us(1000);
3367 	poll_threads();
3368 
3369 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3370 }
3371 
3372 static void
3373 test_create_bdev_ctrlr(void)
3374 {
3375 	struct nvme_path_id path1 = {}, path2 = {};
3376 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3377 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3378 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3379 	const int STRING_SIZE = 32;
3380 	const char *attached_names[STRING_SIZE];
3381 	int rc;
3382 
3383 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3384 	ut_init_trid(&path1.trid);
3385 	ut_init_trid2(&path2.trid);
3386 
3387 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3388 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3389 
3390 	g_ut_attach_ctrlr_status = 0;
3391 	g_ut_attach_bdev_count = 0;
3392 
3393 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3394 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3395 	CU_ASSERT(rc == 0);
3396 
3397 	spdk_delay_us(1000);
3398 	poll_threads();
3399 
3400 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3401 	poll_threads();
3402 
3403 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3404 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3405 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3406 
3407 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3408 	g_ut_attach_ctrlr_status = -EINVAL;
3409 
3410 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3411 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3412 
3413 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3414 
3415 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3416 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3417 	CU_ASSERT(rc == 0);
3418 
3419 	spdk_delay_us(1000);
3420 	poll_threads();
3421 
3422 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3423 	poll_threads();
3424 
3425 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3426 
3427 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3428 	g_ut_attach_ctrlr_status = 0;
3429 
3430 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3431 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3432 
3433 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3434 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3435 	CU_ASSERT(rc == 0);
3436 
3437 	spdk_delay_us(1000);
3438 	poll_threads();
3439 
3440 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3441 	poll_threads();
3442 
3443 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3444 
3445 	/* Delete two ctrlrs at once. */
3446 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3447 	CU_ASSERT(rc == 0);
3448 
3449 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3450 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3451 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3452 
3453 	poll_threads();
3454 	spdk_delay_us(1000);
3455 	poll_threads();
3456 
3457 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3458 
3459 	/* Add two ctrlrs and delete one by one. */
3460 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3461 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3462 
3463 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3464 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3465 
3466 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3467 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3468 	CU_ASSERT(rc == 0);
3469 
3470 	spdk_delay_us(1000);
3471 	poll_threads();
3472 
3473 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3474 	poll_threads();
3475 
3476 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3477 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3478 	CU_ASSERT(rc == 0);
3479 
3480 	spdk_delay_us(1000);
3481 	poll_threads();
3482 
3483 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3484 	poll_threads();
3485 
3486 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3487 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3488 
3489 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3490 	CU_ASSERT(rc == 0);
3491 
3492 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3493 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3494 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3495 
3496 	poll_threads();
3497 	spdk_delay_us(1000);
3498 	poll_threads();
3499 
3500 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3501 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3502 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3503 
3504 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3505 	CU_ASSERT(rc == 0);
3506 
3507 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3508 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3509 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3510 
3511 	poll_threads();
3512 	spdk_delay_us(1000);
3513 	poll_threads();
3514 
3515 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3516 }
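
/* A note on the attach idiom repeated above: bdev_nvme_create() only starts an
 * asynchronous probe, so every call is followed by a short delay plus polls to
 * let the attach complete, and then an admin-queue poll period to let the
 * ctrlr finish initialization. A minimal sketch of that idiom as a helper
 * (ut_attach_and_poll is a hypothetical name, not part of this file):
 *
 *	static void
 *	ut_attach_and_poll(struct nvme_path_id *path, struct spdk_nvme_ctrlr_opts *opts,
 *			   const char **names, int count)
 *	{
 *		int rc = bdev_nvme_create(&path->trid, "nvme0", names, count,
 *					  attach_ctrlr_done, NULL, opts, NULL, true);
 *		CU_ASSERT(rc == 0);
 *		spdk_delay_us(1000);
 *		poll_threads();
 *		spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
 *		poll_threads();
 *	}
 */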
3517 
3518 static struct nvme_ns *
3519 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3520 {
3521 	struct nvme_ns *nvme_ns;
3522 
3523 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3524 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3525 			return nvme_ns;
3526 		}
3527 	}
3528 
3529 	return NULL;
3530 }
3531 
3532 static void
3533 test_add_multi_ns_to_bdev(void)
3534 {
3535 	struct nvme_path_id path1 = {}, path2 = {};
3536 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3537 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3538 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3539 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3540 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3541 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3542 	const int STRING_SIZE = 32;
3543 	const char *attached_names[STRING_SIZE];
3544 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3545 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3546 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3547 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3548 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3549 	int rc;
3550 
3551 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3552 	ut_init_trid(&path1.trid);
3553 	ut_init_trid2(&path2.trid);
3554 
3555 	/* Create nvme_bdevs, some of which share namespaces between the two ctrlrs. */
3556 
3557 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3558 	 * 3rd, and 4th namespaces are populated.
3559 	 */
3560 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3561 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3562 
3563 	ctrlr1->ns[1].is_active = false;
3564 	ctrlr1->ns[4].is_active = false;
3565 	ctrlr1->ns[0].uuid = &uuid1;
3566 	ctrlr1->ns[2].uuid = &uuid3;
3567 	ctrlr1->ns[3].uuid = &uuid4;
3568 
3569 	g_ut_attach_ctrlr_status = 0;
3570 	g_ut_attach_bdev_count = 3;
3571 
3572 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3573 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3574 	CU_ASSERT(rc == 0);
3575 
3576 	spdk_delay_us(1000);
3577 	poll_threads();
3578 
3579 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3580 	poll_threads();
3581 
3582 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3583 	 * 2nd, and 4th namespaces are populated. The uuid of the 4th namespace is
3584 	 * different, and hence adding the 4th namespace to a bdev should fail.
3585 	 */
3586 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3587 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3588 
3589 	ctrlr2->ns[2].is_active = false;
3590 	ctrlr2->ns[4].is_active = false;
3591 	ctrlr2->ns[0].uuid = &uuid1;
3592 	ctrlr2->ns[1].uuid = &uuid2;
3593 	ctrlr2->ns[3].uuid = &uuid44;
3594 
3595 	g_ut_attach_ctrlr_status = 0;
3596 	g_ut_attach_bdev_count = 2;
3597 
3598 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3599 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3600 	CU_ASSERT(rc == 0);
3601 
3602 	spdk_delay_us(1000);
3603 	poll_threads();
3604 
3605 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3606 	poll_threads();
3607 
3608 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3609 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3610 
3611 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3612 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3613 
3614 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3615 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3616 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3617 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3618 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3619 
3620 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3621 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3622 
3623 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3624 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3625 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3626 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3627 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3628 
3629 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3630 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3631 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3632 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3633 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3634 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3635 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3636 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3637 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3638 
3639 	CU_ASSERT(bdev1->ref == 2);
3640 	CU_ASSERT(bdev2->ref == 1);
3641 	CU_ASSERT(bdev3->ref == 1);
3642 	CU_ASSERT(bdev4->ref == 1);
3643 
3644 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3645 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3646 	CU_ASSERT(rc == 0);
3647 
3648 	poll_threads();
3649 	spdk_delay_us(1000);
3650 	poll_threads();
3651 
3652 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3653 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3654 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3655 
3656 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3657 	CU_ASSERT(rc == 0);
3658 
3659 	poll_threads();
3660 	spdk_delay_us(1000);
3661 	poll_threads();
3662 
3663 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3664 
3665 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3666 	 * can be deleted when the bdev subsystem shuts down.
3667 	 */
3668 	g_ut_attach_bdev_count = 1;
3669 
3670 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3671 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3672 
3673 	ctrlr1->ns[0].uuid = &uuid1;
3674 
3675 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3676 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3677 	CU_ASSERT(rc == 0);
3678 
3679 	spdk_delay_us(1000);
3680 	poll_threads();
3681 
3682 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3683 	poll_threads();
3684 
3685 	ut_init_trid2(&path2.trid);
3686 
3687 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3688 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3689 
3690 	ctrlr2->ns[0].uuid = &uuid1;
3691 
3692 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3693 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3694 	CU_ASSERT(rc == 0);
3695 
3696 	spdk_delay_us(1000);
3697 	poll_threads();
3698 
3699 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3700 	poll_threads();
3701 
3702 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3703 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3704 
3705 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3706 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3707 
3708 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3709 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3710 
3711 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3712 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3713 
3714 	/* Check that the nvme_bdev has two nvme_ns entries. */
3715 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3716 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3717 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3718 
3719 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3720 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3721 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3722 
3723 	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down. */
3724 	bdev_nvme_destruct(&bdev1->disk);
3725 
3726 	poll_threads();
3727 
3728 	CU_ASSERT(nvme_ns1->bdev == NULL);
3729 	CU_ASSERT(nvme_ns2->bdev == NULL);
3730 
3731 	nvme_ctrlr1->destruct = true;
3732 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3733 
3734 	poll_threads();
3735 	spdk_delay_us(1000);
3736 	poll_threads();
3737 
3738 	nvme_ctrlr2->destruct = true;
3739 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3740 
3741 	poll_threads();
3742 	spdk_delay_us(1000);
3743 	poll_threads();
3744 
3745 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3746 }
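
/* The grouping rule exercised above: namespaces from different ctrlrs are
 * attached to the same nvme_bdev only if their UUIDs match, and bdev->ref
 * counts one reference per attached namespace. A hedged sketch of the
 * comparison this implies (illustrative only, not the exact implementation):
 *
 *	if (spdk_uuid_compare(spdk_nvme_ns_get_uuid(ns), &bdev->disk.uuid) != 0) {
 *		return -EINVAL;	// refuse to add a namespace with a mismatched UUID
 *	}
 */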
3747 
3748 static void
3749 test_add_multi_io_paths_to_nbdev_ch(void)
3750 {
3751 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3752 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3753 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3754 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3755 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3756 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3757 	const int STRING_SIZE = 32;
3758 	const char *attached_names[STRING_SIZE];
3759 	struct nvme_bdev *bdev;
3760 	struct spdk_io_channel *ch;
3761 	struct nvme_bdev_channel *nbdev_ch;
3762 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3763 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3764 	int rc;
3765 
3766 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3767 	ut_init_trid(&path1.trid);
3768 	ut_init_trid2(&path2.trid);
3769 	ut_init_trid3(&path3.trid);
3770 	g_ut_attach_ctrlr_status = 0;
3771 	g_ut_attach_bdev_count = 1;
3772 
3773 	set_thread(1);
3774 
3775 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3776 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3777 
3778 	ctrlr1->ns[0].uuid = &uuid1;
3779 
3780 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3781 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3782 	CU_ASSERT(rc == 0);
3783 
3784 	spdk_delay_us(1000);
3785 	poll_threads();
3786 
3787 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3788 	poll_threads();
3789 
3790 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3791 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3792 
3793 	ctrlr2->ns[0].uuid = &uuid1;
3794 
3795 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3796 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3797 	CU_ASSERT(rc == 0);
3798 
3799 	spdk_delay_us(1000);
3800 	poll_threads();
3801 
3802 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3803 	poll_threads();
3804 
3805 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3806 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3807 
3808 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3809 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3810 
3811 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3812 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3813 
3814 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3815 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3816 
3817 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3818 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3819 
3820 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3821 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3822 
3823 	set_thread(0);
3824 
3825 	ch = spdk_get_io_channel(bdev);
3826 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3827 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3828 
3829 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3830 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3831 
3832 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3833 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3834 
3835 	set_thread(1);
3836 
3837 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3838 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3839 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3840 
3841 	ctrlr3->ns[0].uuid = &uuid1;
3842 
3843 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3844 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3845 	CU_ASSERT(rc == 0);
3846 
3847 	spdk_delay_us(1000);
3848 	poll_threads();
3849 
3850 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3851 	poll_threads();
3852 
3853 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3854 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3855 
3856 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3857 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3858 
3859 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3860 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3861 
3862 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3863 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3864 	CU_ASSERT(rc == 0);
3865 
3866 	poll_threads();
3867 	spdk_delay_us(1000);
3868 	poll_threads();
3869 
3870 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3871 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3872 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3873 
3874 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3875 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3876 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3877 
3878 	set_thread(0);
3879 
3880 	spdk_put_io_channel(ch);
3881 
3882 	poll_threads();
3883 
3884 	set_thread(1);
3885 
3886 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3887 	CU_ASSERT(rc == 0);
3888 
3889 	poll_threads();
3890 	spdk_delay_us(1000);
3891 	poll_threads();
3892 
3893 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3894 }
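
/* The assertions above rely on _bdev_nvme_get_io_path() finding the io_path in
 * the channel whose namespace matches the given nvme_ns. Conceptually it is a
 * list walk like the following (a sketch, not the exact implementation):
 *
 *	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
 *		if (io_path->nvme_ns == nvme_ns) {
 *			return io_path;
 *		}
 *	}
 *	return NULL;
 */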
3895 
3896 static void
3897 test_admin_path(void)
3898 {
3899 	struct nvme_path_id path1 = {}, path2 = {};
3900 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3901 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3902 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3903 	const int STRING_SIZE = 32;
3904 	const char *attached_names[STRING_SIZE];
3905 	struct nvme_bdev *bdev;
3906 	struct spdk_io_channel *ch;
3907 	struct spdk_bdev_io *bdev_io;
3908 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3909 	int rc;
3910 
3911 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3912 	ut_init_trid(&path1.trid);
3913 	ut_init_trid2(&path2.trid);
3914 	g_ut_attach_ctrlr_status = 0;
3915 	g_ut_attach_bdev_count = 1;
3916 
3917 	set_thread(0);
3918 
3919 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3920 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3921 
3922 	ctrlr1->ns[0].uuid = &uuid1;
3923 
3924 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3925 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3926 	CU_ASSERT(rc == 0);
3927 
3928 	spdk_delay_us(1000);
3929 	poll_threads();
3930 
3931 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3932 	poll_threads();
3933 
3934 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3935 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3936 
3937 	ctrlr2->ns[0].uuid = &uuid1;
3938 
3939 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3940 			      attach_ctrlr_done, NULL, &opts, NULL, true);
3941 	CU_ASSERT(rc == 0);
3942 
3943 	spdk_delay_us(1000);
3944 	poll_threads();
3945 
3946 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3947 	poll_threads();
3948 
3949 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3950 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3951 
3952 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3953 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3954 
3955 	ch = spdk_get_io_channel(bdev);
3956 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3957 
3958 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3959 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3960 
3961 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3962 	 * submitted to ctrlr2.
3963 	 */
3964 	ctrlr1->is_failed = true;
3965 	bdev_io->internal.in_submit_request = true;
3966 
3967 	bdev_nvme_submit_request(ch, bdev_io);
3968 
3969 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3970 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3971 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3972 
3973 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3974 	poll_threads();
3975 
3976 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3977 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3978 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3979 
3980 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
3981 	ctrlr2->is_failed = true;
3982 	bdev_io->internal.in_submit_request = true;
3983 
3984 	bdev_nvme_submit_request(ch, bdev_io);
3985 
3986 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3987 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3988 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3989 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3990 
3991 	free(bdev_io);
3992 
3993 	spdk_put_io_channel(ch);
3994 
3995 	poll_threads();
3996 
3997 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3998 	CU_ASSERT(rc == 0);
3999 
4000 	poll_threads();
4001 	spdk_delay_us(1000);
4002 	poll_threads();
4003 
4004 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4005 }
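
/* Admin command failover above works because submission may pick any usable
 * ctrlr under the nvme_bdev_ctrlr rather than a fixed one. A hedged sketch of
 * such a selection loop (illustrative; the list head and link field names are
 * assumptions, and the real code also considers resetting state):
 *
 *	TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
 *		if (!spdk_nvme_ctrlr_is_failed(nvme_ctrlr->ctrlr)) {
 *			return nvme_ctrlr;	// submit the admin command here
 *		}
 *	}
 *	return NULL;	// no usable ctrlr: the admin command fails
 */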
4006 
4007 static struct nvme_io_path *
4008 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4009 			struct nvme_ctrlr *nvme_ctrlr)
4010 {
4011 	struct nvme_io_path *io_path;
4012 
4013 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4014 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4015 			return io_path;
4016 		}
4017 	}
4018 
4019 	return NULL;
4020 }
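
/* Usage: given an nvme_bdev_channel and an nvme_ctrlr, this returns the
 * io_path in that channel which is backed by the ctrlr, e.g.:
 *
 *	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
 *	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
 */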
4021 
4022 static void
4023 test_reset_bdev_ctrlr(void)
4024 {
4025 	struct nvme_path_id path1 = {}, path2 = {};
4026 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4027 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4028 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4029 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4030 	struct nvme_path_id *curr_path1, *curr_path2;
4031 	const int STRING_SIZE = 32;
4032 	const char *attached_names[STRING_SIZE];
4033 	struct nvme_bdev *bdev;
4034 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4035 	struct nvme_bdev_io *first_bio;
4036 	struct spdk_io_channel *ch1, *ch2;
4037 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4038 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4039 	int rc;
4040 
4041 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4042 	ut_init_trid(&path1.trid);
4043 	ut_init_trid2(&path2.trid);
4044 	g_ut_attach_ctrlr_status = 0;
4045 	g_ut_attach_bdev_count = 1;
4046 
4047 	set_thread(0);
4048 
4049 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4050 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4051 
4052 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4053 			      attach_ctrlr_done, NULL, &opts, NULL, true);
4054 	CU_ASSERT(rc == 0);
4055 
4056 	spdk_delay_us(1000);
4057 	poll_threads();
4058 
4059 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4060 	poll_threads();
4061 
4062 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4063 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4064 
4065 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4066 			      attach_ctrlr_done, NULL, &opts, NULL, true);
4067 	CU_ASSERT(rc == 0);
4068 
4069 	spdk_delay_us(1000);
4070 	poll_threads();
4071 
4072 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4073 	poll_threads();
4074 
4075 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4076 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4077 
4078 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4079 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4080 
4081 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4082 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4083 
4084 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4085 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4086 
4087 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4088 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4089 
4090 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4091 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4092 
4093 	set_thread(0);
4094 
4095 	ch1 = spdk_get_io_channel(bdev);
4096 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4097 
4098 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4099 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4100 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4101 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4102 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4103 
4104 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4105 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4106 
4107 	set_thread(1);
4108 
4109 	ch2 = spdk_get_io_channel(bdev);
4110 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4111 
4112 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4113 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4114 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4115 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4116 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4117 
4118 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4119 
4120 	/* The first reset request from bdev_io is submitted on thread 0.
4121 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4122 	 *
4123 	 * A few extra polls are necessary after resetting ctrlr1 to check
4124 	 * pending reset requests for ctrlr1.
4125 	 */
4126 	ctrlr1->is_failed = true;
4127 	curr_path1->last_failed_tsc = spdk_get_ticks();
4128 	ctrlr2->is_failed = true;
4129 	curr_path2->last_failed_tsc = spdk_get_ticks();
4130 
4131 	set_thread(0);
4132 
4133 	bdev_nvme_submit_request(ch1, first_bdev_io);
4134 	CU_ASSERT(first_bio->io_path == io_path11);
4135 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4136 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4137 
4138 	poll_thread_times(0, 3);
4139 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4140 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4141 
4142 	poll_thread_times(1, 2);
4143 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4144 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4145 	CU_ASSERT(ctrlr1->is_failed == true);
4146 
4147 	poll_thread_times(0, 1);
4148 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4149 	CU_ASSERT(ctrlr1->is_failed == false);
4150 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4151 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4152 
4153 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4154 	poll_thread_times(0, 2);
4155 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4156 
4157 	poll_thread_times(0, 1);
4158 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4159 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4160 
4161 	poll_thread_times(1, 1);
4162 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4163 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4164 
4165 	poll_thread_times(0, 2);
4166 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4167 	poll_thread_times(1, 1);
4168 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4169 	poll_thread_times(0, 2);
4170 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4171 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4172 	CU_ASSERT(first_bio->io_path == io_path12);
4173 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4174 
4175 	poll_thread_times(0, 3);
4176 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4177 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4178 
4179 	poll_thread_times(1, 2);
4180 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4181 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4182 	CU_ASSERT(ctrlr2->is_failed == true);
4183 
4184 	poll_thread_times(0, 1);
4185 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4186 	CU_ASSERT(ctrlr2->is_failed == false);
4187 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4188 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4189 
4190 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4191 	poll_thread_times(0, 2);
4192 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4193 
4194 	poll_thread_times(0, 1);
4195 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4196 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4197 
4198 	poll_thread_times(1, 2);
4199 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4200 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4201 
4202 	poll_thread_times(0, 2);
4203 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4204 	poll_thread_times(1, 1);
4205 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4206 	poll_thread_times(0, 2);
4207 	CU_ASSERT(first_bio->io_path == NULL);
4208 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4209 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4210 
4211 	poll_threads();
4212 
4213 	/* There is a race between two reset requests from bdev_io.
4214 	 *
4215 	 * The first reset request is submitted on thread 0, and the second reset
4216 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4217 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4218 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4219 	 * The second is pending on ctrlr2 again. After the first completes resetting
4220 	 * ctrlr2, both complete successfully.
4221 	 */
4222 	ctrlr1->is_failed = true;
4223 	curr_path1->last_failed_tsc = spdk_get_ticks();
4224 	ctrlr2->is_failed = true;
4225 	curr_path2->last_failed_tsc = spdk_get_ticks();
4226 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4227 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4228 
4229 	set_thread(0);
4230 
4231 	bdev_nvme_submit_request(ch1, first_bdev_io);
4232 
4233 	set_thread(1);
4234 
4235 	bdev_nvme_submit_request(ch2, second_bdev_io);
4236 
4237 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4238 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4239 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4240 
4241 	poll_threads();
4242 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4243 	poll_threads();
4244 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4245 	poll_threads();
4246 
4247 	CU_ASSERT(ctrlr1->is_failed == false);
4248 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4249 	CU_ASSERT(ctrlr2->is_failed == false);
4250 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4251 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4252 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4253 
4254 	set_thread(0);
4255 
4256 	spdk_put_io_channel(ch1);
4257 
4258 	set_thread(1);
4259 
4260 	spdk_put_io_channel(ch2);
4261 
4262 	poll_threads();
4263 
4264 	set_thread(0);
4265 
4266 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4267 	CU_ASSERT(rc == 0);
4268 
4269 	poll_threads();
4270 	spdk_delay_us(1000);
4271 	poll_threads();
4272 
4273 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4274 
4275 	free(first_bdev_io);
4276 	free(second_bdev_io);
4277 }
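
/* The race above is resolved by per-channel pending reset queues: a reset
 * request that arrives while another reset is in progress is parked on the
 * ctrlr channel and replayed when the active reset completes, which is what
 * the TAILQ_FIRST(&...->pending_resets) assertion checks. A hedged sketch of
 * the enqueue side (illustrative; the link field name is an assumption):
 *
 *	if (nvme_ctrlr->resetting) {
 *		TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bdev_io, module_link);
 *		return;	// completed later by the reset already in flight
 *	}
 */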
4278 
4279 static void
4280 test_find_io_path(void)
4281 {
4282 	struct nvme_bdev_channel nbdev_ch = {
4283 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4284 	};
4285 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4286 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4287 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4288 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4289 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4290 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4291 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4292 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4293 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4294 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4295 
4296 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4297 
4298 	/* Test that an io_path whose ANA state is not accessible is excluded. */
4299 
4300 	nvme_qpair1.qpair = &qpair1;
4301 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4302 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4303 
4304 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4305 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4306 
4307 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4308 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4309 
4310 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4311 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4312 
4313 	nbdev_ch.current_io_path = NULL;
4314 
4315 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4316 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4317 
4318 	nbdev_ch.current_io_path = NULL;
4319 
4320 	/* Test that an io_path whose qpair is being reset is excluded. */
4321 
4322 	nvme_qpair1.qpair = NULL;
4323 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4324 
4325 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4326 
4327 	/* Test that an ANA optimized path, or failing that the first found ANA
4328 	 * non-optimized path, is prioritized.
4329 	 */
4330 
4331 	nvme_qpair1.qpair = &qpair1;
4332 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4333 	nvme_qpair2.qpair = &qpair2;
4334 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4335 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4336 
4337 	nbdev_ch.current_io_path = NULL;
4338 
4339 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4340 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4341 
4342 	nbdev_ch.current_io_path = NULL;
4343 }
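
/* Path selection above follows a simple priority: the first connected qpair
 * whose namespace is OPTIMIZED wins immediately, otherwise the first connected
 * NON_OPTIMIZED path found is used, and inaccessible/persistent-loss/change
 * states are skipped. A hedged sketch (illustrative, not the exact code):
 *
 *	struct nvme_io_path *non_optimized = NULL;
 *
 *	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
 *		if (io_path->qpair->qpair == NULL) {
 *			continue;	// qpair is being reset
 *		}
 *		switch (io_path->nvme_ns->ana_state) {
 *		case SPDK_NVME_ANA_OPTIMIZED_STATE:
 *			return io_path;
 *		case SPDK_NVME_ANA_NON_OPTIMIZED_STATE:
 *			if (non_optimized == NULL) {
 *				non_optimized = io_path;
 *			}
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *	return non_optimized;
 */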
4344 
4345 static void
4346 test_retry_io_if_ana_state_is_updating(void)
4347 {
4348 	struct nvme_path_id path = {};
4349 	struct nvme_ctrlr_opts opts = {};
4350 	struct spdk_nvme_ctrlr *ctrlr;
4351 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4352 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4353 	struct nvme_ctrlr *nvme_ctrlr;
4354 	const int STRING_SIZE = 32;
4355 	const char *attached_names[STRING_SIZE];
4356 	struct nvme_bdev *bdev;
4357 	struct nvme_ns *nvme_ns;
4358 	struct spdk_bdev_io *bdev_io1;
4359 	struct spdk_io_channel *ch;
4360 	struct nvme_bdev_channel *nbdev_ch;
4361 	struct nvme_io_path *io_path;
4362 	struct nvme_qpair *nvme_qpair;
4363 	int rc;
4364 
4365 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4366 	ut_init_trid(&path.trid);
4367 
4368 	set_thread(0);
4369 
4370 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4371 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4372 
4373 	g_ut_attach_ctrlr_status = 0;
4374 	g_ut_attach_bdev_count = 1;
4375 
4376 	opts.ctrlr_loss_timeout_sec = -1;
4377 	opts.reconnect_delay_sec = 1;
4378 
4379 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4380 			      attach_ctrlr_done, NULL, &dopts, &opts, false);
4381 	CU_ASSERT(rc == 0);
4382 
4383 	spdk_delay_us(1000);
4384 	poll_threads();
4385 
4386 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4387 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4388 
4389 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4390 	CU_ASSERT(nvme_ctrlr != NULL);
4391 
4392 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4393 	CU_ASSERT(bdev != NULL);
4394 
4395 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4396 	CU_ASSERT(nvme_ns != NULL);
4397 
4398 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4399 	ut_bdev_io_set_buf(bdev_io1);
4400 
4401 	ch = spdk_get_io_channel(bdev);
4402 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4403 
4404 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4405 
4406 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4407 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4408 
4409 	nvme_qpair = io_path->qpair;
4410 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4411 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4412 
4413 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4414 
4415 	/* If qpair is connected, I/O should succeed. */
4416 	bdev_io1->internal.in_submit_request = true;
4417 
4418 	bdev_nvme_submit_request(ch, bdev_io1);
4419 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4420 
4421 	poll_threads();
4422 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4423 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4424 
4425 	/* If the ANA state of the namespace is inaccessible, I/O should be queued. */
4426 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4427 	nbdev_ch->current_io_path = NULL;
4428 
4429 	bdev_io1->internal.in_submit_request = true;
4430 
4431 	bdev_nvme_submit_request(ch, bdev_io1);
4432 
4433 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4434 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4435 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4436 
4437 	/* ANA state became accessible while I/O was queued. */
4438 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4439 
4440 	spdk_delay_us(1000000);
4441 
4442 	poll_thread_times(0, 1);
4443 
4444 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4445 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4446 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4447 
4448 	poll_threads();
4449 
4450 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4451 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4452 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4453 
4454 	free(bdev_io1);
4455 
4456 	spdk_put_io_channel(ch);
4457 
4458 	poll_threads();
4459 
4460 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4461 	CU_ASSERT(rc == 0);
4462 
4463 	poll_threads();
4464 	spdk_delay_us(1000);
4465 	poll_threads();
4466 
4467 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4468 }
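
/* Queued retries above are driven by the per-channel retry_io_list: when no
 * usable io_path exists, the bdev_io is parked there and a poller resubmits it
 * once its deadline passes. A hedged sketch of the resubmit step (illustrative;
 * the link field name is an assumption):
 *
 *	bdev_io = TAILQ_FIRST(&nbdev_ch->retry_io_list);
 *	if (bdev_io != NULL && spdk_get_ticks() >= bio->retry_ticks) {
 *		TAILQ_REMOVE(&nbdev_ch->retry_io_list, bdev_io, module_link);
 *		bdev_nvme_submit_request(ch, bdev_io);
 *	}
 */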
4469 
4470 static void
4471 test_retry_io_for_io_path_error(void)
4472 {
4473 	struct nvme_path_id path1 = {}, path2 = {};
4474 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4475 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4476 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4477 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4478 	const int STRING_SIZE = 32;
4479 	const char *attached_names[STRING_SIZE];
4480 	struct nvme_bdev *bdev;
4481 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4482 	struct spdk_bdev_io *bdev_io;
4483 	struct nvme_bdev_io *bio;
4484 	struct spdk_io_channel *ch;
4485 	struct nvme_bdev_channel *nbdev_ch;
4486 	struct nvme_io_path *io_path1, *io_path2;
4487 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4488 	struct ut_nvme_req *req;
4489 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4490 	int rc;
4491 
4492 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4493 	ut_init_trid(&path1.trid);
4494 	ut_init_trid2(&path2.trid);
4495 
4496 	g_opts.bdev_retry_count = 1;
4497 
4498 	set_thread(0);
4499 
4500 	g_ut_attach_ctrlr_status = 0;
4501 	g_ut_attach_bdev_count = 1;
4502 
4503 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4504 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4505 
4506 	ctrlr1->ns[0].uuid = &uuid1;
4507 
4508 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4509 			      attach_ctrlr_done, NULL, &opts, NULL, true);
4510 	CU_ASSERT(rc == 0);
4511 
4512 	spdk_delay_us(1000);
4513 	poll_threads();
4514 
4515 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4516 	poll_threads();
4517 
4518 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4519 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4520 
4521 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4522 	CU_ASSERT(nvme_ctrlr1 != NULL);
4523 
4524 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4525 	CU_ASSERT(bdev != NULL);
4526 
4527 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4528 	CU_ASSERT(nvme_ns1 != NULL);
4529 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4530 
4531 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4532 	ut_bdev_io_set_buf(bdev_io);
4533 
4534 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4535 
4536 	ch = spdk_get_io_channel(bdev);
4537 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4538 
4539 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4540 
4541 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4542 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4543 
4544 	nvme_qpair1 = io_path1->qpair;
4545 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4546 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4547 
4548 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4549 
4550 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4551 	bdev_io->internal.in_submit_request = true;
4552 
4553 	bdev_nvme_submit_request(ch, bdev_io);
4554 
4555 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4556 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4557 
4558 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4559 	SPDK_CU_ASSERT_FATAL(req != NULL);
4560 
4561 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4562 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4563 	req->cpl.status.dnr = 1;
4564 
4565 	poll_thread_times(0, 1);
4566 
4567 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4568 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4569 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4570 
4571 	/* I/O got a temporary I/O path error, but it should succeed after a retry. */
4572 	bdev_io->internal.in_submit_request = true;
4573 
4574 	bdev_nvme_submit_request(ch, bdev_io);
4575 
4576 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4577 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4578 
4579 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4580 	SPDK_CU_ASSERT_FATAL(req != NULL);
4581 
4582 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4583 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4584 
4585 	poll_thread_times(0, 1);
4586 
4587 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4588 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4589 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4590 
4591 	poll_threads();
4592 
4593 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4594 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4595 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4596 
4597 	/* Add io_path2 dynamically, and create a multipath configuration. */
4598 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4599 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4600 
4601 	ctrlr2->ns[0].uuid = &uuid1;
4602 
4603 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4604 			      attach_ctrlr_done, NULL, &opts, NULL, true);
4605 	CU_ASSERT(rc == 0);
4606 
4607 	spdk_delay_us(1000);
4608 	poll_threads();
4609 
4610 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4611 	poll_threads();
4612 
4613 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4614 	CU_ASSERT(nvme_ctrlr2 != NULL);
4615 
4616 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4617 	CU_ASSERT(nvme_ns2 != NULL);
4618 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4619 
4620 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4621 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4622 
4623 	nvme_qpair2 = io_path2->qpair;
4624 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4625 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4626 
4627 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4628 	 * and deleted, so the I/O is aborted. However, io_path2 is available.
4629 	 * Hence, after a retry, the I/O is submitted to io_path2 and should succeed.
4630 	 */
4631 	bdev_io->internal.in_submit_request = true;
4632 
4633 	bdev_nvme_submit_request(ch, bdev_io);
4634 
4635 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4636 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4637 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4638 
4639 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4640 	SPDK_CU_ASSERT_FATAL(req != NULL);
4641 
4642 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4643 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4644 
4645 	poll_thread_times(0, 1);
4646 
4647 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4648 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4649 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4650 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4651 
4652 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4653 	nvme_qpair1->qpair = NULL;
4654 
4655 	poll_threads();
4656 
4657 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4658 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4659 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4660 
4661 	free(bdev_io);
4662 
4663 	spdk_put_io_channel(ch);
4664 
4665 	poll_threads();
4666 
4667 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4668 	CU_ASSERT(rc == 0);
4669 
4670 	poll_threads();
4671 	spdk_delay_us(1000);
4672 	poll_threads();
4673 
4674 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4675 
4676 	g_opts.bdev_retry_count = 0;
4677 }
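
/* The DNR bit is the key distinction above: a path error with DNR (do not
 * retry) set must be surfaced to the caller, while the same status without DNR
 * is retriable, possibly on another io_path. A hedged sketch of that decision
 * (illustrative only, not the exact implementation):
 *
 *	if (spdk_nvme_cpl_is_path_error(cpl) && !cpl->status.dnr &&
 *	    (g_opts.bdev_retry_count == -1 ||
 *	     bio->retry_count < g_opts.bdev_retry_count)) {
 *		// queue the I/O for retry, e.g. on another io_path
 *	} else {
 *		// complete with SPDK_BDEV_IO_STATUS_NVME_ERROR
 *	}
 */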
4678 
4679 static void
4680 test_retry_io_count(void)
4681 {
4682 	struct nvme_path_id path = {};
4683 	struct spdk_nvme_ctrlr *ctrlr;
4684 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4685 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4686 	struct nvme_ctrlr *nvme_ctrlr;
4687 	const int STRING_SIZE = 32;
4688 	const char *attached_names[STRING_SIZE];
4689 	struct nvme_bdev *bdev;
4690 	struct nvme_ns *nvme_ns;
4691 	struct spdk_bdev_io *bdev_io;
4692 	struct nvme_bdev_io *bio;
4693 	struct spdk_io_channel *ch;
4694 	struct nvme_bdev_channel *nbdev_ch;
4695 	struct nvme_io_path *io_path;
4696 	struct nvme_qpair *nvme_qpair;
4697 	struct ut_nvme_req *req;
4698 	int rc;
4699 
4700 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4701 	ut_init_trid(&path.trid);
4702 
4703 	set_thread(0);
4704 
4705 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4706 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4707 
4708 	g_ut_attach_ctrlr_status = 0;
4709 	g_ut_attach_bdev_count = 1;
4710 
4711 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4712 			      attach_ctrlr_done, NULL, &opts, NULL, false);
4713 	CU_ASSERT(rc == 0);
4714 
4715 	spdk_delay_us(1000);
4716 	poll_threads();
4717 
4718 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4719 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4720 
4721 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
4722 	CU_ASSERT(nvme_ctrlr != NULL);
4723 
4724 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4725 	CU_ASSERT(bdev != NULL);
4726 
4727 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4728 	CU_ASSERT(nvme_ns != NULL);
4729 
4730 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4731 	ut_bdev_io_set_buf(bdev_io);
4732 
4733 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4734 
4735 	ch = spdk_get_io_channel(bdev);
4736 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4737 
4738 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4739 
4740 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4741 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4742 
4743 	nvme_qpair = io_path->qpair;
4744 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4745 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4746 
4747 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4748 
4749 	/* If I/O is aborted by request, it should not be retried. */
4750 	g_opts.bdev_retry_count = 1;
4751 
4752 	bdev_io->internal.in_submit_request = true;
4753 
4754 	bdev_nvme_submit_request(ch, bdev_io);
4755 
4756 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4757 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4758 
4759 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4760 	SPDK_CU_ASSERT_FATAL(req != NULL);
4761 
4762 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4763 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4764 
4765 	poll_thread_times(0, 1);
4766 
4767 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4768 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4769 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4770 
4771 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4772 	 * the failed I/O should not be retried.
4773 	 */
4774 	g_opts.bdev_retry_count = 4;
4775 
4776 	bdev_io->internal.in_submit_request = true;
4777 
4778 	bdev_nvme_submit_request(ch, bdev_io);
4779 
4780 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4781 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4782 
4783 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4784 	SPDK_CU_ASSERT_FATAL(req != NULL);
4785 
4786 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4787 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4788 	bio->retry_count = 4;
4789 
4790 	poll_thread_times(0, 1);
4791 
4792 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4793 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4794 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4795 
4796 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4797 	g_opts.bdev_retry_count = -1;
4798 
4799 	bdev_io->internal.in_submit_request = true;
4800 
4801 	bdev_nvme_submit_request(ch, bdev_io);
4802 
4803 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4804 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4805 
4806 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4807 	SPDK_CU_ASSERT_FATAL(req != NULL);
4808 
4809 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4810 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4811 	bio->retry_count = 4;
4812 
4813 	poll_thread_times(0, 1);
4814 
4815 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4816 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4817 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4818 
4819 	poll_threads();
4820 
4821 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4822 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4823 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4824 
4825 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4826 	 * the failed I/O should be retried.
4827 	 */
4828 	g_opts.bdev_retry_count = 4;
4829 
4830 	bdev_io->internal.in_submit_request = true;
4831 
4832 	bdev_nvme_submit_request(ch, bdev_io);
4833 
4834 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4835 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4836 
4837 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4838 	SPDK_CU_ASSERT_FATAL(req != NULL);
4839 
4840 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4841 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4842 	bio->retry_count = 3;
4843 
4844 	poll_thread_times(0, 1);
4845 
4846 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4847 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4848 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4849 
4850 	poll_threads();
4851 
4852 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4853 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4854 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4855 
4856 	free(bdev_io);
4857 
4858 	spdk_put_io_channel(ch);
4859 
4860 	poll_threads();
4861 
4862 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4863 	CU_ASSERT(rc == 0);
4864 
4865 	poll_threads();
4866 	spdk_delay_us(1000);
4867 	poll_threads();
4868 
4869 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4870 
4871 	g_opts.bdev_retry_count = 0;
4872 }
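
/* Summarizing the retry-count policy the cases above exercise (inferred from
 * the assertions, not quoted from the implementation): an aborted-by-request
 * completion is never retried; for other retriable errors, -1 means retry
 * forever and a non-negative value allows a retry only while bio->retry_count
 * is still below it. As a predicate:
 *
 *	bool retriable = (g_opts.bdev_retry_count == -1 ||
 *			  bio->retry_count < g_opts.bdev_retry_count);
 */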
4873 
4874 static void
4875 test_concurrent_read_ana_log_page(void)
4876 {
4877 	struct spdk_nvme_transport_id trid = {};
4878 	struct spdk_nvme_ctrlr *ctrlr;
4879 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4880 	struct nvme_ctrlr *nvme_ctrlr;
4881 	const int STRING_SIZE = 32;
4882 	const char *attached_names[STRING_SIZE];
4883 	int rc;
4884 
4885 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4886 	ut_init_trid(&trid);
4887 
4888 	set_thread(0);
4889 
4890 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4891 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4892 
4893 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4894 
4895 	g_ut_attach_ctrlr_status = 0;
4896 	g_ut_attach_bdev_count = 1;
4897 
4898 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4899 			      attach_ctrlr_done, NULL, &opts, NULL, false);
4900 	CU_ASSERT(rc == 0);
4901 
4902 	spdk_delay_us(1000);
4903 	poll_threads();
4904 
4905 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4906 	poll_threads();
4907 
4908 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4909 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4910 
4911 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4912 
4913 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4914 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4915 
4916 	/* A following read request should be rejected. */
4917 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4918 
4919 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4920 
4921 	set_thread(1);
4922 
4923 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4924 
4925 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4926 
4927 	/* A reset request issued while reading the ANA log page should not be rejected. */
4928 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4929 	CU_ASSERT(rc == 0);
4930 
4931 	poll_threads();
4932 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4933 	poll_threads();
4934 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4935 	poll_threads();
4936 
4937 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4938 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4939 
4940 	/* Reading the ANA log page while the ctrlr is being reset should be rejected. */
4941 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4942 	CU_ASSERT(rc == 0);
4943 
4944 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4945 
4946 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4947 
4948 	poll_threads();
4949 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4950 	poll_threads();
4951 
4952 	set_thread(0);
4953 
4954 	/* It is possible that the target sent an ANA change for inactive namespaces.
4955 	 *
4956 	 * Previously, an assert() was added because this case was considered unlikely.
4957 	 * However, the assert() was hit in a real environment.
4958 	 *
4959 	 * Hence, remove the assert() and add a unit test case.
4960 	 *
4961 	 * Simulate this case by depopulating namespaces and then parsing an ANA
4962 	 * log page created when all namespaces were active.
4963 	 * Then, check that parsing the ANA log page completes successfully.
4964 	 */
4965 	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
4966 
4967 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
4968 	CU_ASSERT(rc == 0);
4969 
4970 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4971 	CU_ASSERT(rc == 0);
4972 
4973 	poll_threads();
4974 	spdk_delay_us(1000);
4975 	poll_threads();
4976 
4977 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4978 }
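
/* The mutual exclusion above hinges on two flags: ana_log_page_updating makes
 * concurrent log page reads no-ops, and a read issued while the ctrlr is
 * resetting is rejected, although a reset is still allowed to start during a
 * read. A hedged sketch of the guard on the read side (illustrative):
 *
 *	if (nvme_ctrlr->resetting || nvme_ctrlr->ana_log_page_updating) {
 *		return;	// another update or a reset is active; skip this read
 *	}
 *	nvme_ctrlr->ana_log_page_updating = true;
 */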
4979 
4980 static void
4981 test_retry_io_for_ana_error(void)
4982 {
4983 	struct nvme_path_id path = {};
4984 	struct spdk_nvme_ctrlr *ctrlr;
4985 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4986 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4987 	struct nvme_ctrlr *nvme_ctrlr;
4988 	const int STRING_SIZE = 32;
4989 	const char *attached_names[STRING_SIZE];
4990 	struct nvme_bdev *bdev;
4991 	struct nvme_ns *nvme_ns;
4992 	struct spdk_bdev_io *bdev_io;
4993 	struct nvme_bdev_io *bio;
4994 	struct spdk_io_channel *ch;
4995 	struct nvme_bdev_channel *nbdev_ch;
4996 	struct nvme_io_path *io_path;
4997 	struct nvme_qpair *nvme_qpair;
4998 	struct ut_nvme_req *req;
4999 	uint64_t now;
5000 	int rc;
5001 
5002 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5003 	ut_init_trid(&path.trid);
5004 
5005 	g_opts.bdev_retry_count = 1;
5006 
5007 	set_thread(0);
5008 
5009 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5010 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5011 
5012 	g_ut_attach_ctrlr_status = 0;
5013 	g_ut_attach_bdev_count = 1;
5014 
5015 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5016 			      attach_ctrlr_done, NULL, &opts, NULL, false);
5017 	CU_ASSERT(rc == 0);
5018 
5019 	spdk_delay_us(1000);
5020 	poll_threads();
5021 
5022 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5023 	poll_threads();
5024 
5025 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5026 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5027 
5028 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5029 	CU_ASSERT(nvme_ctrlr != NULL);
5030 
5031 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5032 	CU_ASSERT(bdev != NULL);
5033 
5034 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5035 	CU_ASSERT(nvme_ns != NULL);
5036 
5037 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5038 	ut_bdev_io_set_buf(bdev_io);
5039 
5040 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5041 
5042 	ch = spdk_get_io_channel(bdev);
5043 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5044 
5045 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5046 
5047 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5048 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5049 
5050 	nvme_qpair = io_path->qpair;
5051 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5052 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5053 
5054 	now = spdk_get_ticks();
5055 
5056 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5057 
5058 	/* If an I/O gets an ANA error, it should be queued, the corresponding
5059 	 * namespace should be frozen, and its ANA state should be updated.
5060 	 */
5061 	bdev_io->internal.in_submit_request = true;
5062 
5063 	bdev_nvme_submit_request(ch, bdev_io);
5064 
5065 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5066 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5067 
5068 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5069 	SPDK_CU_ASSERT_FATAL(req != NULL);
5070 
5071 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5072 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5073 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
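	/* Completing the request with SCT_PATH / ASYMMETRIC_ACCESS_INACCESSIBLE
	 * emulates the target reporting that the namespace became ANA inaccessible.
	 */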
5074 
5075 	poll_thread_times(0, 1);
5076 
5077 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5078 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5079 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5080 	/* I/O should be retried immediately. */
5081 	CU_ASSERT(bio->retry_ticks == now);
5082 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5083 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5084 
5085 	poll_threads();
5086 
5087 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5088 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5089 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5090 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried after a second if no I/O path was found but
	 * one may become available later.
	 */
5094 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5095 
	/* The namespace should be unfrozen after its ANA state update completes. */
5097 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5098 	poll_threads();
5099 
5100 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5101 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5102 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5103 
	/* Retrying the queued I/O should succeed. */
5105 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5106 	poll_threads();
5107 
5108 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5109 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5110 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5111 
5112 	free(bdev_io);
5113 
5114 	spdk_put_io_channel(ch);
5115 
5116 	poll_threads();
5117 
5118 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5119 	CU_ASSERT(rc == 0);
5120 
5121 	poll_threads();
5122 	spdk_delay_us(1000);
5123 	poll_threads();
5124 
5125 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5126 
5127 	g_opts.bdev_retry_count = 0;
5128 }
5129 
5130 static void
5131 test_check_io_error_resiliency_params(void)
5132 {
5133 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5134 	 * 3rd parameter is fast_io_fail_timeout_sec.
5135 	 */
5136 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5137 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5138 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5139 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5140 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5141 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5142 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5143 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5144 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5145 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5146 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5147 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5148 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5149 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5150 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5151 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5152 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5153 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5154 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5155 }
5156 
5157 static void
5158 test_retry_io_if_ctrlr_is_resetting(void)
5159 {
5160 	struct nvme_path_id path = {};
5161 	struct nvme_ctrlr_opts opts = {};
5162 	struct spdk_nvme_ctrlr *ctrlr;
5163 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5164 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5165 	struct nvme_ctrlr *nvme_ctrlr;
5166 	const int STRING_SIZE = 32;
5167 	const char *attached_names[STRING_SIZE];
5168 	struct nvme_bdev *bdev;
5169 	struct nvme_ns *nvme_ns;
5170 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5171 	struct spdk_io_channel *ch;
5172 	struct nvme_bdev_channel *nbdev_ch;
5173 	struct nvme_io_path *io_path;
5174 	struct nvme_qpair *nvme_qpair;
5175 	int rc;
5176 
5177 	g_opts.bdev_retry_count = 1;
5178 
5179 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5180 	ut_init_trid(&path.trid);
5181 
5182 	set_thread(0);
5183 
5184 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5185 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5186 
5187 	g_ut_attach_ctrlr_status = 0;
5188 	g_ut_attach_bdev_count = 1;
5189 
5190 	opts.ctrlr_loss_timeout_sec = -1;
5191 	opts.reconnect_delay_sec = 1;
5192 
5193 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5194 			      attach_ctrlr_done, NULL, &dopts, &opts, false);
5195 	CU_ASSERT(rc == 0);
5196 
5197 	spdk_delay_us(1000);
5198 	poll_threads();
5199 
5200 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5201 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5202 
5203 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5204 	CU_ASSERT(nvme_ctrlr != NULL);
5205 
5206 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5207 	CU_ASSERT(bdev != NULL);
5208 
5209 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5210 	CU_ASSERT(nvme_ns != NULL);
5211 
5212 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5213 	ut_bdev_io_set_buf(bdev_io1);
5214 
5215 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5216 	ut_bdev_io_set_buf(bdev_io2);
5217 
5218 	ch = spdk_get_io_channel(bdev);
5219 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5220 
5221 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5222 
5223 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5224 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5225 
5226 	nvme_qpair = io_path->qpair;
5227 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5228 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5229 
5230 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5231 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5232 
5233 	/* If qpair is connected, I/O should succeed. */
5234 	bdev_io1->internal.in_submit_request = true;
5235 
5236 	bdev_nvme_submit_request(ch, bdev_io1);
5237 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5238 
5239 	poll_threads();
5240 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5242 
5243 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5244 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5245 	 * while resetting the nvme_ctrlr.
5246 	 */
5247 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5248 	ctrlr->is_failed = true;
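	/* A disconnected qpair together with a failed ctrlr makes the next polls
	 * delete the qpair and start resetting the nvme_ctrlr, as checked below.
	 */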
5249 
5250 	poll_thread_times(0, 5);
5251 
5252 	CU_ASSERT(nvme_qpair->qpair == NULL);
5253 	CU_ASSERT(nvme_ctrlr->resetting == true);
5254 	CU_ASSERT(ctrlr->is_failed == false);
5255 
5256 	bdev_io1->internal.in_submit_request = true;
5257 
5258 	bdev_nvme_submit_request(ch, bdev_io1);
5259 
5260 	spdk_delay_us(1);
5261 
5262 	bdev_io2->internal.in_submit_request = true;
5263 
5264 	bdev_nvme_submit_request(ch, bdev_io2);
5265 
5266 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5267 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5268 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5269 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5270 
5271 	poll_threads();
5272 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5273 	poll_threads();
5274 
5275 	CU_ASSERT(nvme_qpair->qpair != NULL);
5276 	CU_ASSERT(nvme_ctrlr->resetting == false);
5277 
5278 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5279 
5280 	poll_thread_times(0, 1);
5281 
5282 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5283 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5284 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5285 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5286 
5287 	poll_threads();
5288 
5289 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5290 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5291 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5292 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5293 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5294 
5295 	spdk_delay_us(1);
5296 
5297 	poll_thread_times(0, 1);
5298 
5299 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5300 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5301 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5302 
5303 	poll_threads();
5304 
5305 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5306 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5307 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5308 
5309 	free(bdev_io1);
5310 	free(bdev_io2);
5311 
5312 	spdk_put_io_channel(ch);
5313 
5314 	poll_threads();
5315 
5316 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5317 	CU_ASSERT(rc == 0);
5318 
5319 	poll_threads();
5320 	spdk_delay_us(1000);
5321 	poll_threads();
5322 
5323 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5324 
5325 	g_opts.bdev_retry_count = 0;
5326 }
5327 
5328 static void
5329 test_reconnect_ctrlr(void)
5330 {
5331 	struct spdk_nvme_transport_id trid = {};
5332 	struct spdk_nvme_ctrlr ctrlr = {};
5333 	struct nvme_ctrlr *nvme_ctrlr;
5334 	struct spdk_io_channel *ch1, *ch2;
5335 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5336 	int rc;
5337 
5338 	ut_init_trid(&trid);
5339 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5340 
5341 	set_thread(0);
5342 
5343 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5344 	CU_ASSERT(rc == 0);
5345 
5346 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5347 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5348 
5349 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5350 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
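	/* With ctrlr_loss_timeout_sec = 2 and reconnect_delay_sec = 1, a reconnect
	 * is retried every second and the ctrlr is deleted once reconnection keeps
	 * failing past the loss timeout, which this test walks through below.
	 */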
5351 
5352 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5353 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5354 
5355 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5356 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5357 
5358 	set_thread(1);
5359 
5360 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5361 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5362 
5363 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5364 
5365 	/* Reset starts from thread 1. */
5366 	set_thread(1);
5367 
5368 	/* The reset should fail and a reconnect timer should be registered. */
5369 	ctrlr.fail_reset = true;
5370 	ctrlr.is_failed = true;
5371 
5372 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5373 	CU_ASSERT(rc == 0);
5374 	CU_ASSERT(nvme_ctrlr->resetting == true);
5375 	CU_ASSERT(ctrlr.is_failed == true);
5376 
5377 	poll_threads();
5378 
5379 	CU_ASSERT(nvme_ctrlr->resetting == false);
5380 	CU_ASSERT(ctrlr.is_failed == false);
5381 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5382 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5383 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5384 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5385 
	/* A new reset starts from thread 1. */
5387 	set_thread(1);
5388 
	/* The reset should cancel the reconnect timer and start directly from
	 * reconnection. Then, the reset should fail and a reconnect timer should
	 * be registered again.
	 */
5392 	ctrlr.fail_reset = true;
5393 	ctrlr.is_failed = true;
5394 
5395 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5396 	CU_ASSERT(rc == 0);
5397 	CU_ASSERT(nvme_ctrlr->resetting == true);
5398 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5399 	CU_ASSERT(ctrlr.is_failed == true);
5400 
5401 	poll_threads();
5402 
5403 	CU_ASSERT(nvme_ctrlr->resetting == false);
5404 	CU_ASSERT(ctrlr.is_failed == false);
5405 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5406 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5407 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5408 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5409 
	/* Then a reconnect retry should succeed. */
5411 	ctrlr.fail_reset = false;
5412 
5413 	spdk_delay_us(SPDK_SEC_TO_USEC);
5414 	poll_thread_times(0, 1);
5415 
5416 	CU_ASSERT(nvme_ctrlr->resetting == true);
5417 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5418 
5419 	poll_threads();
5420 
5421 	CU_ASSERT(nvme_ctrlr->resetting == false);
5422 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5423 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5424 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5425 
5426 	/* The reset should fail and a reconnect timer should be registered. */
5427 	ctrlr.fail_reset = true;
5428 	ctrlr.is_failed = true;
5429 
5430 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5431 	CU_ASSERT(rc == 0);
5432 	CU_ASSERT(nvme_ctrlr->resetting == true);
5433 	CU_ASSERT(ctrlr.is_failed == true);
5434 
5435 	poll_threads();
5436 
5437 	CU_ASSERT(nvme_ctrlr->resetting == false);
5438 	CU_ASSERT(ctrlr.is_failed == false);
5439 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5440 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5441 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5442 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5443 
5444 	/* Then a reconnect retry should still fail. */
5445 	spdk_delay_us(SPDK_SEC_TO_USEC);
5446 	poll_thread_times(0, 1);
5447 
5448 	CU_ASSERT(nvme_ctrlr->resetting == true);
5449 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5450 
5451 	poll_threads();
5452 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5453 	poll_threads();
5454 
5455 	CU_ASSERT(nvme_ctrlr->resetting == false);
5456 	CU_ASSERT(ctrlr.is_failed == false);
5457 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5458 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5459 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5460 
5461 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5462 	spdk_delay_us(SPDK_SEC_TO_USEC);
5463 	poll_threads();
5464 
5465 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5466 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5467 	CU_ASSERT(nvme_ctrlr->destruct == true);
5468 
5469 	spdk_put_io_channel(ch2);
5470 
5471 	set_thread(0);
5472 
5473 	spdk_put_io_channel(ch1);
5474 
5475 	poll_threads();
5476 	spdk_delay_us(1000);
5477 	poll_threads();
5478 
5479 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5480 }
5481 
5482 static struct nvme_path_id *
5483 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5484 		       const struct spdk_nvme_transport_id *trid)
5485 {
5486 	struct nvme_path_id *p;
5487 
5488 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5489 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5490 			break;
5491 		}
5492 	}
5493 
5494 	return p;
5495 }
5496 
5497 static void
5498 test_retry_failover_ctrlr(void)
5499 {
5500 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5501 	struct spdk_nvme_ctrlr ctrlr = {};
5502 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5503 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5504 	struct spdk_io_channel *ch;
5505 	struct nvme_ctrlr_channel *ctrlr_ch;
5506 	int rc;
5507 
5508 	ut_init_trid(&trid1);
5509 	ut_init_trid2(&trid2);
5510 	ut_init_trid3(&trid3);
5511 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5512 
5513 	set_thread(0);
5514 
5515 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5516 	CU_ASSERT(rc == 0);
5517 
5518 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5519 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5520 
5521 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5522 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
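	/* ctrlr_loss_timeout_sec = -1 means reconnect retries continue
	 * indefinitely, so the ctrlr is never deleted by timeout in this test.
	 */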
5523 
5524 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5525 	CU_ASSERT(rc == 0);
5526 
5527 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5528 	CU_ASSERT(rc == 0);
5529 
5530 	ch = spdk_get_io_channel(nvme_ctrlr);
5531 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5532 
5533 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5534 
5535 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5536 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5537 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5538 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5539 
	/* Get the path IDs for trid2 and trid3. Their failure states are checked
	 * after the failed reset below.
	 */
5541 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5542 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5543 
5544 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5545 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5546 
	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
	 * and a reconnect timer is started.
	 */
5549 	ctrlr.fail_reset = true;
5550 	ctrlr.is_failed = true;
5551 
5552 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5553 	CU_ASSERT(rc == 0);
5554 
5555 	poll_threads();
5556 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5557 	poll_threads();
5558 
5559 	CU_ASSERT(nvme_ctrlr->resetting == false);
5560 	CU_ASSERT(ctrlr.is_failed == false);
5561 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5562 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5563 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5564 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5565 
5566 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5567 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5568 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5569 
	/* If trid1 is removed while a reconnect is scheduled, path_id is switched
	 * to trid2 but a reset is not started.
	 */
5573 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5574 	CU_ASSERT(rc == -EALREADY);
5575 
5576 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5577 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5578 
5579 	CU_ASSERT(nvme_ctrlr->resetting == false);
5580 
	/* If reconnect succeeds, trid2 should be the active path_id. */
5582 	ctrlr.fail_reset = false;
5583 
5584 	spdk_delay_us(SPDK_SEC_TO_USEC);
5585 	poll_thread_times(0, 1);
5586 
5587 	CU_ASSERT(nvme_ctrlr->resetting == true);
5588 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5589 
5590 	poll_threads();
5591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5592 	poll_threads();
5593 
5594 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5595 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5596 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5597 	CU_ASSERT(nvme_ctrlr->resetting == false);
5598 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5599 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5600 
5601 	spdk_put_io_channel(ch);
5602 
5603 	poll_threads();
5604 
5605 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5606 	CU_ASSERT(rc == 0);
5607 
5608 	poll_threads();
5609 	spdk_delay_us(1000);
5610 	poll_threads();
5611 
5612 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5613 }
5614 
5615 static void
5616 test_fail_path(void)
5617 {
5618 	struct nvme_path_id path = {};
5619 	struct nvme_ctrlr_opts opts = {};
5620 	struct spdk_nvme_ctrlr *ctrlr;
5621 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5622 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5623 	struct nvme_ctrlr *nvme_ctrlr;
5624 	const int STRING_SIZE = 32;
5625 	const char *attached_names[STRING_SIZE];
5626 	struct nvme_bdev *bdev;
5627 	struct nvme_ns *nvme_ns;
5628 	struct spdk_bdev_io *bdev_io;
5629 	struct spdk_io_channel *ch;
5630 	struct nvme_bdev_channel *nbdev_ch;
5631 	struct nvme_io_path *io_path;
5632 	struct nvme_ctrlr_channel *ctrlr_ch;
5633 	int rc;
5634 
	/* The test scenario is the following.
	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
	 *   expires first. The queued I/O is failed.
	 * - After fast_io_fail_timeout_sec expires, any new I/O is failed immediately.
	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
	 */
5644 
5645 	g_opts.bdev_retry_count = 1;
5646 
5647 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5648 	ut_init_trid(&path.trid);
5649 
5650 	set_thread(0);
5651 
5652 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5653 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5654 
5655 	g_ut_attach_ctrlr_status = 0;
5656 	g_ut_attach_bdev_count = 1;
5657 
5658 	opts.ctrlr_loss_timeout_sec = 4;
5659 	opts.reconnect_delay_sec = 1;
5660 	opts.fast_io_fail_timeout_sec = 2;
5661 
5662 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5663 			      attach_ctrlr_done, NULL, &dopts, &opts, false);
5664 	CU_ASSERT(rc == 0);
5665 
5666 	spdk_delay_us(1000);
5667 	poll_threads();
5668 
5669 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5670 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5671 
5672 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5673 	CU_ASSERT(nvme_ctrlr != NULL);
5674 
5675 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5676 	CU_ASSERT(bdev != NULL);
5677 
5678 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5679 	CU_ASSERT(nvme_ns != NULL);
5680 
5681 	ch = spdk_get_io_channel(bdev);
5682 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5683 
5684 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5685 
5686 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5687 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5688 
5689 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5690 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5691 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5692 
5693 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
	ut_bdev_io_set_buf(bdev_io);

5697 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5698 	ctrlr->fail_reset = true;
5699 	ctrlr->is_failed = true;
5700 
5701 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5702 	CU_ASSERT(rc == 0);
5703 	CU_ASSERT(nvme_ctrlr->resetting == true);
5704 	CU_ASSERT(ctrlr->is_failed == true);
5705 
5706 	poll_threads();
5707 
5708 	CU_ASSERT(nvme_ctrlr->resetting == false);
5709 	CU_ASSERT(ctrlr->is_failed == false);
5710 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5711 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5712 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5713 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5714 
5715 	/* I/O should be queued. */
5716 	bdev_io->internal.in_submit_request = true;
5717 
5718 	bdev_nvme_submit_request(ch, bdev_io);
5719 
5720 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5721 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5722 
	/* After a second, the I/O should still be queued and the ctrlr should
	 * still be recovering.
	 */
5726 	spdk_delay_us(SPDK_SEC_TO_USEC);
5727 	poll_threads();
5728 
5729 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5730 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5731 
5732 	CU_ASSERT(nvme_ctrlr->resetting == false);
5733 	CU_ASSERT(ctrlr->is_failed == false);
5734 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5735 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5736 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5737 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5738 
5739 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5740 
	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5742 	spdk_delay_us(SPDK_SEC_TO_USEC);
5743 	poll_threads();
5744 
5745 	CU_ASSERT(nvme_ctrlr->resetting == false);
5746 	CU_ASSERT(ctrlr->is_failed == false);
5747 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5748 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5749 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5750 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5751 
	/* Then, within a second, the pending I/O should be failed. */
5753 	spdk_delay_us(SPDK_SEC_TO_USEC);
5754 	poll_threads();
5755 
5756 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5757 	poll_threads();
5758 
5759 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5760 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5761 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5762 
	/* Another I/O submission should fail immediately. */
5764 	bdev_io->internal.in_submit_request = true;
5765 
5766 	bdev_nvme_submit_request(ch, bdev_io);
5767 
5768 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5769 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5770 
	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
	 * should be deleted.
	 */
5774 	spdk_delay_us(SPDK_SEC_TO_USEC);
5775 	poll_threads();
5776 
5777 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5778 	poll_threads();
5779 
5780 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5781 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5782 	CU_ASSERT(nvme_ctrlr->destruct == true);
5783 
5784 	spdk_put_io_channel(ch);
5785 
5786 	poll_threads();
5787 	spdk_delay_us(1000);
5788 	poll_threads();
5789 
5790 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5791 
5792 	free(bdev_io);
5793 
5794 	g_opts.bdev_retry_count = 0;
5795 }
5796 
5797 static void
5798 test_nvme_ns_cmp(void)
5799 {
5800 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5801 
5802 	nvme_ns1.id = 0;
5803 	nvme_ns2.id = UINT32_MAX;
5804 
5805 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5806 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5807 }
5808 
5809 static void
5810 test_ana_transition(void)
5811 {
5812 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5813 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5814 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5815 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5816 
	/* case 1: ANA transition timeout is canceled. */
5818 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5819 	nvme_ns.ana_transition_timedout = true;
5820 
5821 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5822 
5823 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5824 
5825 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5826 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5827 
5828 	/* case 2: ANATT timer is kept. */
5829 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5830 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5831 			      &nvme_ns,
5832 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
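	/* cdata.anatt is reported in seconds, so the poller period above is
	 * anatt * SPDK_SEC_TO_USEC microseconds (10 seconds for this mocked ctrlr).
	 */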
5833 
5834 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5835 
5836 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5837 
5838 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5839 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5840 
5841 	/* case 3: ANATT timer is stopped. */
5842 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5843 
5844 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5845 
5846 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5847 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5848 
	/* case 4: ANATT timer is started. */
5850 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5851 
5852 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5853 
5854 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5855 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5856 
	/* case 5: ANATT timer expires. */
5858 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5859 
5860 	poll_threads();
5861 
5862 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5863 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5864 }
5865 
5866 static void
5867 _set_preferred_path_cb(void *cb_arg, int rc)
5868 {
5869 	bool *done = cb_arg;
5870 
5871 	*done = true;
5872 }
5873 
5874 static void
5875 test_set_preferred_path(void)
5876 {
5877 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5878 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5879 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5880 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5881 	const int STRING_SIZE = 32;
5882 	const char *attached_names[STRING_SIZE];
5883 	struct nvme_bdev *bdev;
5884 	struct spdk_io_channel *ch;
5885 	struct nvme_bdev_channel *nbdev_ch;
5886 	struct nvme_io_path *io_path;
5887 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5888 	const struct spdk_nvme_ctrlr_data *cdata;
5889 	bool done;
5890 	int rc;
5891 
5892 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5893 	ut_init_trid(&path1.trid);
5894 	ut_init_trid2(&path2.trid);
5895 	ut_init_trid3(&path3.trid);
5896 	g_ut_attach_ctrlr_status = 0;
5897 	g_ut_attach_bdev_count = 1;
5898 
5899 	set_thread(0);
5900 
5901 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5902 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5903 
5904 	ctrlr1->ns[0].uuid = &uuid1;
5905 
5906 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5907 			      attach_ctrlr_done, NULL, &opts, NULL, true);
5908 	CU_ASSERT(rc == 0);
5909 
5910 	spdk_delay_us(1000);
5911 	poll_threads();
5912 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5913 	poll_threads();
5914 
5915 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5916 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5917 
5918 	ctrlr2->ns[0].uuid = &uuid1;
5919 
5920 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5921 			      attach_ctrlr_done, NULL, &opts, NULL, true);
5922 	CU_ASSERT(rc == 0);
5923 
5924 	spdk_delay_us(1000);
5925 	poll_threads();
5926 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5927 	poll_threads();
5928 
5929 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5930 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5931 
5932 	ctrlr3->ns[0].uuid = &uuid1;
5933 
5934 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5935 			      attach_ctrlr_done, NULL, &opts, NULL, true);
5936 	CU_ASSERT(rc == 0);
5937 
5938 	spdk_delay_us(1000);
5939 	poll_threads();
5940 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5941 	poll_threads();
5942 
5943 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5944 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5945 
5946 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5947 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5948 
5949 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5950 
5951 	ch = spdk_get_io_channel(bdev);
5952 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5953 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5954 
5955 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5956 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5957 
5958 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5959 
	/* If the io_path to ctrlr2 is set as the preferred path dynamically,
	 * find_io_path() should return the io_path to ctrlr2.
	 */
5963 
5964 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5965 	done = false;
5966 
5967 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5968 
5969 	poll_threads();
5970 	CU_ASSERT(done == true);
5971 
5972 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5973 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5974 
5975 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5976 
	/* If the io_path to ctrlr3 is set as the preferred path and then a new I/O
	 * channel is acquired, find_io_path() should return the io_path to ctrlr3.
	 */
5980 
5981 	spdk_put_io_channel(ch);
5982 
5983 	poll_threads();
5984 
5985 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5986 	done = false;
5987 
5988 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5989 
5990 	poll_threads();
5991 	CU_ASSERT(done == true);
5992 
5993 	ch = spdk_get_io_channel(bdev);
5994 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5995 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5996 
5997 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5998 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5999 
6000 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6001 
6002 	spdk_put_io_channel(ch);
6003 
6004 	poll_threads();
6005 
6006 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6007 	CU_ASSERT(rc == 0);
6008 
6009 	poll_threads();
6010 	spdk_delay_us(1000);
6011 	poll_threads();
6012 
6013 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6014 }
6015 
6016 static void
6017 test_find_next_io_path(void)
6018 {
6019 	struct nvme_bdev_channel nbdev_ch = {
6020 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6021 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6022 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6023 	};
6024 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6025 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6026 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6027 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6028 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6029 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6030 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6031 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6032 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6033 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6034 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6035 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6036 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6037 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6038 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6039 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6040 
6041 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6042 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6043 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6044 
	/* Test the case where nbdev_ch->current_io_path is set. The case where
	 * current_io_path is NULL is covered by test_find_io_path.
	 */
6048 
6049 	nbdev_ch.current_io_path = &io_path2;
6050 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6051 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6052 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6053 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6054 
6055 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6056 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6057 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6058 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6059 
6060 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6061 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6062 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6063 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6064 
6065 	nbdev_ch.current_io_path = &io_path3;
6066 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6067 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6068 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6069 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6070 
	/* Test if the next io_path is selected according to rr_min_io. */
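	/* With rr_min_io = 2, each path should serve two consecutive I/Os before
	 * the round-robin selector advances to the next path.
	 */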
6072 
6073 	nbdev_ch.current_io_path = NULL;
6074 	nbdev_ch.rr_min_io = 2;
6075 	nbdev_ch.rr_counter = 0;
6076 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6077 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6078 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6079 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6080 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6081 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6082 
6083 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6084 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6085 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6086 }
6087 
6088 static void
6089 test_find_io_path_min_qd(void)
6090 {
6091 	struct nvme_bdev_channel nbdev_ch = {
6092 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6093 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6094 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6095 	};
6096 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6097 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6098 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6099 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6100 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6101 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6102 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6103 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6104 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6105 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6106 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6107 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6108 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6109 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6110 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6111 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6112 
6113 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6114 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6115 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6116 
	/* Test that the minimum num_outstanding_reqs or the ANA optimized state
	 * is prioritized when using the least queue depth selector.
	 */
6120 	qpair1.num_outstanding_reqs = 2;
6121 	qpair2.num_outstanding_reqs = 1;
6122 	qpair3.num_outstanding_reqs = 0;
6123 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6124 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6125 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6126 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6127 
6128 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6129 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6130 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6131 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6132 
6133 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6134 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6135 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6136 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6137 
6138 	qpair2.num_outstanding_reqs = 4;
6139 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6140 }
6141 
6142 static void
6143 test_disable_auto_failback(void)
6144 {
6145 	struct nvme_path_id path1 = {}, path2 = {};
6146 	struct nvme_ctrlr_opts opts = {};
6147 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6148 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6149 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6150 	struct nvme_ctrlr *nvme_ctrlr1;
6151 	const int STRING_SIZE = 32;
6152 	const char *attached_names[STRING_SIZE];
6153 	struct nvme_bdev *bdev;
6154 	struct spdk_io_channel *ch;
6155 	struct nvme_bdev_channel *nbdev_ch;
6156 	struct nvme_io_path *io_path;
6157 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6158 	const struct spdk_nvme_ctrlr_data *cdata;
6159 	bool done;
6160 	int rc;
6161 
6162 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6163 	ut_init_trid(&path1.trid);
6164 	ut_init_trid2(&path2.trid);
6165 	g_ut_attach_ctrlr_status = 0;
6166 	g_ut_attach_bdev_count = 1;
6167 
6168 	g_opts.disable_auto_failback = true;
6169 
6170 	opts.ctrlr_loss_timeout_sec = -1;
6171 	opts.reconnect_delay_sec = 1;
6172 
6173 	set_thread(0);
6174 
6175 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6176 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6177 
6178 	ctrlr1->ns[0].uuid = &uuid1;
6179 
6180 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6181 			      attach_ctrlr_done, NULL, &dopts, &opts, true);
6182 	CU_ASSERT(rc == 0);
6183 
6184 	spdk_delay_us(1000);
6185 	poll_threads();
6186 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6187 	poll_threads();
6188 
6189 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6190 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6191 
6192 	ctrlr2->ns[0].uuid = &uuid1;
6193 
6194 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6195 			      attach_ctrlr_done, NULL, &dopts, &opts, true);
6196 	CU_ASSERT(rc == 0);
6197 
6198 	spdk_delay_us(1000);
6199 	poll_threads();
6200 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6201 	poll_threads();
6202 
6203 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6204 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6205 
6206 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6207 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6208 
6209 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6210 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6211 
6212 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6213 
6214 	ch = spdk_get_io_channel(bdev);
6215 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6216 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6217 
6218 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6219 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6220 
6221 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6222 
6223 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6224 	ctrlr1->fail_reset = true;
6225 	ctrlr1->is_failed = true;
6226 
6227 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6228 
6229 	poll_threads();
6230 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6231 	poll_threads();
6232 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6233 	poll_threads();
6234 
6235 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6236 
6237 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6238 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6239 
6240 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6241 
6242 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6243 	 * Hence, io_path to ctrlr2 should still be used.
6244 	 */
6245 	ctrlr1->fail_reset = false;
6246 
6247 	spdk_delay_us(SPDK_SEC_TO_USEC);
6248 	poll_threads();
6249 
6250 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6251 
6252 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6253 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6254 
6255 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6256 
	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
	 * ctrlr1 should be used again.
	 */
6260 
6261 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6262 	done = false;
6263 
6264 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6265 
6266 	poll_threads();
6267 	CU_ASSERT(done == true);
6268 
6269 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6270 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6271 
6272 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6273 
6274 	spdk_put_io_channel(ch);
6275 
6276 	poll_threads();
6277 
6278 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6279 	CU_ASSERT(rc == 0);
6280 
6281 	poll_threads();
6282 	spdk_delay_us(1000);
6283 	poll_threads();
6284 
6285 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6286 
6287 	g_opts.disable_auto_failback = false;
6288 }
6289 
6290 static void
6291 ut_set_multipath_policy_done(void *cb_arg, int rc)
6292 {
6293 	int *done = cb_arg;
6294 
6295 	SPDK_CU_ASSERT_FATAL(done != NULL);
6296 	*done = rc;
6297 }
6298 
6299 static void
6300 test_set_multipath_policy(void)
6301 {
6302 	struct nvme_path_id path1 = {}, path2 = {};
6303 	struct nvme_ctrlr_opts opts = {};
6304 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6305 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6306 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6307 	const int STRING_SIZE = 32;
6308 	const char *attached_names[STRING_SIZE];
6309 	struct nvme_bdev *bdev;
6310 	struct spdk_io_channel *ch;
6311 	struct nvme_bdev_channel *nbdev_ch;
6312 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6313 	int done;
6314 	int rc;
6315 
6316 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6317 	ut_init_trid(&path1.trid);
6318 	ut_init_trid2(&path2.trid);
6319 	g_ut_attach_ctrlr_status = 0;
6320 	g_ut_attach_bdev_count = 1;
6321 
6322 	g_opts.disable_auto_failback = true;
6323 
6324 	opts.ctrlr_loss_timeout_sec = -1;
6325 	opts.reconnect_delay_sec = 1;
6326 
6327 	set_thread(0);
6328 
6329 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6330 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6331 
6332 	ctrlr1->ns[0].uuid = &uuid1;
6333 
6334 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6335 			      attach_ctrlr_done, NULL, &dopts, &opts, true);
6336 	CU_ASSERT(rc == 0);
6337 
6338 	spdk_delay_us(1000);
6339 	poll_threads();
6340 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6341 	poll_threads();
6342 
6343 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6344 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6345 
6346 	ctrlr2->ns[0].uuid = &uuid1;
6347 
6348 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6349 			      attach_ctrlr_done, NULL, &dopts, &opts, true);
6350 	CU_ASSERT(rc == 0);
6351 
6352 	spdk_delay_us(1000);
6353 	poll_threads();
6354 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6355 	poll_threads();
6356 
6357 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6358 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6359 
6360 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6361 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6362 
	/* If the multipath policy is updated before getting any I/O channel,
	 * a newly acquired I/O channel should reflect the update.
	 */
6366 	done = -1;
6367 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6368 				       BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6369 				       ut_set_multipath_policy_done, &done);
6370 	poll_threads();
6371 	CU_ASSERT(done == 0);
6372 
6373 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6374 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6375 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6376 
6377 	ch = spdk_get_io_channel(bdev);
6378 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6379 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6380 
6381 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6382 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6383 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6384 
	/* If the multipath policy is updated while an I/O channel is active,
	 * the update should be applied to the I/O channel immediately.
	 */
6388 	done = -1;
6389 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6390 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6391 				       ut_set_multipath_policy_done, &done);
6392 	poll_threads();
6393 	CU_ASSERT(done == 0);
6394 
6395 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6396 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6397 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6398 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6399 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6400 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6401 
6402 	spdk_put_io_channel(ch);
6403 
6404 	poll_threads();
6405 
6406 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6407 	CU_ASSERT(rc == 0);
6408 
6409 	poll_threads();
6410 	spdk_delay_us(1000);
6411 	poll_threads();
6412 
6413 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6414 }
6415 
6416 static void
6417 test_uuid_generation(void)
6418 {
6419 	uint32_t nsid1 = 1, nsid2 = 2;
6420 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6421 	char sn3[21] = "                    ";
6422 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6423 	struct spdk_uuid uuid1, uuid2;
6424 	int rc;
6425 
6426 	/* Test case 1:
6427 	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
6429 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6430 	CU_ASSERT(rc == 0);
6431 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6432 	CU_ASSERT(rc == 0);
6433 
6434 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6435 
6436 	/* Test case 2:
6437 	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
6439 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6440 	CU_ASSERT(rc == 0);
6441 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6442 	CU_ASSERT(rc == 0);
6443 
6444 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6445 
6446 	/* Test case 3:
	 * Serial number consists only of space characters.
6448 	 * Validate the generated UUID. */
6449 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6450 	CU_ASSERT(rc == 0);
	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6453 }
6454 
6455 static void
6456 test_retry_io_to_same_path(void)
6457 {
6458 	struct nvme_path_id path1 = {}, path2 = {};
6459 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6460 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6461 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6462 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6463 	const int STRING_SIZE = 32;
6464 	const char *attached_names[STRING_SIZE];
6465 	struct nvme_bdev *bdev;
6466 	struct spdk_bdev_io *bdev_io;
6467 	struct nvme_bdev_io *bio;
6468 	struct spdk_io_channel *ch;
6469 	struct nvme_bdev_channel *nbdev_ch;
6470 	struct nvme_io_path *io_path1, *io_path2;
6471 	struct ut_nvme_req *req;
6472 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6473 	int done;
6474 	int rc;
6475 
6476 	g_opts.nvme_ioq_poll_period_us = 1;
6477 
6478 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6479 	ut_init_trid(&path1.trid);
6480 	ut_init_trid2(&path2.trid);
6481 	g_ut_attach_ctrlr_status = 0;
6482 	g_ut_attach_bdev_count = 1;
6483 
6484 	set_thread(0);
6485 
6486 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6487 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6488 
6489 	ctrlr1->ns[0].uuid = &uuid1;
6490 
6491 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6492 			      attach_ctrlr_done, NULL, &opts, NULL, true);
6493 	CU_ASSERT(rc == 0);
6494 
6495 	spdk_delay_us(1000);
6496 	poll_threads();
6497 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6498 	poll_threads();
6499 
6500 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6501 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6502 
6503 	ctrlr2->ns[0].uuid = &uuid1;
6504 
6505 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6506 			      attach_ctrlr_done, NULL, &opts, NULL, true);
6507 	CU_ASSERT(rc == 0);
6508 
6509 	spdk_delay_us(1000);
6510 	poll_threads();
6511 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6512 	poll_threads();
6513 
6514 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6515 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6516 
6517 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6518 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6519 
6520 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6521 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6522 
6523 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6524 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6525 
6526 	done = -1;
6527 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6528 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6529 	poll_threads();
6530 	CU_ASSERT(done == 0);
6531 
6532 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6533 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6534 	CU_ASSERT(bdev->rr_min_io == 1);
6535 
6536 	ch = spdk_get_io_channel(bdev);
6537 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6538 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6539 
6540 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6541 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6542 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6543 
6544 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6545 	ut_bdev_io_set_buf(bdev_io);
6546 
6547 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6548 
6549 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6550 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6551 
6552 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6553 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6554 
6555 	/* The 1st I/O should be submitted to io_path1. */
6556 	bdev_io->internal.in_submit_request = true;
6557 
6558 	bdev_nvme_submit_request(ch, bdev_io);
6559 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6560 	CU_ASSERT(bio->io_path == io_path1);
6561 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6562 
6563 	spdk_delay_us(1);
6564 
6565 	poll_threads();
6566 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6568 
6569 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6570 	 * policy is round-robin.
6571 	 */
6572 	bdev_io->internal.in_submit_request = true;
6573 
6574 	bdev_nvme_submit_request(ch, bdev_io);
6575 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6576 	CU_ASSERT(bio->io_path == io_path2);
6577 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6578 
6579 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6580 	SPDK_CU_ASSERT_FATAL(req != NULL);
6581 
6582 	/* Set retry count to non-zero. */
6583 	g_opts.bdev_retry_count = 2;
6584 
6585 	/* Inject an I/O error. */
6586 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6587 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6588 
6589 	/* The 2nd I/O should be queued to nbdev_ch. */
6590 	spdk_delay_us(1);
6591 	poll_thread_times(0, 1);
6592 
6593 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6594 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6595 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6596 
6597 	/* The 2nd I/O should keep caching io_path2. */
6598 	CU_ASSERT(bio->io_path == io_path2);
6599 
6600 	/* The 2nd I/O should be submitted to io_path2 again. */
6601 	poll_thread_times(0, 1);
6602 
6603 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6604 	CU_ASSERT(bio->io_path == io_path2);
6605 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6606 
6607 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6608 	SPDK_CU_ASSERT_FATAL(req != NULL);
6609 
6610 	/* Inject an I/O error again. */
6611 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6612 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6613 	req->cpl.status.crd = 1;
6614 
6615 	ctrlr2->cdata.crdt[1] = 1;
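	/* cpl.status.crd = 1 selects Command Retry Delay Time 1 (crdt[1]), which
	 * is in units of 100 milliseconds, so this retry is delayed by 100 ms
	 * instead of being immediate.
	 */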
6616 
6617 	/* The 2nd I/O should be queued to nbdev_ch. */
6618 	spdk_delay_us(1);
6619 	poll_thread_times(0, 1);
6620 
6621 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6622 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6623 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6624 
6625 	/* The 2nd I/O should keep caching io_path2. */
6626 	CU_ASSERT(bio->io_path == io_path2);
6627 
6628 	/* Detach ctrlr2 dynamically. */
6629 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6630 	CU_ASSERT(rc == 0);
6631 
6632 	spdk_delay_us(1000);
6633 	poll_threads();
6634 	spdk_delay_us(1000);
6635 	poll_threads();
6636 	spdk_delay_us(1000);
6637 	poll_threads();
6638 	spdk_delay_us(1000);
6639 	poll_threads();
6640 
6641 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
6642 
6643 	poll_threads();
6644 	spdk_delay_us(100000);
6645 	poll_threads();
6646 	spdk_delay_us(1);
6647 	poll_threads();
6648 
	/* The 2nd I/O should succeed via io_path1. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6652 	CU_ASSERT(bio->io_path == io_path1);
6653 
6654 	free(bdev_io);
6655 
6656 	spdk_put_io_channel(ch);
6657 
6658 	poll_threads();
6659 	spdk_delay_us(1);
6660 	poll_threads();
6661 
6662 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6663 	CU_ASSERT(rc == 0);
6664 
6665 	poll_threads();
6666 	spdk_delay_us(1000);
6667 	poll_threads();
6668 
6669 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6670 
6671 	g_opts.nvme_ioq_poll_period_us = 0;
6672 	g_opts.bdev_retry_count = 0;
6673 }
6674 
6675 /* This case verifies the fix for a complex race condition in which a
6676  * failover was lost if the fabric connect command timed out while the
6677  * controller was being reset.
6678  */
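/* A minimal sketch of the deferral this test relies on, inferred from the
 * assertions in the test body rather than copied from the driver: a failover
 * requested while a reset is in flight is only recorded, and is then kicked
 * off once _bdev_nvme_reset_complete() runs.
 *
 *   if (nvme_ctrlr->resetting) {
 *       nvme_ctrlr->pending_failover = true;
 *       return -EINPROGRESS;
 *   }
 */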
6679 static void
6680 test_race_between_reset_and_disconnected(void)
6681 {
6682 	struct spdk_nvme_transport_id trid = {};
6683 	struct spdk_nvme_ctrlr ctrlr = {};
6684 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6685 	struct nvme_path_id *curr_trid;
6686 	struct spdk_io_channel *ch1, *ch2;
6687 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6688 	int rc;
6689 
6690 	ut_init_trid(&trid);
6691 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6692 
6693 	set_thread(0);
6694 
6695 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6696 	CU_ASSERT(rc == 0);
6697 
6698 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6699 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6700 
6701 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6702 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6703 
6704 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6705 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6706 
6707 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6708 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6709 
6710 	set_thread(1);
6711 
6712 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6713 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6714 
6715 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6716 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6717 
6718 	/* Reset starts from thread 1. */
6719 	set_thread(1);
6720 
6721 	nvme_ctrlr->resetting = false;
6722 	curr_trid->last_failed_tsc = spdk_get_ticks();
6723 	ctrlr.is_failed = true;
6724 
6725 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6726 	CU_ASSERT(rc == 0);
6727 	CU_ASSERT(nvme_ctrlr->resetting == true);
6728 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6729 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6730 
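	/* Step through the reset state machine poll by poll: the I/O qpairs are
	 * torn down per thread first, then the admin qpair is disconnected and
	 * reconnected, and finally the I/O qpairs are recreated per thread.
	 */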
6731 	poll_thread_times(0, 3);
6732 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6733 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6734 
6735 	poll_thread_times(0, 1);
6736 	poll_thread_times(1, 1);
6737 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6738 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6739 	CU_ASSERT(ctrlr.is_failed == true);
6740 
6741 	poll_thread_times(1, 1);
6742 	poll_thread_times(0, 1);
6743 	CU_ASSERT(ctrlr.is_failed == false);
6744 	CU_ASSERT(ctrlr.adminq.is_connected == false);
6745 
6746 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6747 	poll_thread_times(0, 2);
6748 	CU_ASSERT(ctrlr.adminq.is_connected == true);
6749 
6750 	poll_thread_times(0, 1);
6751 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6752 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6753 
6754 	poll_thread_times(1, 1);
6755 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6756 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6757 	CU_ASSERT(nvme_ctrlr->resetting == true);
6758 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6759 
6760 	poll_thread_times(0, 2);
6761 	CU_ASSERT(nvme_ctrlr->resetting == true);
6762 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6763 	poll_thread_times(1, 1);
6764 	CU_ASSERT(nvme_ctrlr->resetting == true);
6765 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6766 
6767 	/* Only one poll remains before _bdev_nvme_reset_complete() is executed.
6768 	 *
6769 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
6770 	 * connect command is executed. If the fabric connect command times out,
6771 	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
6772 	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
6773 	 *
6774 	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
6775 	 */
6776 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
6777 	CU_ASSERT(rc == -EINPROGRESS);
6778 	CU_ASSERT(nvme_ctrlr->resetting == true);
6779 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
6780 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6781 
6782 	poll_thread_times(0, 1);
6783 
6784 	CU_ASSERT(nvme_ctrlr->resetting == true);
6785 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6786 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6787 
6788 	poll_threads();
6789 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6790 	poll_threads();
6791 
6792 	CU_ASSERT(nvme_ctrlr->resetting == false);
6793 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6794 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6795 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6796 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6797 
6798 	spdk_put_io_channel(ch2);
6799 
6800 	set_thread(0);
6801 
6802 	spdk_put_io_channel(ch1);
6803 
6804 	poll_threads();
6805 
6806 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6807 	CU_ASSERT(rc == 0);
6808 
6809 	poll_threads();
6810 	spdk_delay_us(1000);
6811 	poll_threads();
6812 
6813 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6814 }

6815 static void
6816 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
6817 {
6818 	int *_rc = (int *)cb_arg;
6819 
6820 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
6821 	*_rc = rc;
6822 }
6823 
6824 static void
6825 test_ctrlr_op_rpc(void)
6826 {
6827 	struct spdk_nvme_transport_id trid = {};
6828 	struct spdk_nvme_ctrlr ctrlr = {};
6829 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6830 	struct nvme_path_id *curr_trid;
6831 	struct spdk_io_channel *ch1, *ch2;
6832 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6833 	int ctrlr_op_rc;
6834 	int rc;
6835 
6836 	ut_init_trid(&trid);
6837 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6838 
6839 	set_thread(0);
6840 
6841 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6842 	CU_ASSERT(rc == 0);
6843 
6844 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6845 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6846 
6847 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6848 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6849 
6850 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6851 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6852 
6853 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6854 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6855 
6856 	set_thread(1);
6857 
6858 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6859 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6860 
6861 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6862 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6863 
6864 	/* Reset starts from thread 1. */
6865 	set_thread(1);
6866 
6867 	/* Case 1: ctrlr is already being destructed. */
6868 	nvme_ctrlr->destruct = true;
6869 	ctrlr_op_rc = 0;
6870 
6871 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6872 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6873 
6874 	poll_threads();
6875 
6876 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
6877 
6878 	/* Case 2: reset is in progress. */
6879 	nvme_ctrlr->destruct = false;
6880 	nvme_ctrlr->resetting = true;
6881 	ctrlr_op_rc = 0;
6882 
6883 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6884 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6885 
6886 	poll_threads();
6887 
6888 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
6889 
6890 	/* Case 3: reset completes successfully. */
6891 	nvme_ctrlr->resetting = false;
6892 	curr_trid->last_failed_tsc = spdk_get_ticks();
6893 	ctrlr.is_failed = true;
6894 	ctrlr_op_rc = -1;
6895 
6896 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6897 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6898 
6899 	CU_ASSERT(nvme_ctrlr->resetting == true);
6900 	CU_ASSERT(ctrlr_op_rc == -1);
6901 
6902 	poll_threads();
6903 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6904 	poll_threads();
6905 
6906 	CU_ASSERT(nvme_ctrlr->resetting == false);
6907 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6908 	CU_ASSERT(ctrlr.is_failed == false);
6909 	CU_ASSERT(ctrlr_op_rc == 0);
6910 
6911 	/* Case 4: invalid operation. */
6912 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
6913 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6914 
6915 	poll_threads();
6916 
6917 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
6918 
6919 	spdk_put_io_channel(ch2);
6920 
6921 	set_thread(0);
6922 
6923 	spdk_put_io_channel(ch1);
6924 
6925 	poll_threads();
6926 
6927 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6928 	CU_ASSERT(rc == 0);
6929 
6930 	poll_threads();
6931 	spdk_delay_us(1000);
6932 	poll_threads();
6933 
6934 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6935 }
6936 
6937 static void
6938 test_bdev_ctrlr_op_rpc(void)
6939 {
6940 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
6941 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
6942 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6943 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
6944 	struct nvme_path_id *curr_trid1, *curr_trid2;
6945 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
6946 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
6947 	int ctrlr_op_rc;
6948 	int rc;
6949 
6950 	ut_init_trid(&trid1);
6951 	ut_init_trid2(&trid2);
6952 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
6953 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
6954 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
6955 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
6956 	ctrlr1.cdata.cntlid = 1;
6957 	ctrlr2.cdata.cntlid = 2;
6958 	ctrlr1.adminq.is_connected = true;
6959 	ctrlr2.adminq.is_connected = true;
6960 
6961 	set_thread(0);
6962 
6963 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
6964 	CU_ASSERT(rc == 0);
6965 
6966 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6967 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6968 
6969 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
6970 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6971 
6972 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
6973 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
6974 
6975 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
6976 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
6977 
6978 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
6979 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
6980 
6981 	set_thread(1);
6982 
6983 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
6984 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
6985 
6986 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
6987 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
6988 
6989 	set_thread(0);
6990 
6991 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
6992 	CU_ASSERT(rc == 0);
6993 
6994 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
6995 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6996 
6997 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
6998 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
6999 
7000 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7001 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7002 
7003 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7004 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7005 
7006 	set_thread(1);
7007 
7008 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7009 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7010 
7011 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7012 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7013 
7014 	/* Reset starts from thread 1. */
7015 	set_thread(1);
7016 
7017 	nvme_ctrlr1->resetting = false;
7018 	nvme_ctrlr2->resetting = false;
7019 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7020 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7021 	ctrlr_op_rc = -1;
7022 
7023 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7024 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7025 
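	/* nvme_bdev_ctrlr_op_rpc() is expected to operate on the ctrlrs one at
	 * a time: ctrlr2's reset only starts after ctrlr1's reset completes.
	 */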
7026 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7027 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7028 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7029 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7030 
7031 	poll_thread_times(0, 3);
7032 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7033 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7034 
7035 	poll_thread_times(0, 1);
7036 	poll_thread_times(1, 1);
7037 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7038 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7039 
7040 	poll_thread_times(1, 1);
7041 	poll_thread_times(0, 1);
7042 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7043 
7044 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7045 	poll_thread_times(0, 2);
7046 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7047 
7048 	poll_thread_times(0, 1);
7049 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7050 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7051 
7052 	poll_thread_times(1, 1);
7053 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7054 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7055 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7056 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7057 
7058 	poll_thread_times(0, 2);
7059 	poll_thread_times(1, 1);
7060 	poll_thread_times(0, 1);
7061 	poll_thread_times(1, 1);
7062 	poll_thread_times(0, 1);
7063 	poll_thread_times(1, 1);
7064 	poll_thread_times(0, 1);
7065 
7066 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7067 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7068 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7069 
7070 	poll_threads();
7071 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7072 	poll_threads();
7073 
7074 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7075 	CU_ASSERT(ctrlr_op_rc == 0);
7076 
7077 	set_thread(1);
7078 
7079 	spdk_put_io_channel(ch12);
7080 	spdk_put_io_channel(ch22);
7081 
7082 	set_thread(0);
7083 
7084 	spdk_put_io_channel(ch11);
7085 	spdk_put_io_channel(ch21);
7086 
7087 	poll_threads();
7088 
7089 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7090 	CU_ASSERT(rc == 0);
7091 
7092 	poll_threads();
7093 	spdk_delay_us(1000);
7094 	poll_threads();
7095 
7096 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7097 }
7098 
7099 static void
7100 test_disable_enable_ctrlr(void)
7101 {
7102 	struct spdk_nvme_transport_id trid = {};
7103 	struct spdk_nvme_ctrlr ctrlr = {};
7104 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7105 	struct nvme_path_id *curr_trid;
7106 	struct spdk_io_channel *ch1, *ch2;
7107 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7108 	int rc;
7109 
7110 	ut_init_trid(&trid);
7111 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7112 	ctrlr.adminq.is_connected = true;
7113 
7114 	set_thread(0);
7115 
7116 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7117 	CU_ASSERT(rc == 0);
7118 
7119 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7120 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7121 
7122 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7123 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7124 
7125 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7126 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7127 
7128 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7129 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7130 
7131 	set_thread(1);
7132 
7133 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7134 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7135 
7136 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7137 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7138 
7139 	/* Disable starts from thread 1. */
7140 	set_thread(1);
7141 
7142 	/* Case 1: ctrlr is already disabled. */
7143 	nvme_ctrlr->disabled = true;
7144 
7145 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7146 	CU_ASSERT(rc == -EALREADY);
7147 
7148 	/* Case 2: ctrlr is already being destructed. */
7149 	nvme_ctrlr->disabled = false;
7150 	nvme_ctrlr->destruct = true;
7151 
7152 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7153 	CU_ASSERT(rc == -ENXIO);
7154 
7155 	/* Case 3: reset is in progress. */
7156 	nvme_ctrlr->destruct = false;
7157 	nvme_ctrlr->resetting = true;
7158 
7159 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7160 	CU_ASSERT(rc == -EBUSY);
7161 
7162 	/* Case 4: disable completes successfully. */
7163 	nvme_ctrlr->resetting = false;
7164 
7165 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7166 	CU_ASSERT(rc == 0);
7167 	CU_ASSERT(nvme_ctrlr->resetting == true);
7168 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7169 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7170 
7171 	poll_thread_times(0, 3);
7172 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7173 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7174 
7175 	poll_thread_times(0, 1);
7176 	poll_thread_times(1, 1);
7177 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7178 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7179 
7180 	poll_thread_times(1, 1);
7181 	poll_thread_times(0, 1);
7182 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7183 	poll_thread_times(1, 1);
7184 	poll_thread_times(0, 1);
7185 	poll_thread_times(1, 1);
7186 	poll_thread_times(0, 1);
7187 	CU_ASSERT(nvme_ctrlr->resetting == false);
7188 	CU_ASSERT(nvme_ctrlr->disabled == true);
7189 
7190 	/* Case 5: enable completes successfully. */
7191 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7192 	CU_ASSERT(rc == 0);
7193 
7194 	CU_ASSERT(nvme_ctrlr->resetting == true);
7195 	CU_ASSERT(nvme_ctrlr->disabled == false);
7196 
7197 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7198 	poll_thread_times(0, 2);
7199 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7200 
7201 	poll_thread_times(0, 1);
7202 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7203 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7204 
7205 	poll_thread_times(1, 1);
7206 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7207 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7208 	CU_ASSERT(nvme_ctrlr->resetting == true);
7209 
7210 	poll_thread_times(0, 2);
7211 	CU_ASSERT(nvme_ctrlr->resetting == true);
7212 	poll_thread_times(1, 1);
7213 	CU_ASSERT(nvme_ctrlr->resetting == true);
7214 	poll_thread_times(0, 1);
7215 	CU_ASSERT(nvme_ctrlr->resetting == false);
7216 
7217 	/* Case 6: ctrlr is already enabled. */
7218 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7219 	CU_ASSERT(rc == -EALREADY);
7220 
7221 	set_thread(0);
7222 
7223 	/* Case 7: disable cancels delayed reconnect. */
7224 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7225 	ctrlr.fail_reset = true;
7226 
7227 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7228 	CU_ASSERT(rc == 0);
7229 
7230 	poll_threads();
7231 
7232 	CU_ASSERT(nvme_ctrlr->resetting == false);
7233 	CU_ASSERT(ctrlr.is_failed == false);
7234 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7235 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7236 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7237 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7238 
7239 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7240 	CU_ASSERT(rc == 0);
7241 
7242 	CU_ASSERT(nvme_ctrlr->resetting == true);
7243 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7244 
7245 	poll_threads();
7246 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7247 	poll_threads();
7248 
7249 	CU_ASSERT(nvme_ctrlr->resetting == false);
7250 	CU_ASSERT(nvme_ctrlr->disabled == true);
7251 
7252 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7253 	CU_ASSERT(rc == 0);
7254 
7255 	CU_ASSERT(nvme_ctrlr->resetting == true);
7256 	CU_ASSERT(nvme_ctrlr->disabled == false);
7257 
7258 	poll_threads();
7259 
7260 	CU_ASSERT(nvme_ctrlr->resetting == false);
7261 
7262 	set_thread(1);
7263 
7264 	spdk_put_io_channel(ch2);
7265 
7266 	set_thread(0);
7267 
7268 	spdk_put_io_channel(ch1);
7269 
7270 	poll_threads();
7271 
7272 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7273 	CU_ASSERT(rc == 0);
7274 
7275 	poll_threads();
7276 	spdk_delay_us(1000);
7277 	poll_threads();
7278 
7279 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7280 }
7281 
7282 static void
7283 ut_delete_done(void *ctx, int rc)
7284 {
7285 	int *delete_done_rc = ctx;
7286 	*delete_done_rc = rc;
7287 }
7288 
7289 static void
7290 test_delete_ctrlr_done(void)
7291 {
7292 	struct spdk_nvme_transport_id trid = {};
7293 	struct spdk_nvme_ctrlr ctrlr = {};
7294 	int delete_done_rc = 0xDEADBEEF;
7295 	int rc;
7296 
7297 	ut_init_trid(&trid);
7298 
7299 	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7300 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7301 
7302 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7303 	CU_ASSERT(rc == 0);
7304 
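	/* The delete completes asynchronously; poll for up to 20 ms until the
	 * completion callback overwrites the 0xDEADBEEF sentinel.
	 */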
7305 	for (int i = 0; i < 20; i++) {
7306 		poll_threads();
7307 		if (delete_done_rc == 0) {
7308 			break;
7309 		}
7310 		spdk_delay_us(1000);
7311 	}
7312 
7313 	CU_ASSERT(delete_done_rc == 0);
7314 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7315 }
7316 
7317 static void
7318 test_ns_remove_during_reset(void)
7319 {
7320 	struct nvme_path_id path = {};
7321 	struct nvme_ctrlr_opts opts = {};
7322 	struct spdk_nvme_ctrlr *ctrlr;
7323 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7324 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7325 	struct nvme_ctrlr *nvme_ctrlr;
7326 	const int STRING_SIZE = 32;
7327 	const char *attached_names[STRING_SIZE];
7328 	struct nvme_bdev *bdev;
7329 	struct nvme_ns *nvme_ns;
7330 	union spdk_nvme_async_event_completion event = {};
7331 	struct spdk_nvme_cpl cpl = {};
7332 	int rc;
7333 
7334 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7335 	ut_init_trid(&path.trid);
7336 
7337 	set_thread(0);
7338 
7339 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7340 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7341 
7342 	g_ut_attach_ctrlr_status = 0;
7343 	g_ut_attach_bdev_count = 1;
7344 
7345 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7346 			      attach_ctrlr_done, NULL, &dopts, &opts, false);
7347 	CU_ASSERT(rc == 0);
7348 
7349 	spdk_delay_us(1000);
7350 	poll_threads();
7351 
7352 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7353 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7354 
7355 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7356 	CU_ASSERT(nvme_ctrlr != NULL);
7357 
7358 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7359 	CU_ASSERT(bdev != NULL);
7360 
7361 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7362 	CU_ASSERT(nvme_ns != NULL);
7363 
7364 	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
7365 	 * but nvme_ns->ns should be NULL.
7366 	 */
7367 
7368 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7369 	ctrlr->ns[0].is_active = false;
7370 
7371 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7372 	CU_ASSERT(rc == 0);
7373 
7374 	poll_threads();
7375 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7376 	poll_threads();
7377 
7378 	CU_ASSERT(nvme_ctrlr->resetting == false);
7379 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7380 
7381 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7382 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7383 	CU_ASSERT(nvme_ns->bdev == bdev);
7384 	CU_ASSERT(nvme_ns->ns == NULL);
7385 
7386 	/* Then, an async event should fill nvme_ns->ns again. */
7387 
7388 	ctrlr->ns[0].is_active = true;
7389 
7390 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7391 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7392 	cpl.cdw0 = event.raw;
7393 
7394 	aer_cb(nvme_ctrlr, &cpl);
7395 
7396 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7397 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7398 	CU_ASSERT(nvme_ns->bdev == bdev);
7399 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7400 
7401 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7402 	CU_ASSERT(rc == 0);
7403 
7404 	poll_threads();
7405 	spdk_delay_us(1000);
7406 	poll_threads();
7407 
7408 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7409 }
7410 
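/* A summary of the rules exercised below, as this test encodes them (not
 * necessarily the driver's full decision logic): a path is "current" only if
 * it is attached to a channel, its qpair is connected, and it is acceptable
 * under the channel's multipath policy given the namespace's ANA state.
 */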
7411 static void
7412 test_io_path_is_current(void)
7413 {
7414 	struct nvme_bdev_channel nbdev_ch = {
7415 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7416 	};
7417 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7418 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7419 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7420 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7421 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7422 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7423 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7424 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7425 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7426 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7427 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7428 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7429 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7430 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7431 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
7432 
7433 	/* io_path1 is being deleted. */
7434 	io_path1.nbdev_ch = NULL;
7435 
7436 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7437 
7438 	io_path1.nbdev_ch = &nbdev_ch;
7439 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7440 	io_path2.nbdev_ch = &nbdev_ch;
7441 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7442 	io_path3.nbdev_ch = &nbdev_ch;
7443 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7444 
7445 	/* active/active: io_path is current if it is available and ANA optimized. */
7446 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7447 
7448 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7449 
7450 	/* active/active: io_path is not current if it is disconnected even if it is
7451 	 * ANA optimized.
7452 	 */
7453 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7454 
7455 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7456 
7457 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7458 
7459 	/* active/passive: io_path is current if it is available and cached.
7460 	 * (only ANA optimized path is cached for active/passive.)
7461 	 */
7462 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7463 	nbdev_ch.current_io_path = &io_path2;
7464 
7465 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7466 
7467 	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
7468 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7469 
7470 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7471 
7472 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7473 
7474 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7475 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7476 
7477 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7478 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7479 
7480 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7481 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7482 
7483 	/* active/active: non-optimized path is current only if there is no optimized path. */
7484 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7485 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7486 
7487 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7488 
7489 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7490 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7491 
7492 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7493 
7494 	/* active/passive: when there is no optimized path, the first available path is current. */
7495 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7496 	nbdev_ch.current_io_path = NULL;
7497 
7498 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7499 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7500 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7501 }
7502 
7503 int
7504 main(int argc, char **argv)
7505 {
7506 	CU_pSuite	suite = NULL;
7507 	unsigned int	num_failures;
7508 
7509 	CU_initialize_registry();
7510 
7511 	suite = CU_add_suite("nvme", NULL, NULL);
7512 
7513 	CU_ADD_TEST(suite, test_create_ctrlr);
7514 	CU_ADD_TEST(suite, test_reset_ctrlr);
7515 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
7516 	CU_ADD_TEST(suite, test_failover_ctrlr);
7517 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
7518 	CU_ADD_TEST(suite, test_pending_reset);
7519 	CU_ADD_TEST(suite, test_attach_ctrlr);
7520 	CU_ADD_TEST(suite, test_aer_cb);
7521 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
7522 	CU_ADD_TEST(suite, test_add_remove_trid);
7523 	CU_ADD_TEST(suite, test_abort);
7524 	CU_ADD_TEST(suite, test_get_io_qpair);
7525 	CU_ADD_TEST(suite, test_bdev_unregister);
7526 	CU_ADD_TEST(suite, test_compare_ns);
7527 	CU_ADD_TEST(suite, test_init_ana_log_page);
7528 	CU_ADD_TEST(suite, test_get_memory_domains);
7529 	CU_ADD_TEST(suite, test_reconnect_qpair);
7530 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
7531 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
7532 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
7533 	CU_ADD_TEST(suite, test_admin_path);
7534 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
7535 	CU_ADD_TEST(suite, test_find_io_path);
7536 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
7537 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
7538 	CU_ADD_TEST(suite, test_retry_io_count);
7539 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
7540 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
7541 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
7542 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
7543 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
7544 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
7545 	CU_ADD_TEST(suite, test_fail_path);
7546 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
7547 	CU_ADD_TEST(suite, test_ana_transition);
7548 	CU_ADD_TEST(suite, test_set_preferred_path);
7549 	CU_ADD_TEST(suite, test_find_next_io_path);
7550 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
7551 	CU_ADD_TEST(suite, test_disable_auto_failback);
7552 	CU_ADD_TEST(suite, test_set_multipath_policy);
7553 	CU_ADD_TEST(suite, test_uuid_generation);
7554 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
7555 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
7556 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
7557 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
7558 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
7559 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
7560 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
7561 	CU_ADD_TEST(suite, test_io_path_is_current);
7562 
7563 	allocate_threads(3);
7564 	set_thread(0);
7565 	bdev_nvme_library_init();
7566 	init_accel();
7567 
7568 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
7569 
7570 	set_thread(0);
7571 	bdev_nvme_library_fini();
7572 	fini_accel();
7573 	free_threads();
7574 
7575 	CU_cleanup_registry();
7576 
7577 	return num_failures;
7578 }
7579