/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

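/* Include the implementation and the common test helpers directly (rather
 * than linking them) so the tests can exercise static functions and override
 * external dependencies with the stubs defined below.
 */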
#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

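/* Fake io_device handle; spdk_accel_get_io_channel() below resolves it to the
 * channel registered for it elsewhere in the test setup.
 */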
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(spdk_nvme_qpair_get_id, uint16_t, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB(spdk_nvme_poll_group_get_fd, int, (struct spdk_nvme_poll_group *group), 0);
DEFINE_STUB(spdk_nvme_poll_group_wait, int, (struct spdk_nvme_poll_group *group,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_admin_qp_fd, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_event_handler_opts *opts), 0);

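/* Hand-rolled mock: report as many dummy memory domains as the mocked return
 * value advertises, capped at the caller's array_size.
 */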
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, uint32_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int,
	    (struct spdk_nvme_qpair *qpair, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_authenticate, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_set_keys, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts), 0);

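/* Minimal stand-ins for the NVMe driver's opaque structures. Only the fields
 * that the code under test actually touches are modeled here.
 */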
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

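/* Global test state: fake controllers waiting to be probed, controllers that
 * have already been attached, and knobs that let individual tests force
 * attach or bdev registration failures.
 */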
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

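/* Create a fake controller with num_ns active namespaces and queue it on
 * g_ut_init_ctrlrs, where spdk_nvme_probe_poll_async() will find and attach
 * it. Returns NULL on a duplicate trid or an allocation failure.
 */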
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

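/* Queue a fake request on the qpair. It stays outstanding until
 * spdk_nvme_qpair_process_completions() completes it with the status stored
 * in req->cpl (success by default).
 */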
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

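/* Allocate a bdev_io with the driver-specific nvme_bdev_io context appended,
 * mirroring the layout the bdev layer would hand to this module.
 */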
static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

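/* Mimic the probe state machine: free the controller if it failed, otherwise
 * apply the caller's opts, move it to g_ut_attached_ctrlrs, and invoke the
 * attach callback.
 */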
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint16_t
spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cdata.cntlid;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
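/* Build a fake ANA log page: a header followed by one single-NSID group
 * descriptor for each active namespace.
 */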
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

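/* Mock abort: fail the outstanding request matching cmd_cb_arg with
 * ABORTED BY REQUEST and queue an ABORT request on the admin queue that
 * completes successfully.
 */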
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

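/* Mimic the poll group: disconnected qpairs get the disconnect callback,
 * connected qpairs with a failure reason are disconnected (and counted as
 * "busy"), and the rest are polled for completions.
 */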
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

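/* Synchronous mock of the (normally asynchronous) unregister: destruct the
 * bdev immediately and complete the callback.
 */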
void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.f.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

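	/* Deletion is asynchronous: the ctrlr stays registered until the
	 * threads are polled.
	 */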
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

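	/* Step through the reset state machine: the I/O qpairs are deleted
	 * thread by thread, the admin qpair is disconnected and reconnected,
	 * and then the I/O qpairs are recreated in the same order.
	 */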
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; destruction will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

1804 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1805  *
1806  * An nvme_ctrlr initially had trid1 and trid2, and trid1 was active. The connection
1807  * to trid1 was lost, and resetting the ctrlr failed repeatedly before failover from
1808  * trid1 to trid2 started. While processing the failed reset, trid3 was added. trid1
1809  * should have remained active, i.e., the head of the list, until the failover
1810  * completed. However, trid3 was inserted at the head of the list by mistake.
1811  *
1812  * I/O qpairs have a smaller polling period than the admin qpair. When a connection
1813  * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O qpair
1814  * error invokes reset ctrlr and an admin qpair error invokes failover ctrlr. Hence
1815  * reset ctrlr may be executed repeatedly before failover is executed, so this bug
1816  * could happen in practice.
1817  *
1818  * The following test verifies the fix.
1819  */
1819 static void
1820 test_race_between_failover_and_add_secondary_trid(void)
1821 {
1822 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1823 	struct spdk_nvme_ctrlr ctrlr = {};
1824 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1825 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1826 	struct spdk_io_channel *ch1, *ch2;
1827 	int rc;
1828 
1829 	ut_init_trid(&trid1);
1830 	ut_init_trid2(&trid2);
1831 	ut_init_trid3(&trid3);
1832 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1833 
1834 	set_thread(0);
1835 
1836 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1837 	CU_ASSERT(rc == 0);
1838 
1839 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1840 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1841 
1842 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1843 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1844 
1845 	set_thread(1);
1846 
1847 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1848 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1849 
1850 	set_thread(0);
1851 
1852 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1853 	CU_ASSERT(rc == 0);
1854 
1855 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1856 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1857 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1858 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1859 	path_id2 = TAILQ_NEXT(path_id1, link);
1860 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1861 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1862 
1863 	ctrlr.fail_reset = true;
1864 
1865 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1866 	CU_ASSERT(rc == 0);
1867 
1868 	poll_threads();
1869 
1870 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1871 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1872 
1873 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1874 	CU_ASSERT(rc == 0);
1875 
1876 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1877 	CU_ASSERT(rc == 0);
1878 
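	/* trid3 must be appended after trid1 and trid2, not inserted at the
	 * head of the list, even though the reset of trid1 keeps failing.
	 */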
1879 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1880 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1881 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1882 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1883 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1884 	path_id3 = TAILQ_NEXT(path_id2, link);
1885 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1886 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1887 
1888 	poll_threads();
1889 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1890 	poll_threads();
1891 
1892 	spdk_put_io_channel(ch1);
1893 
1894 	set_thread(1);
1895 
1896 	spdk_put_io_channel(ch2);
1897 
1898 	poll_threads();
1899 
1900 	set_thread(0);
1901 
1902 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1903 	CU_ASSERT(rc == 0);
1904 
1905 	poll_threads();
1906 	spdk_delay_us(1000);
1907 	poll_threads();
1908 
1909 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1910 }
1911 
1912 static void
1913 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1914 {
1915 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1916 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1917 }
1918 
1919 static void
1920 test_pending_reset(void)
1921 {
1922 	struct spdk_nvme_transport_id trid = {};
1923 	struct spdk_nvme_ctrlr *ctrlr;
1924 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1925 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1926 	const int STRING_SIZE = 32;
1927 	const char *attached_names[STRING_SIZE];
1928 	struct nvme_bdev *bdev;
1929 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1930 	struct spdk_io_channel *ch1, *ch2;
1931 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1932 	struct nvme_io_path *io_path1, *io_path2;
1933 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1934 	int rc;
1935 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
1936 
1937 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
1938 	bdev_opts.multipath = false;
1939 
1940 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1941 	ut_init_trid(&trid);
1942 
1943 	set_thread(0);
1944 
1945 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1946 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1947 
1948 	g_ut_attach_ctrlr_status = 0;
1949 	g_ut_attach_bdev_count = 1;
1950 
1951 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1952 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
1953 	CU_ASSERT(rc == 0);
1954 
1955 	spdk_delay_us(1000);
1956 	poll_threads();
1957 
1958 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1959 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1960 
1961 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1962 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1963 
1964 	ch1 = spdk_get_io_channel(bdev);
1965 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1966 
1967 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1968 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1969 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1970 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1971 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1972 
1973 	set_thread(1);
1974 
1975 	ch2 = spdk_get_io_channel(bdev);
1976 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1977 
1978 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1979 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1980 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1981 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1982 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1983 
1984 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1985 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1986 
1987 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1988 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1989 
1990 	/* The first reset request is submitted on thread 1, and the second reset request
1991 	 * is submitted on thread 0 while processing the first request.
1992 	 */
1993 	bdev_nvme_submit_request(ch2, first_bdev_io);
1994 
1995 	poll_thread_times(0, 1);
1996 	poll_thread_times(1, 2);
1997 
1998 	CU_ASSERT(nvme_ctrlr->resetting == true);
1999 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
2000 
2001 	set_thread(0);
2002 
2003 	bdev_nvme_submit_request(ch1, second_bdev_io);
2004 
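	/* Step both threads just far enough that the second reset request is
	 * processed while the first reset is still in progress. The second
	 * request should then be queued on the pending_resets list.
	 */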
2005 	poll_thread_times(0, 1);
2006 	poll_thread_times(1, 1);
2007 	poll_thread_times(0, 2);
2008 	poll_thread_times(1, 1);
2009 	poll_thread_times(0, 1);
2010 
2011 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == second_bdev_io);
2012 
2013 	poll_threads();
2014 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2015 	poll_threads();
2016 
2017 	CU_ASSERT(nvme_ctrlr->resetting == false);
2018 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2019 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2020 
2021 	/* The first reset request is submitted on thread 1, and the second reset request
2022 	 * is submitted on thread 0 while processing the first request.
2023 	 *
2024 	 * The difference from the above scenario is that the reset fails while
2025 	 * processing the first request. Hence both reset requests should fail.
2026 	 */
2027 	set_thread(1);
2028 
2029 	bdev_nvme_submit_request(ch2, first_bdev_io);
2030 
2031 	poll_thread_times(0, 1);
2032 	poll_thread_times(1, 2);
2033 
2034 	CU_ASSERT(nvme_ctrlr->resetting == true);
2035 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
2036 
2037 	set_thread(0);
2038 
2039 	bdev_nvme_submit_request(ch1, second_bdev_io);
2040 
2041 	poll_thread_times(0, 1);
2042 	poll_thread_times(1, 1);
2043 	poll_thread_times(0, 2);
2044 	poll_thread_times(1, 1);
2045 	poll_thread_times(0, 1);
2046 
2047 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == second_bdev_io);
2048 
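	/* Make the reset fail so that both the in-flight first reset and the
	 * queued second reset complete with failure.
	 */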
2049 	ctrlr->fail_reset = true;
2050 
2051 	poll_threads();
2052 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2053 	poll_threads();
2054 
2055 	CU_ASSERT(nvme_ctrlr->resetting == false);
2056 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2057 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2058 
2059 	spdk_put_io_channel(ch1);
2060 
2061 	set_thread(1);
2062 
2063 	spdk_put_io_channel(ch2);
2064 
2065 	poll_threads();
2066 
2067 	set_thread(0);
2068 
2069 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2070 	CU_ASSERT(rc == 0);
2071 
2072 	poll_threads();
2073 	spdk_delay_us(1000);
2074 	poll_threads();
2075 
2076 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2077 
2078 	free(first_bdev_io);
2079 	free(second_bdev_io);
2080 }
2081 
2082 static void
2083 test_attach_ctrlr(void)
2084 {
2085 	struct spdk_nvme_transport_id trid = {};
2086 	struct spdk_nvme_ctrlr *ctrlr;
2087 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2088 	struct nvme_ctrlr *nvme_ctrlr;
2089 	const int STRING_SIZE = 32;
2090 	const char *attached_names[STRING_SIZE];
2091 	struct nvme_bdev *nbdev;
2092 	int rc;
2093 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2094 
2095 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2096 	bdev_opts.multipath = false;
2097 
2098 	set_thread(0);
2099 
2100 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2101 	ut_init_trid(&trid);
2102 
2103 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is
2104 	 * removed by probe polling.
2105 	 */
2106 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2107 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2108 
2109 	ctrlr->is_failed = true;
2110 	g_ut_attach_ctrlr_status = -EIO;
2111 	g_ut_attach_bdev_count = 0;
2112 
2113 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2114 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2115 	CU_ASSERT(rc == 0);
2116 
2117 	spdk_delay_us(1000);
2118 	poll_threads();
2119 
2120 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2121 
2122 	/* If the ctrlr has no namespace, one nvme_ctrlr with no namespace is still created. */
2123 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2124 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2125 
2126 	g_ut_attach_ctrlr_status = 0;
2127 
2128 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2129 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2130 	CU_ASSERT(rc == 0);
2131 
2132 	spdk_delay_us(1000);
2133 	poll_threads();
2134 
2135 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2136 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2137 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2138 
2139 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2140 	CU_ASSERT(rc == 0);
2141 
2142 	poll_threads();
2143 	spdk_delay_us(1000);
2144 	poll_threads();
2145 
2146 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2147 
2148 	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
2149 	 * one nvme_bdev is created.
2150 	 */
2151 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2152 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2153 
2154 	g_ut_attach_bdev_count = 1;
2155 
2156 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2157 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2158 	CU_ASSERT(rc == 0);
2159 
2160 	spdk_delay_us(1000);
2161 	poll_threads();
2162 
2163 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2164 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2165 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2166 
2167 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2168 	attached_names[0] = NULL;
2169 
2170 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2171 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2172 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2173 
2174 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2175 	CU_ASSERT(rc == 0);
2176 
2177 	poll_threads();
2178 	spdk_delay_us(1000);
2179 	poll_threads();
2180 
2181 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2182 
2183 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
2184 	 * created because creating the nvme_bdev failed.
2185 	 */
2186 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2187 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2188 
2189 	g_ut_register_bdev_status = -EINVAL;
2190 	g_ut_attach_bdev_count = 0;
2191 
2192 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2193 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2194 	CU_ASSERT(rc == 0);
2195 
2196 	spdk_delay_us(1000);
2197 	poll_threads();
2198 
2199 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2200 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2201 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2202 
2203 	CU_ASSERT(attached_names[0] == NULL);
2204 
2205 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2206 	CU_ASSERT(rc == 0);
2207 
2208 	poll_threads();
2209 	spdk_delay_us(1000);
2210 	poll_threads();
2211 
2212 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2213 
2214 	g_ut_register_bdev_status = 0;
2215 }
2216 
2217 static void
2218 test_aer_cb(void)
2219 {
2220 	struct spdk_nvme_transport_id trid = {};
2221 	struct spdk_nvme_ctrlr *ctrlr;
2222 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2223 	struct nvme_ctrlr *nvme_ctrlr;
2224 	struct nvme_bdev *bdev;
2225 	const int STRING_SIZE = 32;
2226 	const char *attached_names[STRING_SIZE];
2227 	union spdk_nvme_async_event_completion event = {};
2228 	struct spdk_nvme_cpl cpl = {};
2229 	int rc;
2230 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2231 
2232 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2233 	bdev_opts.multipath = false;
2234 
2235 	set_thread(0);
2236 
2237 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2238 	ut_init_trid(&trid);
2239 
2240 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2241 	 * and 4th namespaces are populated.
2242 	 */
2243 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2244 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2245 
2246 	ctrlr->ns[0].is_active = false;
2247 
2248 	g_ut_attach_ctrlr_status = 0;
2249 	g_ut_attach_bdev_count = 3;
2250 
2251 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2252 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2253 	CU_ASSERT(rc == 0);
2254 
2255 	spdk_delay_us(1000);
2256 	poll_threads();
2257 
2258 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2259 	poll_threads();
2260 
2261 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2262 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2263 
2264 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2265 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2266 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2267 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2268 
2269 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2270 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2271 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2272 
2273 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
2274 	 * and change the size of the 4th namespace.
2275 	 */
2276 	ctrlr->ns[0].is_active = true;
2277 	ctrlr->ns[2].is_active = false;
2278 	ctrlr->nsdata[3].nsze = 2048;
2279 
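	/* Build a Namespace Attribute Changed AEN completion and pass it
	 * directly to the AER callback.
	 */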
2280 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2281 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2282 	cpl.cdw0 = event.raw;
2283 
2284 	aer_cb(nvme_ctrlr, &cpl);
2285 
2286 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2287 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2288 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2289 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2290 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2291 
2292 	/* Change ANA state of active namespaces. */
2293 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2294 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2295 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2296 
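	/* Deliver an ANA Change AEN. The updated ANA log page is read via the
	 * admin qpair, so advance past the admin poll period afterwards.
	 */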
2297 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2298 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2299 	cpl.cdw0 = event.raw;
2300 
2301 	aer_cb(nvme_ctrlr, &cpl);
2302 
2303 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2304 	poll_threads();
2305 
2306 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2307 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2308 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2309 
2310 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2311 	CU_ASSERT(rc == 0);
2312 
2313 	poll_threads();
2314 	spdk_delay_us(1000);
2315 	poll_threads();
2316 
2317 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2318 }
2319 
2320 static void
2321 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2322 			enum spdk_bdev_io_type io_type)
2323 {
2324 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2325 	struct nvme_io_path *io_path;
2326 	struct spdk_nvme_qpair *qpair;
2327 
2328 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2329 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2330 	qpair = io_path->qpair->qpair;
2331 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2332 
2333 	bdev_io->type = io_type;
2334 	bdev_io->internal.f.in_submit_request = true;
2335 
2336 	bdev_nvme_submit_request(ch, bdev_io);
2337 
2338 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2339 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2340 
2341 	poll_threads();
2342 
2343 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2344 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2345 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2346 }
2347 
2348 static void
2349 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2350 		   enum spdk_bdev_io_type io_type)
2351 {
2352 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2353 	struct nvme_io_path *io_path;
2354 	struct spdk_nvme_qpair *qpair;
2355 
2356 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2357 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2358 	qpair = io_path->qpair->qpair;
2359 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2360 
2361 	bdev_io->type = io_type;
2362 	bdev_io->internal.f.in_submit_request = true;
2363 
2364 	bdev_nvme_submit_request(ch, bdev_io);
2365 
2366 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2367 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2368 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2369 }
2370 
2371 static void
2372 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2373 {
2374 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2375 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2376 	struct ut_nvme_req *req;
2377 	struct nvme_io_path *io_path;
2378 	struct spdk_nvme_qpair *qpair;
2379 
2380 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2381 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2382 	qpair = io_path->qpair->qpair;
2383 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2384 
2385 	/* Only compare and write is tested for now. */
2386 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2387 	bdev_io->internal.f.in_submit_request = true;
2388 
2389 	bdev_nvme_submit_request(ch, bdev_io);
2390 
2391 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2392 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2393 	CU_ASSERT(bio->first_fused_submitted == true);
2394 
2395 	/* The first outstanding request is the compare operation. */
2396 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2397 	SPDK_CU_ASSERT_FATAL(req != NULL);
2398 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
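	/* Return the compare opcode in cdw0 so that the fused completion
	 * callback can tell the compare completion apart from the write.
	 */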
2399 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2400 
2401 	poll_threads();
2402 
2403 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2404 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2405 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2406 }
2407 
2408 static void
2409 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2410 			 struct spdk_nvme_ctrlr *ctrlr)
2411 {
2412 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2413 	bdev_io->internal.f.in_submit_request = true;
2414 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2415 
2416 	bdev_nvme_submit_request(ch, bdev_io);
2417 
2418 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2419 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2420 
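	/* The admin command completes on the thread of the nvme_ctrlr (thread 1),
	 * and the bdev_io is then completed back on the submitting thread (thread 0).
	 */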
2421 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2422 	poll_thread_times(1, 1);
2423 
2424 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2425 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2426 
2427 	poll_thread_times(0, 1);
2428 
2429 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2430 }
2431 
2432 static void
2433 test_submit_nvme_cmd(void)
2434 {
2435 	struct spdk_nvme_transport_id trid = {};
2436 	struct spdk_nvme_ctrlr *ctrlr;
2437 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2438 	struct nvme_ctrlr *nvme_ctrlr;
2439 	const int STRING_SIZE = 32;
2440 	const char *attached_names[STRING_SIZE];
2441 	struct nvme_bdev *bdev;
2442 	struct spdk_bdev_io *bdev_io;
2443 	struct spdk_io_channel *ch;
2444 	int rc;
2445 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2446 
2447 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2448 	bdev_opts.multipath = false;
2449 
2450 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2451 	ut_init_trid(&trid);
2452 
2453 	set_thread(1);
2454 
2455 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2456 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2457 
2458 	g_ut_attach_ctrlr_status = 0;
2459 	g_ut_attach_bdev_count = 1;
2460 
2461 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2462 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2463 	CU_ASSERT(rc == 0);
2464 
2465 	spdk_delay_us(1000);
2466 	poll_threads();
2467 
2468 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2469 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2470 
2471 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2472 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2473 
2474 	set_thread(0);
2475 
2476 	ch = spdk_get_io_channel(bdev);
2477 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2478 
2479 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2480 
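	/* Submitting a read without an iovec should still succeed by going
	 * through the buffer allocation (spdk_bdev_io_get_buf()) path first.
	 */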
2481 	bdev_io->u.bdev.iovs = NULL;
2482 
2483 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2484 
2485 	ut_bdev_io_set_buf(bdev_io);
2486 
2487 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2488 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2489 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2490 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2491 
2492 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2493 
2494 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2495 
2496 	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2497 	g_ut_read_ext_called = false;
2498 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2499 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2500 	CU_ASSERT(g_ut_read_ext_called == true);
2501 	g_ut_read_ext_called = false;
2502 	bdev_io->u.bdev.memory_domain = NULL;
2503 
2504 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2505 
2506 	free(bdev_io);
2507 
2508 	spdk_put_io_channel(ch);
2509 
2510 	poll_threads();
2511 
2512 	set_thread(1);
2513 
2514 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2515 	CU_ASSERT(rc == 0);
2516 
2517 	poll_threads();
2518 	spdk_delay_us(1000);
2519 	poll_threads();
2520 
2521 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2522 }
2523 
2524 static void
2525 test_add_remove_trid(void)
2526 {
2527 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2528 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2529 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2530 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2531 	const int STRING_SIZE = 32;
2532 	const char *attached_names[STRING_SIZE];
2533 	struct nvme_path_id *ctrid;
2534 	int rc;
2535 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2536 
2537 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2538 	bdev_opts.multipath = false;
2539 
2540 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2541 	ut_init_trid(&path1.trid);
2542 	ut_init_trid2(&path2.trid);
2543 	ut_init_trid3(&path3.trid);
2544 
2545 	set_thread(0);
2546 
2547 	g_ut_attach_ctrlr_status = 0;
2548 	g_ut_attach_bdev_count = 0;
2549 
2550 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2551 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2552 
2553 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2554 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2555 	CU_ASSERT(rc == 0);
2556 
2557 	spdk_delay_us(1000);
2558 	poll_threads();
2559 
2560 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2561 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2562 
2563 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2564 
2565 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2566 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2567 
2568 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2569 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2570 	CU_ASSERT(rc == 0);
2571 
2572 	spdk_delay_us(1000);
2573 	poll_threads();
2574 
2575 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2576 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2577 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2578 			break;
2579 		}
2580 	}
2581 	CU_ASSERT(ctrid != NULL);
2582 
2583 	/* trid3 is not in the registered list. */
2584 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2585 	CU_ASSERT(rc == -ENXIO);
2586 
2587 	/* trid2 is not in use, so it is simply removed. */
2588 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2589 	CU_ASSERT(rc == 0);
2590 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2591 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2592 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2593 	}
2594 
2595 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2596 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2597 
2598 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2599 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2600 	CU_ASSERT(rc == 0);
2601 
2602 	spdk_delay_us(1000);
2603 	poll_threads();
2604 
2605 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2606 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2607 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2608 			break;
2609 		}
2610 	}
2611 	CU_ASSERT(ctrid != NULL);
2612 
2613 	/* Forcibly mark path3 as failed by setting its last_failed_tsc to a non-zero value.
2614 	 * If we add path2 again, it should be inserted between path1 and path3.
2615 	 * Then we remove path2. It is not in use, so it is simply removed.
2616 	 */
2617 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2618 
2619 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2620 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2621 
2622 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2623 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2624 	CU_ASSERT(rc == 0);
2625 
2626 	spdk_delay_us(1000);
2627 	poll_threads();
2628 
2629 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2630 
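	/* Verify the resulting order: path1 (active), then path2, then path3. */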
2631 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2632 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2633 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2634 
2635 	ctrid = TAILQ_NEXT(ctrid, link);
2636 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2637 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2638 
2639 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2640 	CU_ASSERT(rc == 0);
2641 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2642 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2643 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2644 	}
2645 
2646 	/* path1 is currently in use and path3 is an alternative path.
2647 	 * If we remove path1, the active path changes to path3.
2648 	 */
2649 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2650 	CU_ASSERT(rc == 0);
2651 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2652 	CU_ASSERT(nvme_ctrlr->resetting == true);
2653 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2654 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2655 	}
2656 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2657 
2658 	poll_threads();
2659 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2660 	poll_threads();
2661 
2662 	CU_ASSERT(nvme_ctrlr->resetting == false);
2663 
2664 	/* path3 is the current and only path. If we remove path3, the corresponding
2665 	 * nvme_ctrlr is removed.
2666 	 */
2667 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2668 	CU_ASSERT(rc == 0);
2669 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2670 
2671 	poll_threads();
2672 	spdk_delay_us(1000);
2673 	poll_threads();
2674 
2675 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2676 
2677 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2678 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2679 
2680 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2681 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2682 	CU_ASSERT(rc == 0);
2683 
2684 	spdk_delay_us(1000);
2685 	poll_threads();
2686 
2687 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2688 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2689 
2690 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2691 
2692 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2693 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2694 
2695 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2696 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2697 	CU_ASSERT(rc == 0);
2698 
2699 	spdk_delay_us(1000);
2700 	poll_threads();
2701 
2702 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2703 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2704 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2705 			break;
2706 		}
2707 	}
2708 	CU_ASSERT(ctrid != NULL);
2709 
2710 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2711 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2712 	CU_ASSERT(rc == 0);
2713 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2714 
2715 	poll_threads();
2716 	spdk_delay_us(1000);
2717 	poll_threads();
2718 
2719 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2720 }
2721 
2722 static void
2723 test_abort(void)
2724 {
2725 	struct spdk_nvme_transport_id trid = {};
2726 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
2727 	struct spdk_nvme_ctrlr *ctrlr;
2728 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2729 	struct nvme_ctrlr *nvme_ctrlr;
2730 	const int STRING_SIZE = 32;
2731 	const char *attached_names[STRING_SIZE];
2732 	struct nvme_bdev *bdev;
2733 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2734 	struct spdk_io_channel *ch1, *ch2;
2735 	struct nvme_bdev_channel *nbdev_ch1;
2736 	struct nvme_io_path *io_path1;
2737 	struct nvme_qpair *nvme_qpair1;
2738 	int rc;
2739 
2740 	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be
2741 	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0 and
2742 	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
2743 	 */
2744 
2745 	ut_init_trid(&trid);
2746 
2747 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2748 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2749 
2750 	g_ut_attach_ctrlr_status = 0;
2751 	g_ut_attach_bdev_count = 1;
2752 
2753 	set_thread(1);
2754 
2755 	opts.ctrlr_loss_timeout_sec = -1;
2756 	opts.reconnect_delay_sec = 1;
2757 	opts.multipath = false;
2758 
2759 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2760 				   attach_ctrlr_done, NULL, &dopts, &opts);
2761 	CU_ASSERT(rc == 0);
2762 
2763 	spdk_delay_us(1000);
2764 	poll_threads();
2765 
2766 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2767 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2768 
2769 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2770 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2771 
2772 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2773 	ut_bdev_io_set_buf(write_io);
2774 
2775 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2776 	ut_bdev_io_set_buf(fuse_io);
2777 
2778 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2779 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2780 
2781 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2782 
2783 	set_thread(0);
2784 
2785 	ch1 = spdk_get_io_channel(bdev);
2786 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2787 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2788 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2789 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2790 	nvme_qpair1 = io_path1->qpair;
2791 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2792 
2793 	set_thread(1);
2794 
2795 	ch2 = spdk_get_io_channel(bdev);
2796 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2797 
2798 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2799 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2800 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2801 
2802 	/* Aborting the already completed request should fail. */
2803 	write_io->internal.f.in_submit_request = true;
2804 	bdev_nvme_submit_request(ch1, write_io);
2805 	poll_threads();
2806 
2807 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2808 
2809 	abort_io->u.abort.bio_to_abort = write_io;
2810 	abort_io->internal.f.in_submit_request = true;
2811 
2812 	bdev_nvme_submit_request(ch1, abort_io);
2813 
2814 	poll_threads();
2815 
2816 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2817 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2818 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2819 
2820 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2821 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2822 
2823 	admin_io->internal.f.in_submit_request = true;
2824 	bdev_nvme_submit_request(ch1, admin_io);
2825 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2826 	poll_threads();
2827 
2828 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2829 
2830 	abort_io->u.abort.bio_to_abort = admin_io;
2831 	abort_io->internal.f.in_submit_request = true;
2832 
2833 	bdev_nvme_submit_request(ch2, abort_io);
2834 
2835 	poll_threads();
2836 
2837 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2838 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2839 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2840 
2841 	/* Aborting the write request should succeed. */
2842 	write_io->internal.f.in_submit_request = true;
2843 	bdev_nvme_submit_request(ch1, write_io);
2844 
2845 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2846 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2847 
2848 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2849 	abort_io->u.abort.bio_to_abort = write_io;
2850 	abort_io->internal.f.in_submit_request = true;
2851 
2852 	bdev_nvme_submit_request(ch1, abort_io);
2853 
2854 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2855 	poll_threads();
2856 
2857 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2858 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2859 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2860 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2861 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2862 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2863 
2864 	/* Aborting the fuse request should succeed. */
2865 	fuse_io->internal.f.in_submit_request = true;
2866 	bdev_nvme_submit_request(ch1, fuse_io);
2867 
2868 	CU_ASSERT(fuse_io->internal.f.in_submit_request == true);
2869 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2870 
2871 	abort_io->u.abort.bio_to_abort = fuse_io;
2872 	abort_io->internal.f.in_submit_request = true;
2873 
2874 	bdev_nvme_submit_request(ch1, abort_io);
2875 
2876 	spdk_delay_us(10000);
2877 	poll_threads();
2878 
2879 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2880 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2881 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2882 	CU_ASSERT(fuse_io->internal.f.in_submit_request == false);
2883 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2884 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2885 
2886 	/* Aborting the admin request should succeed. */
2887 	admin_io->internal.f.in_submit_request = true;
2888 	bdev_nvme_submit_request(ch1, admin_io);
2889 
2890 	CU_ASSERT(admin_io->internal.f.in_submit_request == true);
2891 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2892 
2893 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2894 	abort_io->u.abort.bio_to_abort = admin_io;
2895 	abort_io->internal.f.in_submit_request = true;
2896 
2897 	bdev_nvme_submit_request(ch2, abort_io);
2898 
2899 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2900 	poll_threads();
2901 
2902 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2903 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2904 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2905 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2906 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2907 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2908 
2909 	set_thread(0);
2910 
2911 	/* If the qpair is disconnected, it is freed and then reconnected by resetting
2912 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is resetting
2913 	 * should be queued.
2914 	 */
2915 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2916 
2917 	poll_thread_times(0, 3);
2918 
2919 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2920 	CU_ASSERT(nvme_ctrlr->resetting == true);
2921 
2922 	write_io->internal.f.in_submit_request = true;
2923 
2924 	bdev_nvme_submit_request(ch1, write_io);
2925 
2926 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2927 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2928 
2929 	/* Aborting the queued write request should succeed immediately. */
2930 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2931 	abort_io->u.abort.bio_to_abort = write_io;
2932 	abort_io->internal.f.in_submit_request = true;
2933 
2934 	bdev_nvme_submit_request(ch1, abort_io);
2935 
2936 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2937 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2938 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2939 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2940 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2941 
2942 	poll_threads();
2943 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2944 	poll_threads();
2945 
2946 	spdk_put_io_channel(ch1);
2947 
2948 	set_thread(1);
2949 
2950 	spdk_put_io_channel(ch2);
2951 
2952 	poll_threads();
2953 
2954 	free(write_io);
2955 	free(fuse_io);
2956 	free(admin_io);
2957 	free(abort_io);
2958 
2959 	set_thread(1);
2960 
2961 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2962 	CU_ASSERT(rc == 0);
2963 
2964 	poll_threads();
2965 	spdk_delay_us(1000);
2966 	poll_threads();
2967 
2968 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2969 }
2970 
2971 static void
2972 test_get_io_qpair(void)
2973 {
2974 	struct spdk_nvme_transport_id trid = {};
2975 	struct spdk_nvme_ctrlr ctrlr = {};
2976 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2977 	struct spdk_io_channel *ch;
2978 	struct nvme_ctrlr_channel *ctrlr_ch;
2979 	struct spdk_nvme_qpair *qpair;
2980 	int rc;
2981 
2982 	ut_init_trid(&trid);
2983 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2984 
2985 	set_thread(0);
2986 
2987 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2988 	CU_ASSERT(rc == 0);
2989 
2990 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2991 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2992 
2993 	ch = spdk_get_io_channel(nvme_ctrlr);
2994 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2995 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2996 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2997 
2998 	qpair = bdev_nvme_get_io_qpair(ch);
2999 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
3000 
3001 	spdk_put_io_channel(ch);
3002 
3003 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3004 	CU_ASSERT(rc == 0);
3005 
3006 	poll_threads();
3007 	spdk_delay_us(1000);
3008 	poll_threads();
3009 
3010 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3011 }
3012 
3013 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
3014  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
3015  * This test case guards against regressions in that scenario. spdk_bdev_unregister()
3016  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
3017  */
3018 static void
3019 test_bdev_unregister(void)
3020 {
3021 	struct spdk_nvme_transport_id trid = {};
3022 	struct spdk_nvme_ctrlr *ctrlr;
3023 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3024 	struct nvme_ctrlr *nvme_ctrlr;
3025 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3026 	const int STRING_SIZE = 32;
3027 	const char *attached_names[STRING_SIZE];
3028 	struct nvme_bdev *bdev1, *bdev2;
3029 	int rc;
3030 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3031 
3032 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3033 	bdev_opts.multipath = false;
3034 
3035 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3036 	ut_init_trid(&trid);
3037 
3038 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
3039 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3040 
3041 	g_ut_attach_ctrlr_status = 0;
3042 	g_ut_attach_bdev_count = 2;
3043 
3044 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3045 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3046 	CU_ASSERT(rc == 0);
3047 
3048 	spdk_delay_us(1000);
3049 	poll_threads();
3050 
3051 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3052 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3053 
3054 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
3055 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3056 
3057 	bdev1 = nvme_ns1->bdev;
3058 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3059 
3060 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
3061 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3062 
3063 	bdev2 = nvme_ns2->bdev;
3064 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3065 
3066 	bdev_nvme_destruct(&bdev1->disk);
3067 	bdev_nvme_destruct(&bdev2->disk);
3068 
3069 	poll_threads();
3070 
3071 	CU_ASSERT(nvme_ns1->bdev == NULL);
3072 	CU_ASSERT(nvme_ns2->bdev == NULL);
3073 
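	/* With both bdevs unregistered, destruct the nvme_ctrlr directly as well. */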
3074 	nvme_ctrlr->destruct = true;
3075 	_nvme_ctrlr_destruct(nvme_ctrlr);
3076 
3077 	poll_threads();
3078 	spdk_delay_us(1000);
3079 	poll_threads();
3080 
3081 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3082 }
3083 
3084 static void
3085 test_compare_ns(void)
3086 {
3087 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3088 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3089 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3090 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3091 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3092 
3093 	/* No IDs are defined. */
3094 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3095 
3096 	/* Only EUI64 are defined and not matched. */
3097 	nsdata1.eui64 = 0xABCDEF0123456789;
3098 	nsdata2.eui64 = 0xBBCDEF0123456789;
3099 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3100 
3101 	/* Only EUI64 are defined and matched. */
3102 	nsdata2.eui64 = 0xABCDEF0123456789;
3103 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3104 
3105 	/* Only NGUID are defined and not matched. */
3106 	nsdata1.eui64 = 0x0;
3107 	nsdata2.eui64 = 0x0;
3108 	nsdata1.nguid[0] = 0x12;
3109 	nsdata2.nguid[0] = 0x10;
3110 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3111 
3112 	/* Only NGUID are defined and matched. */
3113 	nsdata2.nguid[0] = 0x12;
3114 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3115 
3116 	/* Only UUID are defined and not matched. */
3117 	nsdata1.nguid[0] = 0x0;
3118 	nsdata2.nguid[0] = 0x0;
3119 	ns1.uuid = &uuid1;
3120 	ns2.uuid = &uuid2;
3121 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3122 
3123 	/* Only one UUID is defined. */
3124 	ns1.uuid = NULL;
3125 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3126 
3127 	/* Only UUID are defined and matched. */
3128 	ns1.uuid = &uuid2;
3129 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3130 
3131 	/* All EUI64, NGUID, and UUID are defined and matched. */
3132 	nsdata1.eui64 = 0x123456789ABCDEF;
3133 	nsdata2.eui64 = 0x123456789ABCDEF;
3134 	nsdata1.nguid[15] = 0x34;
3135 	nsdata2.nguid[15] = 0x34;
3136 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3137 
3138 	/* CSI are not matched. */
3139 	ns1.csi = SPDK_NVME_CSI_ZNS;
3140 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3141 }
3142 
3143 static void
3144 test_init_ana_log_page(void)
3145 {
3146 	struct spdk_nvme_transport_id trid = {};
3147 	struct spdk_nvme_ctrlr *ctrlr;
3148 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3149 	struct nvme_ctrlr *nvme_ctrlr;
3150 	const int STRING_SIZE = 32;
3151 	const char *attached_names[STRING_SIZE];
3152 	int rc;
3153 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3154 
3155 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3156 	bdev_opts.multipath = false;
3157 
3158 	set_thread(0);
3159 
3160 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3161 	ut_init_trid(&trid);
3162 
3163 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3164 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3165 
3166 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3167 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3168 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3169 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3170 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3171 
3172 	g_ut_attach_ctrlr_status = 0;
3173 	g_ut_attach_bdev_count = 5;
3174 
3175 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3176 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3177 	CU_ASSERT(rc == 0);
3178 
3179 	spdk_delay_us(1000);
3180 	poll_threads();
3181 
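	/* Reading the ANA log page is an admin command, so advance past the
	 * admin poll period before checking the ANA states.
	 */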
3182 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3183 	poll_threads();
3184 
3185 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3186 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3187 
3188 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3189 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3190 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3193 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3194 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3195 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3196 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3197 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3198 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3199 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3200 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3201 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3202 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3203 
3204 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3205 	CU_ASSERT(rc == 0);
3206 
3207 	poll_threads();
3208 	spdk_delay_us(1000);
3209 	poll_threads();
3210 
3211 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3212 }
3213 
3214 static void
3215 init_accel(void)
3216 {
3217 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3218 				sizeof(int), "accel_p");
3219 }
3220 
3221 static void
3222 fini_accel(void)
3223 {
3224 	spdk_io_device_unregister(g_accel_p, NULL);
3225 }
3226 
3227 static void
3228 test_get_memory_domains(void)
3229 {
3230 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3231 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3232 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3233 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3234 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3235 	struct spdk_memory_domain *domains[4] = {};
3236 	int rc = 0;
3237 
3238 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3239 
3240 	/* nvme controller doesn't have memory domains */
3241 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3242 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3243 	CU_ASSERT(rc == 0);
3244 	CU_ASSERT(domains[0] == NULL);
3245 	CU_ASSERT(domains[1] == NULL);
3246 
3247 	/* nvme controller has a memory domain */
3248 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3249 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3250 	CU_ASSERT(rc == 1);
3251 	CU_ASSERT(domains[0] != NULL);
3252 	memset(domains, 0, sizeof(domains));
3253 
3254 	/* multipath, 2 controllers report 1 memory domain each */
3255 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3256 
3257 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3258 	CU_ASSERT(rc == 2);
3259 	CU_ASSERT(domains[0] != NULL);
3260 	CU_ASSERT(domains[1] != NULL);
3261 	memset(domains, 0, sizeof(domains));
3262 
3263 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3264 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3265 	CU_ASSERT(rc == 2);
3266 
3267 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3268 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3269 	CU_ASSERT(rc == 2);
3270 	CU_ASSERT(domains[0] == NULL);
3271 	CU_ASSERT(domains[1] == NULL);
3272 
3273 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3274 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3275 	CU_ASSERT(rc == 2);
3276 	CU_ASSERT(domains[0] != NULL);
3277 	CU_ASSERT(domains[1] == NULL);
3278 	memset(domains, 0, sizeof(domains));
3279 
3280 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3281 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3282 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3283 	CU_ASSERT(rc == 4);
3284 	CU_ASSERT(domains[0] != NULL);
3285 	CU_ASSERT(domains[1] != NULL);
3286 	CU_ASSERT(domains[2] != NULL);
3287 	CU_ASSERT(domains[3] != NULL);
3288 	memset(domains, 0, sizeof(domains));
3289 
3290 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3291 	 * The array size is less than the number of memory domains. */
3292 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3293 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3294 	CU_ASSERT(rc == 4);
3295 	CU_ASSERT(domains[0] != NULL);
3296 	CU_ASSERT(domains[1] != NULL);
3297 	CU_ASSERT(domains[2] != NULL);
3298 	CU_ASSERT(domains[3] == NULL);
3299 	memset(domains, 0, sizeof(domains));
3300 
3301 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3302 }
3303 
3304 static void
3305 test_reconnect_qpair(void)
3306 {
3307 	struct spdk_nvme_transport_id trid = {};
3308 	struct spdk_nvme_ctrlr *ctrlr;
3309 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3310 	struct nvme_ctrlr *nvme_ctrlr;
3311 	const int STRING_SIZE = 32;
3312 	const char *attached_names[STRING_SIZE];
3313 	struct nvme_bdev *bdev;
3314 	struct spdk_io_channel *ch1, *ch2;
3315 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3316 	struct nvme_io_path *io_path1, *io_path2;
3317 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3318 	int rc;
3319 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3320 
3321 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3322 	bdev_opts.multipath = false;
3323 
3324 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3325 	ut_init_trid(&trid);
3326 
3327 	set_thread(0);
3328 
3329 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3330 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3331 
3332 	g_ut_attach_ctrlr_status = 0;
3333 	g_ut_attach_bdev_count = 1;
3334 
3335 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3336 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3337 	CU_ASSERT(rc == 0);
3338 
3339 	spdk_delay_us(1000);
3340 	poll_threads();
3341 
3342 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3343 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3344 
3345 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3346 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3347 
3348 	ch1 = spdk_get_io_channel(bdev);
3349 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3350 
3351 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3352 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3353 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3354 	nvme_qpair1 = io_path1->qpair;
3355 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3356 
3357 	set_thread(1);
3358 
3359 	ch2 = spdk_get_io_channel(bdev);
3360 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3361 
3362 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3363 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3364 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3365 	nvme_qpair2 = io_path2->qpair;
3366 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3367 
3368 	/* If a qpair is disconnected, it is freed and then reconnected by
3369 	 * resetting the corresponding nvme_ctrlr.
3370 	 */
3371 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3372 	ctrlr->is_failed = true;
3373 
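	/* Walk the reset sequence one poller iteration at a time: thread 1
	 * detects the disconnected qpair and starts the reset, thread 0 frees
	 * its qpair and the adminq is disconnected, the adminq reconnects, and
	 * finally both I/O qpairs are recreated and the reset completes.
	 */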
3374 	poll_thread_times(1, 3);
3375 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3376 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3377 	CU_ASSERT(nvme_ctrlr->resetting == true);
3378 
3379 	poll_thread_times(0, 3);
3380 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3381 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3382 	CU_ASSERT(ctrlr->is_failed == true);
3383 
3384 	poll_thread_times(1, 2);
3385 	poll_thread_times(0, 1);
3386 	CU_ASSERT(ctrlr->is_failed == false);
3387 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3388 
3389 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3390 	poll_thread_times(0, 2);
3391 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3392 
3393 	poll_thread_times(0, 1);
3394 	poll_thread_times(1, 1);
3395 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3396 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3397 	CU_ASSERT(nvme_ctrlr->resetting == true);
3398 
3399 	poll_thread_times(0, 2);
3400 	poll_thread_times(1, 1);
3401 	poll_thread_times(0, 1);
3402 	CU_ASSERT(nvme_ctrlr->resetting == false);
3403 
3404 	poll_threads();
3405 
3406 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3407 	 * fails, the qpair is just freed.
3408 	 */
3409 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3410 	ctrlr->is_failed = true;
3411 	ctrlr->fail_reset = true;
3412 
3413 	poll_thread_times(1, 3);
3414 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3415 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3416 	CU_ASSERT(nvme_ctrlr->resetting == true);
3417 
3418 	poll_thread_times(0, 3);
3419 	poll_thread_times(1, 1);
3420 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3421 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3422 	CU_ASSERT(ctrlr->is_failed == true);
3423 
3424 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3425 	poll_thread_times(0, 3);
3426 	poll_thread_times(1, 1);
3427 	poll_thread_times(0, 1);
3428 	CU_ASSERT(ctrlr->is_failed == true);
3429 	CU_ASSERT(nvme_ctrlr->resetting == false);
3430 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3431 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3432 
3433 	poll_threads();
3434 
3435 	spdk_put_io_channel(ch2);
3436 
3437 	set_thread(0);
3438 
3439 	spdk_put_io_channel(ch1);
3440 
3441 	poll_threads();
3442 
3443 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3444 	CU_ASSERT(rc == 0);
3445 
3446 	poll_threads();
3447 	spdk_delay_us(1000);
3448 	poll_threads();
3449 
3450 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3451 }
3452 
3453 static void
3454 test_create_bdev_ctrlr(void)
3455 {
3456 	struct nvme_path_id path1 = {}, path2 = {};
3457 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3458 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3459 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3460 	const int STRING_SIZE = 32;
3461 	const char *attached_names[STRING_SIZE];
3462 	int rc;
3463 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3464 
3465 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3466 	bdev_opts.multipath = true;
3467 
3468 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3469 	ut_init_trid(&path1.trid);
3470 	ut_init_trid2(&path2.trid);
3471 
3472 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3473 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3474 
3475 	g_ut_attach_ctrlr_status = 0;
3476 	g_ut_attach_bdev_count = 0;
3477 
3478 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3479 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3480 	CU_ASSERT(rc == 0);
3481 
3482 	spdk_delay_us(1000);
3483 	poll_threads();
3484 
3485 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3486 	poll_threads();
3487 
3488 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3489 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3490 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3491 
3492 	/* The cntlid is duplicated, so adding the second ctrlr should fail. */
3493 	g_ut_attach_ctrlr_status = -EINVAL;
3494 
3495 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3496 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3497 
3498 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
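	/* bdev_nvme rejects a ctrlr whose cntlid duplicates one already
	 * registered under the same name; the stubbed attach status above
	 * mirrors the expected -EINVAL.
	 */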
3499 
3500 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3501 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3502 	CU_ASSERT(rc == 0);
3503 
3504 	spdk_delay_us(1000);
3505 	poll_threads();
3506 
3507 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3508 	poll_threads();
3509 
3510 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3511 
3512 	/* The cntlid is not duplicated, so adding the third ctrlr should succeed. */
3513 	g_ut_attach_ctrlr_status = 0;
3514 
3515 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3516 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3517 
3518 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3519 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3520 	CU_ASSERT(rc == 0);
3521 
3522 	spdk_delay_us(1000);
3523 	poll_threads();
3524 
3525 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3526 	poll_threads();
3527 
3528 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3529 
3530 	/* Delete two ctrlrs at once. */
3531 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3532 	CU_ASSERT(rc == 0);
3533 
3534 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3535 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3536 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3537 
3538 	poll_threads();
3539 	spdk_delay_us(1000);
3540 	poll_threads();
3541 
3542 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3543 
3544 	/* Add two ctrlrs and delete one by one. */
3545 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3546 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3547 
3548 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3549 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3550 
3551 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3552 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3553 	CU_ASSERT(rc == 0);
3554 
3555 	spdk_delay_us(1000);
3556 	poll_threads();
3557 
3558 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3559 	poll_threads();
3560 
3561 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3562 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3563 	CU_ASSERT(rc == 0);
3564 
3565 	spdk_delay_us(1000);
3566 	poll_threads();
3567 
3568 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3569 	poll_threads();
3570 
3571 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3572 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3573 
3574 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3575 	CU_ASSERT(rc == 0);
3576 
3577 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3578 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3579 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3580 
3581 	poll_threads();
3582 	spdk_delay_us(1000);
3583 	poll_threads();
3584 
3585 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3586 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3587 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3588 
3589 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3590 	CU_ASSERT(rc == 0);
3591 
3592 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3593 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3594 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3595 
3596 	poll_threads();
3597 	spdk_delay_us(1000);
3598 	poll_threads();
3599 
3600 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3601 }
3602 
3603 static struct nvme_ns *
3604 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3605 {
3606 	struct nvme_ns *nvme_ns;
3607 
3608 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3609 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3610 			return nvme_ns;
3611 		}
3612 	}
3613 
3614 	return NULL;
3615 }
3616 
3617 static void
3618 test_add_multi_ns_to_bdev(void)
3619 {
3620 	struct nvme_path_id path1 = {}, path2 = {};
3621 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3622 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3623 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3624 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3625 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3626 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3627 	const int STRING_SIZE = 32;
3628 	const char *attached_names[STRING_SIZE];
3629 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3630 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3631 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3632 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3633 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3634 	int rc;
3635 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3636 
3637 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3638 	bdev_opts.multipath = true;
3639 
3640 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3641 	ut_init_trid(&path1.trid);
3642 	ut_init_trid2(&path2.trid);
3643 
3644 	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */
3645 
3646 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3647 	 * 3rd, and 4th namespaces are populated.
3648 	 */
3649 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3650 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3651 
3652 	ctrlr1->ns[1].is_active = false;
3653 	ctrlr1->ns[4].is_active = false;
3654 	ctrlr1->ns[0].uuid = &uuid1;
3655 	ctrlr1->ns[2].uuid = &uuid3;
3656 	ctrlr1->ns[3].uuid = &uuid4;
3657 
3658 	g_ut_attach_ctrlr_status = 0;
3659 	g_ut_attach_bdev_count = 3;
3660 
3661 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3662 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3663 	CU_ASSERT(rc == 0);
3664 
3665 	spdk_delay_us(1000);
3666 	poll_threads();
3667 
3668 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3669 	poll_threads();
3670 
3671 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3672 	 * 2nd, and 4th namespaces are populated. The uuid of the 4th namespace is
3673 	 * different, and hence adding the 4th namespace to the shared bdev should fail.
3674 	 */
3675 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3676 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3677 
3678 	ctrlr2->ns[2].is_active = false;
3679 	ctrlr2->ns[4].is_active = false;
3680 	ctrlr2->ns[0].uuid = &uuid1;
3681 	ctrlr2->ns[1].uuid = &uuid2;
3682 	ctrlr2->ns[3].uuid = &uuid44;
3683 
3684 	g_ut_attach_ctrlr_status = 0;
3685 	g_ut_attach_bdev_count = 2;
3686 
3687 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3688 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3689 	CU_ASSERT(rc == 0);
3690 
3691 	spdk_delay_us(1000);
3692 	poll_threads();
3693 
3694 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3695 	poll_threads();
3696 
3697 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3698 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3699 
3700 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3701 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3702 
3703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3707 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3708 
3709 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3710 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3711 
3712 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3713 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3714 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3715 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3716 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3717 
3718 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3719 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3720 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3721 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3722 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3723 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3724 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3725 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3726 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3727 
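	/* bdev1 is backed by the namespace with uuid1 on both ctrlrs, so it holds
	 * two nvme_ns references. The other bdevs each have a single path.
	 */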
3728 	CU_ASSERT(bdev1->ref == 2);
3729 	CU_ASSERT(bdev2->ref == 1);
3730 	CU_ASSERT(bdev3->ref == 1);
3731 	CU_ASSERT(bdev4->ref == 1);
3732 
3733 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3734 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3735 	CU_ASSERT(rc == 0);
3736 
3737 	poll_threads();
3738 	spdk_delay_us(1000);
3739 	poll_threads();
3740 
3741 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3742 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3743 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3744 
3745 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3746 	CU_ASSERT(rc == 0);
3747 
3748 	poll_threads();
3749 	spdk_delay_us(1000);
3750 	poll_threads();
3751 
3752 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3753 
3754 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3755 	 * can be deleted when the bdev subsystem shuts down.
3756 	 */
3757 	g_ut_attach_bdev_count = 1;
3758 
3759 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3760 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3761 
3762 	ctrlr1->ns[0].uuid = &uuid1;
3763 
3764 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3765 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3766 	CU_ASSERT(rc == 0);
3767 
3768 	spdk_delay_us(1000);
3769 	poll_threads();
3770 
3771 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3772 	poll_threads();
3773 
3774 	ut_init_trid2(&path2.trid);
3775 
3776 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3777 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3778 
3779 	ctrlr2->ns[0].uuid = &uuid1;
3780 
3781 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3782 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3783 	CU_ASSERT(rc == 0);
3784 
3785 	spdk_delay_us(1000);
3786 	poll_threads();
3787 
3788 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3789 	poll_threads();
3790 
3791 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3792 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3793 
3794 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3795 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3796 
3797 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3798 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3799 
3800 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3801 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3802 
3803 	/* Check if the nvme_bdev has two nvme_ns, one per ctrlr. */
3804 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3805 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3806 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3807 
3808 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3809 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3810 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3811 
3812 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3813 	bdev_nvme_destruct(&bdev1->disk);
3814 
3815 	poll_threads();
3816 
3817 	CU_ASSERT(nvme_ns1->bdev == NULL);
3818 	CU_ASSERT(nvme_ns2->bdev == NULL);
3819 
3820 	nvme_ctrlr1->destruct = true;
3821 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3822 
3823 	poll_threads();
3824 	spdk_delay_us(1000);
3825 	poll_threads();
3826 
3827 	nvme_ctrlr2->destruct = true;
3828 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3829 
3830 	poll_threads();
3831 	spdk_delay_us(1000);
3832 	poll_threads();
3833 
3834 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3835 }
3836 
3837 static void
3838 test_add_multi_io_paths_to_nbdev_ch(void)
3839 {
3840 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3841 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3842 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3843 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3844 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3845 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3846 	const int STRING_SIZE = 32;
3847 	const char *attached_names[STRING_SIZE];
3848 	struct nvme_bdev *bdev;
3849 	struct spdk_io_channel *ch;
3850 	struct nvme_bdev_channel *nbdev_ch;
3851 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3852 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3853 	int rc;
3854 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3855 
3856 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3857 	bdev_opts.multipath = true;
3858 
3859 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3860 	ut_init_trid(&path1.trid);
3861 	ut_init_trid2(&path2.trid);
3862 	ut_init_trid3(&path3.trid);
3863 	g_ut_attach_ctrlr_status = 0;
3864 	g_ut_attach_bdev_count = 1;
3865 
3866 	set_thread(1);
3867 
3868 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3869 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3870 
3871 	ctrlr1->ns[0].uuid = &uuid1;
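	/* All ctrlrs in this test expose a namespace with the same UUID, so they
	 * aggregate into a single nvme_bdev with one io_path per ctrlr.
	 */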
3872 
3873 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3874 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3875 	CU_ASSERT(rc == 0);
3876 
3877 	spdk_delay_us(1000);
3878 	poll_threads();
3879 
3880 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3881 	poll_threads();
3882 
3883 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3884 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3885 
3886 	ctrlr2->ns[0].uuid = &uuid1;
3887 
3888 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3889 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3890 	CU_ASSERT(rc == 0);
3891 
3892 	spdk_delay_us(1000);
3893 	poll_threads();
3894 
3895 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3896 	poll_threads();
3897 
3898 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3899 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3900 
3901 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3902 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3903 
3904 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3905 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3906 
3907 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3908 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3909 
3910 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3911 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3912 
3913 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3914 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3915 
3916 	set_thread(0);
3917 
3918 	ch = spdk_get_io_channel(bdev);
3919 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3920 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3921 
3922 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3923 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3924 
3925 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3926 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3927 
3928 	set_thread(1);
3929 
3930 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3931 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3932 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3933 
3934 	ctrlr3->ns[0].uuid = &uuid1;
3935 
3936 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3937 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3938 	CU_ASSERT(rc == 0);
3939 
3940 	spdk_delay_us(1000);
3941 	poll_threads();
3942 
3943 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3944 	poll_threads();
3945 
3946 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3947 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3948 
3949 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3950 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3951 
3952 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3953 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3954 
3955 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3956 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3957 	CU_ASSERT(rc == 0);
3958 
3959 	poll_threads();
3960 	spdk_delay_us(1000);
3961 	poll_threads();
3962 
3963 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3964 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3965 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3966 
3967 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3968 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3969 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3970 
3971 	set_thread(0);
3972 
3973 	spdk_put_io_channel(ch);
3974 
3975 	poll_threads();
3976 
3977 	set_thread(1);
3978 
3979 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3980 	CU_ASSERT(rc == 0);
3981 
3982 	poll_threads();
3983 	spdk_delay_us(1000);
3984 	poll_threads();
3985 
3986 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3987 }
3988 
3989 static void
3990 test_admin_path(void)
3991 {
3992 	struct nvme_path_id path1 = {}, path2 = {};
3993 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3994 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3995 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3996 	const int STRING_SIZE = 32;
3997 	const char *attached_names[STRING_SIZE];
3998 	struct nvme_bdev *bdev;
3999 	struct spdk_io_channel *ch;
4000 	struct spdk_bdev_io *bdev_io;
4001 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4002 	int rc;
4003 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4004 
4005 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4006 	bdev_opts.multipath = true;
4007 
4008 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4009 	ut_init_trid(&path1.trid);
4010 	ut_init_trid2(&path2.trid);
4011 	g_ut_attach_ctrlr_status = 0;
4012 	g_ut_attach_bdev_count = 1;
4013 
4014 	set_thread(0);
4015 
4016 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4017 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4018 
4019 	ctrlr1->ns[0].uuid = &uuid1;
4020 
4021 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4022 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4023 	CU_ASSERT(rc == 0);
4024 
4025 	spdk_delay_us(1000);
4026 	poll_threads();
4027 
4028 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4029 	poll_threads();
4030 
4031 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4032 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4033 
4034 	ctrlr2->ns[0].uuid = &uuid1;
4035 
4036 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4037 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4038 	CU_ASSERT(rc == 0);
4039 
4040 	spdk_delay_us(1000);
4041 	poll_threads();
4042 
4043 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4044 	poll_threads();
4045 
4046 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4047 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4048 
4049 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4050 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4051 
4052 	ch = spdk_get_io_channel(bdev);
4053 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4054 
4055 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
4056 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
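	/* Admin passthrough commands are submitted over the adminq of an available
	 * ctrlr rather than over an I/O qpair, which is what the
	 * adminq.num_outstanding_reqs asserts below observe.
	 */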
4057 
4058 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
4059 	 * submitted to ctrlr2.
4060 	 */
4061 	ctrlr1->is_failed = true;
4062 	bdev_io->internal.f.in_submit_request = true;
4063 
4064 	bdev_nvme_submit_request(ch, bdev_io);
4065 
4066 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4067 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4068 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4069 
4070 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4071 	poll_threads();
4072 
4073 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4074 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4075 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4076 
4077 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
4078 	ctrlr2->is_failed = true;
4079 	bdev_io->internal.f.in_submit_request = true;
4080 
4081 	bdev_nvme_submit_request(ch, bdev_io);
4082 
4083 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4084 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4085 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4086 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4087 
4088 	free(bdev_io);
4089 
4090 	spdk_put_io_channel(ch);
4091 
4092 	poll_threads();
4093 
4094 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4095 	CU_ASSERT(rc == 0);
4096 
4097 	poll_threads();
4098 	spdk_delay_us(1000);
4099 	poll_threads();
4100 
4101 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4102 }
4103 
4104 static struct nvme_io_path *
4105 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4106 			struct nvme_ctrlr *nvme_ctrlr)
4107 {
4108 	struct nvme_io_path *io_path;
4109 
4110 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4111 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4112 			return io_path;
4113 		}
4114 	}
4115 
4116 	return NULL;
4117 }
4118 
4119 static void
4120 test_reset_bdev_ctrlr(void)
4121 {
4122 	struct nvme_path_id path1 = {}, path2 = {};
4123 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4124 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4125 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4126 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4127 	struct nvme_path_id *curr_path1, *curr_path2;
4128 	const int STRING_SIZE = 32;
4129 	const char *attached_names[STRING_SIZE];
4130 	struct nvme_bdev *bdev;
4131 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4132 	struct nvme_bdev_io *first_bio;
4133 	struct spdk_io_channel *ch1, *ch2;
4134 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4135 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4136 	int rc;
4137 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4138 
4139 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4140 	bdev_opts.multipath = true;
4141 
4142 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4143 	ut_init_trid(&path1.trid);
4144 	ut_init_trid2(&path2.trid);
4145 	g_ut_attach_ctrlr_status = 0;
4146 	g_ut_attach_bdev_count = 1;
4147 
4148 	set_thread(0);
4149 
4150 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4151 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4152 
4153 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4154 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4155 	CU_ASSERT(rc == 0);
4156 
4157 	spdk_delay_us(1000);
4158 	poll_threads();
4159 
4160 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4161 	poll_threads();
4162 
4163 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4164 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4165 
4166 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4167 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4168 	CU_ASSERT(rc == 0);
4169 
4170 	spdk_delay_us(1000);
4171 	poll_threads();
4172 
4173 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4174 	poll_threads();
4175 
4176 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4177 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4178 
4179 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4180 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4181 
4182 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4183 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4184 
4185 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4186 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4187 
4188 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4189 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4190 
4191 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4192 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4193 
4194 	set_thread(0);
4195 
4196 	ch1 = spdk_get_io_channel(bdev);
4197 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4198 
4199 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4200 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4201 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4202 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4203 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4204 
4205 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4206 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4207 
4208 	set_thread(1);
4209 
4210 	ch2 = spdk_get_io_channel(bdev);
4211 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4212 
4213 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4214 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4215 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4216 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4217 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4218 
4219 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4220 
4221 	/* The first reset request from bdev_io is submitted on thread 0.
4222 	 * Check that ctrlr1 is reset first and then ctrlr2.
4223 	 *
4224 	 * A few extra polls are necessary after resetting ctrlr1 to check
4225 	 * for pending reset requests for ctrlr1.
4226 	 */
4227 	ctrlr1->is_failed = true;
4228 	curr_path1->last_failed_tsc = spdk_get_ticks();
4229 	ctrlr2->is_failed = true;
4230 	curr_path2->last_failed_tsc = spdk_get_ticks();
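	/* A nonzero last_failed_tsc marks the path as recently failed; a
	 * successful reset clears it back to 0, which the asserts below verify.
	 */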
4231 
4232 	set_thread(0);
4233 
4234 	bdev_nvme_submit_request(ch1, first_bdev_io);
4235 
4236 	poll_thread_times(0, 1);
4237 	poll_thread_times(1, 1);
4238 	poll_thread_times(0, 2);
4239 	poll_thread_times(1, 1);
4240 	poll_thread_times(0, 1);
4241 
4242 	CU_ASSERT(first_bio->io_path == io_path11);
4243 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4244 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4245 
4246 	poll_thread_times(0, 3);
4247 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4248 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4249 
4250 	poll_thread_times(1, 2);
4251 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4252 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4253 	CU_ASSERT(ctrlr1->is_failed == true);
4254 
4255 	poll_thread_times(0, 1);
4256 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4257 	CU_ASSERT(ctrlr1->is_failed == false);
4258 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4259 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4260 
4261 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4262 	poll_thread_times(0, 2);
4263 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4264 
4265 	poll_thread_times(0, 1);
4266 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4267 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4268 
4269 	poll_thread_times(1, 1);
4270 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4271 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4272 
4273 	poll_thread_times(0, 1);
4274 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4275 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4276 	poll_thread_times(0, 1);
4277 	CU_ASSERT(first_bio->io_path == io_path12);
4278 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4279 
4280 	poll_thread_times(0, 2);
4281 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4282 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4283 
4284 	poll_thread_times(1, 2);
4285 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4286 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4287 	CU_ASSERT(ctrlr2->is_failed == true);
4288 
4289 	poll_thread_times(0, 1);
4290 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4291 	CU_ASSERT(ctrlr2->is_failed == false);
4292 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4293 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4294 
4295 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4296 	poll_thread_times(0, 2);
4297 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4298 
4299 	poll_thread_times(0, 1);
4300 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4301 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4302 
4303 	poll_thread_times(1, 2);
4304 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4305 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4306 
4307 	poll_thread_times(0, 1);
4308 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4309 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4310 
4311 	poll_thread_times(0, 1);
4312 	CU_ASSERT(first_bio->io_path == NULL);
4313 
4314 	poll_threads();
4315 
4316 	/* There is a race between two reset requests from bdev_io.
4317 	 *
4318 	 * The first reset request is submitted on thread 0, and the second reset
4319 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4320 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4321 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4322 	 * The second is pending on ctrlr2 again. After the first completes resetting
4323 	 * ctrlr2, both complete successfully.
4324 	 */
4325 	ctrlr1->is_failed = true;
4326 	curr_path1->last_failed_tsc = spdk_get_ticks();
4327 	ctrlr2->is_failed = true;
4328 	curr_path2->last_failed_tsc = spdk_get_ticks();
4329 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4330 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4331 
4332 	set_thread(0);
4333 
4334 	bdev_nvme_submit_request(ch1, first_bdev_io);
4335 
4336 	set_thread(1);
4337 
4338 	bdev_nvme_submit_request(ch2, second_bdev_io);
4339 
4340 	poll_thread_times(0, 1);
4341 	poll_thread_times(1, 1);
4342 	poll_thread_times(0, 2);
4343 	poll_thread_times(1, 1);
4344 	poll_thread_times(0, 1);
4345 	poll_thread_times(1, 1);
4346 
4347 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4348 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4349 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4350 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
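	/* The second reset request was queued on nvme_ctrlr1's pending_resets
	 * list; it is popped and executed once the first reset completes.
	 */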
4351 
4352 	poll_threads();
4353 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4354 	poll_threads();
4355 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4356 	poll_threads();
4357 
4358 	CU_ASSERT(ctrlr1->is_failed == false);
4359 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4360 	CU_ASSERT(ctrlr2->is_failed == false);
4361 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4362 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4363 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4364 
4365 	/* Reset of the first path succeeds, reset of the second path fails.
4366 	 * Since we have at least one working path we should not fail RESET IO.
4367 	 */
4368 	ctrlr1->is_failed = true;
4369 	curr_path1->last_failed_tsc = spdk_get_ticks();
4370 	ctrlr2->is_failed = true;
4371 	curr_path2->last_failed_tsc = spdk_get_ticks();
4372 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4373 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4374 
4375 	set_thread(0);
4376 	bdev_nvme_submit_request(ch1, first_bdev_io);
4377 
4378 	set_thread(1);
4379 	bdev_nvme_submit_request(ch2, second_bdev_io);
4380 
4381 	poll_thread_times(0, 1);
4382 	poll_thread_times(1, 1);
4383 	poll_thread_times(0, 2);
4384 	poll_thread_times(1, 1);
4385 	poll_thread_times(0, 1);
4386 	poll_thread_times(1, 1);
4387 
4388 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4389 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4390 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4391 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4392 
4393 	ctrlr2->fail_reset = true;
4394 
4395 	poll_threads();
4396 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4397 	poll_threads();
4398 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4399 	poll_threads();
4400 
4401 	CU_ASSERT(ctrlr1->is_failed == false);
4402 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4403 	CU_ASSERT(ctrlr2->is_failed == true);
4404 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4405 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4406 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4407 
4408 	/* Path 2 recovers */
4409 	ctrlr2->fail_reset = false;
4410 	poll_threads();
4411 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4412 	poll_threads();
4413 
4414 	CU_ASSERT(ctrlr2->is_failed == false);
4415 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4416 
4417 	/* Reset of the first path fails, reset of the second path succeeds.
4418 	 * Since we have at least one working path we should not fail RESET IO.
4419 	 */
4420 	ctrlr1->is_failed = true;
4421 	curr_path1->last_failed_tsc = spdk_get_ticks();
4422 	ctrlr2->is_failed = true;
4423 	curr_path2->last_failed_tsc = spdk_get_ticks();
4424 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4425 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4426 
4427 	set_thread(0);
4428 	bdev_nvme_submit_request(ch1, first_bdev_io);
4429 
4430 	set_thread(1);
4431 	bdev_nvme_submit_request(ch2, second_bdev_io);
4432 
4433 	poll_thread_times(0, 1);
4434 	poll_thread_times(1, 1);
4435 	poll_thread_times(0, 2);
4436 	poll_thread_times(1, 1);
4437 	poll_thread_times(0, 1);
4438 	poll_thread_times(1, 1);
4439 
4440 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4441 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4442 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4443 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4444 
4445 	ctrlr1->fail_reset = true;
4446 
4447 	poll_threads();
4448 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4449 	poll_threads();
4450 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4451 	poll_threads();
4452 
4453 	CU_ASSERT(ctrlr1->is_failed == true);
4454 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4455 	CU_ASSERT(ctrlr2->is_failed == false);
4456 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4457 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4458 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4459 
4460 	/* Path 1 recovers */
4461 	ctrlr1->fail_reset = false;
4462 	poll_threads();
4463 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4464 	poll_threads();
4465 
4466 	CU_ASSERT(ctrlr1->is_failed == false);
4467 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4468 
4469 	/* Reset of both paths fail.
4470 	 * Since we have no working paths we should fail RESET IO.
4471 	 */
4472 	ctrlr1->is_failed = true;
4473 	curr_path1->last_failed_tsc = spdk_get_ticks();
4474 	ctrlr2->is_failed = true;
4475 	curr_path2->last_failed_tsc = spdk_get_ticks();
4476 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4477 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4478 
4479 	set_thread(0);
4480 	bdev_nvme_submit_request(ch1, first_bdev_io);
4481 
4482 	set_thread(1);
4483 	bdev_nvme_submit_request(ch2, second_bdev_io);
4484 
4485 	poll_thread_times(0, 1);
4486 	poll_thread_times(1, 1);
4487 	poll_thread_times(0, 2);
4488 	poll_thread_times(1, 1);
4489 	poll_thread_times(0, 1);
4490 	poll_thread_times(1, 1);
4491 
4492 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4493 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4494 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4495 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4496 
4497 	ctrlr1->fail_reset = true;
4498 	ctrlr2->fail_reset = true;
4499 
4500 	poll_threads();
4501 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4502 	poll_threads();
4503 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4504 	poll_threads();
4505 
4506 	CU_ASSERT(ctrlr1->is_failed == true);
4507 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4508 	CU_ASSERT(ctrlr2->is_failed == true);
4509 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4510 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4511 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4512 
4513 	/* Paths 1 and 2 recover */
4514 	ctrlr1->fail_reset = false;
4515 	ctrlr2->fail_reset = false;
4516 	poll_threads();
4517 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4518 	poll_threads();
4519 
4520 	CU_ASSERT(ctrlr1->is_failed == false);
4521 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4522 	CU_ASSERT(ctrlr2->is_failed == false);
4523 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4524 
4525 	/* Reset of the first path fails, reset of the second path succeeds.
4526 	 * Since we have at least one working path we should not fail RESET IO.
4527 	 *
4528 	 * Here, reset of the first path fails immediately because it is disabled.
4529 	 *
4530 	 * The purpose is to verify the fix for a bug where bdev_io did not hold
4531 	 * the io_path when its reset failed immediately, and continuing the
4532 	 * operation then caused a NULL pointer access.
4533 	 */
4534 	nvme_ctrlr1->disabled = true;
4535 	ctrlr1->is_failed = true;
4536 	curr_path1->last_failed_tsc = spdk_get_ticks();
4537 	ctrlr2->is_failed = true;
4538 	curr_path2->last_failed_tsc = spdk_get_ticks();
4539 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4540 
4541 	set_thread(0);
4542 	bdev_nvme_submit_request(ch1, first_bdev_io);
4543 
4544 	poll_threads();
4545 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4546 	poll_threads();
4547 
4548 	CU_ASSERT(ctrlr1->is_failed == true);
4549 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4550 	CU_ASSERT(ctrlr2->is_failed == false);
4551 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4552 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4553 
4554 	nvme_ctrlr1->disabled = false;
4555 	ctrlr1->is_failed = false;
4556 	curr_path1->last_failed_tsc = 0;
4557 
4558 	set_thread(0);
4559 
4560 	spdk_put_io_channel(ch1);
4561 
4562 	set_thread(1);
4563 
4564 	spdk_put_io_channel(ch2);
4565 
4566 	poll_threads();
4567 
4568 	set_thread(0);
4569 
4570 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4571 	CU_ASSERT(rc == 0);
4572 
4573 	poll_threads();
4574 	spdk_delay_us(1000);
4575 	poll_threads();
4576 
4577 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4578 
4579 	free(first_bdev_io);
4580 	free(second_bdev_io);
4581 }
4582 
4583 static void
4584 test_find_io_path(void)
4585 {
4586 	struct nvme_bdev_channel nbdev_ch = {
4587 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4588 	};
4589 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4590 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4591 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4592 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4593 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4594 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4595 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4596 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4597 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4598 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4599 
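	/* This test builds the channel, qpair, and namespace objects directly on
	 * the stack instead of attaching stub ctrlrs, so that
	 * bdev_nvme_find_io_path() can be exercised in isolation.
	 */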
4600 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4601 
4602 	/* Test if an io_path whose ANA state is not accessible is excluded. */
4603 
4604 	nvme_qpair1.qpair = &qpair1;
4605 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4606 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4607 
4608 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4609 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4610 
4611 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4612 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4613 
4614 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4615 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4616 
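	/* bdev_nvme_find_io_path() caches its selection in current_io_path, so
	 * the cache must be cleared before checking a new expectation.
	 */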
4617 	nbdev_ch.current_io_path = NULL;
4618 
4619 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4620 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4621 
4622 	nbdev_ch.current_io_path = NULL;
4623 
4624 	/* Test if an io_path whose qpair is resetting is excluded. */
4625 
4626 	nvme_qpair1.qpair = NULL;
4627 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4628 
4629 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4630 
4631 	/* Test that an ANA optimized state, or otherwise the first found ANA
4632 	 * non-optimized state, is prioritized.
4633 	 */
4634 
4635 	nvme_qpair1.qpair = &qpair1;
4636 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4637 	nvme_qpair2.qpair = &qpair2;
4638 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4639 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4640 
4641 	nbdev_ch.current_io_path = NULL;
4642 
4643 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4644 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4645 
4646 	nbdev_ch.current_io_path = NULL;
4647 }
4648 
4649 static void
4650 test_retry_io_if_ana_state_is_updating(void)
4651 {
4652 	struct nvme_path_id path = {};
4653 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
4654 	struct spdk_nvme_ctrlr *ctrlr;
4655 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4656 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4657 	struct nvme_ctrlr *nvme_ctrlr;
4658 	const int STRING_SIZE = 32;
4659 	const char *attached_names[STRING_SIZE];
4660 	struct nvme_bdev *bdev;
4661 	struct nvme_ns *nvme_ns;
4662 	struct spdk_bdev_io *bdev_io1;
4663 	struct spdk_io_channel *ch;
4664 	struct nvme_bdev_channel *nbdev_ch;
4665 	struct nvme_io_path *io_path;
4666 	struct nvme_qpair *nvme_qpair;
4667 	int rc;
4668 
4669 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4670 	ut_init_trid(&path.trid);
4671 
4672 	set_thread(0);
4673 
4674 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4675 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4676 
4677 	g_ut_attach_ctrlr_status = 0;
4678 	g_ut_attach_bdev_count = 1;
4679 
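	/* A ctrlr_loss_timeout_sec of -1 makes reconnect attempts continue
	 * indefinitely instead of deleting the ctrlr after a timeout, with one
	 * attempt per reconnect_delay_sec.
	 */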
4680 	opts.ctrlr_loss_timeout_sec = -1;
4681 	opts.reconnect_delay_sec = 1;
4682 	opts.multipath = false;
4683 
4684 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4685 				   attach_ctrlr_done, NULL, &dopts, &opts);
4686 	CU_ASSERT(rc == 0);
4687 
4688 	spdk_delay_us(1000);
4689 	poll_threads();
4690 
4691 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4692 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4693 
4694 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4695 	CU_ASSERT(nvme_ctrlr != NULL);
4696 
4697 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4698 	CU_ASSERT(bdev != NULL);
4699 
4700 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4701 	CU_ASSERT(nvme_ns != NULL);
4702 
4703 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4704 	ut_bdev_io_set_buf(bdev_io1);
4705 
4706 	ch = spdk_get_io_channel(bdev);
4707 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4708 
4709 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4710 
4711 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4712 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4713 
4714 	nvme_qpair = io_path->qpair;
4715 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4716 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4717 
4718 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4719 
4720 	/* If the qpair is connected, the I/O should succeed. */
4721 	bdev_io1->internal.f.in_submit_request = true;
4722 
4723 	bdev_nvme_submit_request(ch, bdev_io1);
4724 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4725 
4726 	poll_threads();
4727 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4728 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4729 
4730 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4731 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4732 	nbdev_ch->current_io_path = NULL;
4733 
4734 	bdev_io1->internal.f.in_submit_request = true;
4735 
4736 	bdev_nvme_submit_request(ch, bdev_io1);
4737 
4738 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4739 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4740 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4741 
4742 	/* The ANA state became accessible while the I/O was queued. */
4743 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4744 
4745 	spdk_delay_us(1000000);
4746 
4747 	poll_thread_times(0, 1);
4748 
4749 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4750 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4751 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4752 
4753 	poll_threads();
4754 
4755 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4756 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4757 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4758 
4759 	free(bdev_io1);
4760 
4761 	spdk_put_io_channel(ch);
4762 
4763 	poll_threads();
4764 
4765 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4766 	CU_ASSERT(rc == 0);
4767 
4768 	poll_threads();
4769 	spdk_delay_us(1000);
4770 	poll_threads();
4771 
4772 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4773 }
4774 
4775 static void
4776 test_retry_io_for_io_path_error(void)
4777 {
4778 	struct nvme_path_id path1 = {}, path2 = {};
4779 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4780 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4781 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4782 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4783 	const int STRING_SIZE = 32;
4784 	const char *attached_names[STRING_SIZE];
4785 	struct nvme_bdev *bdev;
4786 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4787 	struct spdk_bdev_io *bdev_io;
4788 	struct nvme_bdev_io *bio;
4789 	struct spdk_io_channel *ch;
4790 	struct nvme_bdev_channel *nbdev_ch;
4791 	struct nvme_io_path *io_path1, *io_path2;
4792 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4793 	struct ut_nvme_req *req;
4794 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4795 	int rc;
4796 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4797 
4798 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4799 	bdev_opts.multipath = true;
4800 
4801 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4802 	ut_init_trid(&path1.trid);
4803 	ut_init_trid2(&path2.trid);
4804 
4805 	g_opts.bdev_retry_count = 1;
4806 
4807 	set_thread(0);
4808 
4809 	g_ut_attach_ctrlr_status = 0;
4810 	g_ut_attach_bdev_count = 1;
4811 
4812 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4813 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4814 
4815 	ctrlr1->ns[0].uuid = &uuid1;
4816 
4817 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4818 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4819 	CU_ASSERT(rc == 0);
4820 
4821 	spdk_delay_us(1000);
4822 	poll_threads();
4823 
4824 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4825 	poll_threads();
4826 
4827 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4828 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4829 
4830 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4831 	CU_ASSERT(nvme_ctrlr1 != NULL);
4832 
4833 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4834 	CU_ASSERT(bdev != NULL);
4835 
4836 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4837 	CU_ASSERT(nvme_ns1 != NULL);
4838 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4839 
4840 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4841 	ut_bdev_io_set_buf(bdev_io);
4842 
4843 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4844 
4845 	ch = spdk_get_io_channel(bdev);
4846 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4847 
4848 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4849 
4850 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4851 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4852 
4853 	nvme_qpair1 = io_path1->qpair;
4854 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4855 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4856 
4857 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4858 
4859 	/* The I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4860 	bdev_io->internal.f.in_submit_request = true;
4861 
4862 	bdev_nvme_submit_request(ch, bdev_io);
4863 
4864 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4865 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4866 
4867 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4868 	SPDK_CU_ASSERT_FATAL(req != NULL);
4869 
4870 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4871 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4872 	req->cpl.status.dnr = 1;
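	/* The DNR (do not retry) bit makes this completion final even though the
	 * status itself is a retryable path error.
	 */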
4873 
4874 	poll_thread_times(0, 1);
4875 
4876 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4877 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4878 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4879 
4880 	/* The I/O got a temporary I/O path error, but it should succeed after a retry. */
4881 	bdev_io->internal.f.in_submit_request = true;
4882 
4883 	bdev_nvme_submit_request(ch, bdev_io);
4884 
4885 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4886 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4887 
4888 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4889 	SPDK_CU_ASSERT_FATAL(req != NULL);
4890 
4891 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4892 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4893 
4894 	poll_thread_times(0, 1);
4895 
4896 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4897 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4898 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4899 
4900 	poll_threads();
4901 
4902 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4903 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4904 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4905 
4906 	/* Add io_path2 dynamically, and create a multipath configuration. */
4907 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4908 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4909 
4910 	ctrlr2->ns[0].uuid = &uuid1;
4911 
4912 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4913 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4914 	CU_ASSERT(rc == 0);
4915 
4916 	spdk_delay_us(1000);
4917 	poll_threads();
4918 
4919 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4920 	poll_threads();
4921 
4922 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4923 	CU_ASSERT(nvme_ctrlr2 != NULL);
4924 
4925 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4926 	CU_ASSERT(nvme_ns2 != NULL);
4927 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4928 
4929 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4930 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4931 
4932 	nvme_qpair2 = io_path2->qpair;
4933 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4934 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4935 
4936 	/* The I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4937 	 * and deleted, and hence the I/O is aborted. However, io_path2 is available,
4938 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4939 	 */
4940 	bdev_io->internal.f.in_submit_request = true;
4941 
4942 	bdev_nvme_submit_request(ch, bdev_io);
4943 
4944 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4945 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4946 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4947 
4948 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4949 	SPDK_CU_ASSERT_FATAL(req != NULL);
4950 
4951 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4952 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4953 
4954 	poll_thread_times(0, 1);
4955 
4956 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4957 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4958 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4959 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4960 
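	/* Free the disconnected qpair so that only io_path2 is usable when the
	 * queued I/O is retried.
	 */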
4961 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4962 	nvme_qpair1->qpair = NULL;
4963 
4964 	poll_threads();
4965 
4966 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4967 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4968 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4969 
4970 	free(bdev_io);
4971 
4972 	spdk_put_io_channel(ch);
4973 
4974 	poll_threads();
4975 
4976 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4977 	CU_ASSERT(rc == 0);
4978 
4979 	poll_threads();
4980 	spdk_delay_us(1000);
4981 	poll_threads();
4982 
4983 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4984 
4985 	g_opts.bdev_retry_count = 0;
4986 }
4987 
4988 static void
4989 test_retry_io_count(void)
4990 {
4991 	struct nvme_path_id path = {};
4992 	struct spdk_nvme_ctrlr *ctrlr;
4993 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4994 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4995 	struct nvme_ctrlr *nvme_ctrlr;
4996 	const int STRING_SIZE = 32;
4997 	const char *attached_names[STRING_SIZE];
4998 	struct nvme_bdev *bdev;
4999 	struct nvme_ns *nvme_ns;
5000 	struct spdk_bdev_io *bdev_io;
5001 	struct nvme_bdev_io *bio;
5002 	struct spdk_io_channel *ch;
5003 	struct nvme_bdev_channel *nbdev_ch;
5004 	struct nvme_io_path *io_path;
5005 	struct nvme_qpair *nvme_qpair;
5006 	struct ut_nvme_req *req;
5007 	int rc;
5008 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5009 
5010 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5011 	bdev_opts.multipath = false;
5012 
5013 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5014 	ut_init_trid(&path.trid);
5015 
5016 	set_thread(0);
5017 
5018 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5019 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5020 
5021 	g_ut_attach_ctrlr_status = 0;
5022 	g_ut_attach_bdev_count = 1;
5023 
5024 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5025 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5026 	CU_ASSERT(rc == 0);
5027 
5028 	spdk_delay_us(1000);
5029 	poll_threads();
5030 
5031 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5032 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5033 
5034 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5035 	CU_ASSERT(nvme_ctrlr != NULL);
5036 
5037 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5038 	CU_ASSERT(bdev != NULL);
5039 
5040 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5041 	CU_ASSERT(nvme_ns != NULL);
5042 
5043 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5044 	ut_bdev_io_set_buf(bdev_io);
5045 
5046 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5047 
5048 	ch = spdk_get_io_channel(bdev);
5049 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5050 
5051 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5052 
5053 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5054 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5055 
5056 	nvme_qpair = io_path->qpair;
5057 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5058 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5059 
5060 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5061 
5062 	/* If I/O is aborted by request, it should not be retried. */
5063 	g_opts.bdev_retry_count = 1;
5064 
5065 	bdev_io->internal.f.in_submit_request = true;
5066 
5067 	bdev_nvme_submit_request(ch, bdev_io);
5068 
5069 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5070 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5071 
5072 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5073 	SPDK_CU_ASSERT_FATAL(req != NULL);
5074 
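	/* Make the outstanding request complete with ABORTED BY REQUEST status. */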
5075 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5076 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5077 
5078 	poll_thread_times(0, 1);
5079 
5080 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5081 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5082 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5083 
5084 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5085 	 * the failed I/O should not be retried.
5086 	 */
5087 	g_opts.bdev_retry_count = 4;
5088 
5089 	bdev_io->internal.f.in_submit_request = true;
5090 
5091 	bdev_nvme_submit_request(ch, bdev_io);
5092 
5093 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5094 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5095 
5096 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5097 	SPDK_CU_ASSERT_FATAL(req != NULL);
5098 
5099 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5100 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5101 	bio->retry_count = 4;
5102 
5103 	poll_thread_times(0, 1);
5104 
5105 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5106 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5107 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5108 
5109 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
5110 	g_opts.bdev_retry_count = -1;
5111 
5112 	bdev_io->internal.f.in_submit_request = true;
5113 
5114 	bdev_nvme_submit_request(ch, bdev_io);
5115 
5116 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5117 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5118 
5119 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5120 	SPDK_CU_ASSERT_FATAL(req != NULL);
5121 
5122 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5123 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5124 	bio->retry_count = 4;
5125 
5126 	poll_thread_times(0, 1);
5127 
5128 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5129 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5130 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5131 
5132 	poll_threads();
5133 
5134 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5135 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5136 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5137 
5138 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
5139 	 * the failed I/O should be retried.
5140 	 */
5141 	g_opts.bdev_retry_count = 4;
5142 
5143 	bdev_io->internal.f.in_submit_request = true;
5144 
5145 	bdev_nvme_submit_request(ch, bdev_io);
5146 
5147 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5148 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5149 
5150 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5151 	SPDK_CU_ASSERT_FATAL(req != NULL);
5152 
5153 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5154 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5155 	bio->retry_count = 3;
5156 
5157 	poll_thread_times(0, 1);
5158 
5159 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5160 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5161 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5162 
5163 	poll_threads();
5164 
5165 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5166 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5167 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5168 
5169 	free(bdev_io);
5170 
5171 	spdk_put_io_channel(ch);
5172 
5173 	poll_threads();
5174 
5175 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5176 	CU_ASSERT(rc == 0);
5177 
5178 	poll_threads();
5179 	spdk_delay_us(1000);
5180 	poll_threads();
5181 
5182 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5183 
5184 	g_opts.bdev_retry_count = 0;
5185 }
5186 
5187 static void
5188 test_concurrent_read_ana_log_page(void)
5189 {
5190 	struct spdk_nvme_transport_id trid = {};
5191 	struct spdk_nvme_ctrlr *ctrlr;
5192 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5193 	struct nvme_ctrlr *nvme_ctrlr;
5194 	const int STRING_SIZE = 32;
5195 	const char *attached_names[STRING_SIZE];
5196 	int rc;
5197 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5198 
5199 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5200 	bdev_opts.multipath = false;
5201 
5202 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5203 	ut_init_trid(&trid);
5204 
5205 	set_thread(0);
5206 
5207 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
5208 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5209 
5210 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5211 
5212 	g_ut_attach_ctrlr_status = 0;
5213 	g_ut_attach_bdev_count = 1;
5214 
5215 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
5216 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5217 	CU_ASSERT(rc == 0);
5218 
5219 	spdk_delay_us(1000);
5220 	poll_threads();
5221 
5222 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5223 	poll_threads();
5224 
5225 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5226 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5227 
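	/* The first read request should be accepted and should start updating
	 * the ANA log page.
	 */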
5228 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5229 
5230 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5231 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5232 
5233 	/* A subsequent read request should be rejected. */
5234 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5235 
5236 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5237 
5238 	set_thread(1);
5239 
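	/* A read request from another thread should also be rejected while the
	 * ANA log page is being updated.
	 */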
5240 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5241 
5242 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5243 
5244 	/* A reset request issued while reading the ANA log page should not be rejected. */
5245 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5246 	CU_ASSERT(rc == 0);
5247 
5248 	poll_threads();
5249 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5250 	poll_threads();
5251 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5252 	poll_threads();
5253 
5254 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5255 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5256 
5257 	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
5258 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5259 	CU_ASSERT(rc == 0);
5260 
5261 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5262 
5263 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5264 
5265 	poll_threads();
5266 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5267 	poll_threads();
5268 
5269 	set_thread(0);
5270 
5271 	/* It is possible that the target sent an ANA change for inactive namespaces.
5272 	 *
5273 	 * Previously, an assert() was added because this case was considered unlikely.
5274 	 * However, the assert() was hit in a real environment.
5275 	 *
5276 	 * Hence, remove the assert() and add this unit test case.
5277 	 *
5278 	 * Simulate this case by depopulating namespaces and then parsing an ANA
5279 	 * log page created when all namespaces were active.
5280 	 * Then, check if parsing the ANA log page completes successfully.
5281 	 */
5282 	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
5283 
5284 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
5285 	CU_ASSERT(rc == 0);
5286 
5287 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5288 	CU_ASSERT(rc == 0);
5289 
5290 	poll_threads();
5291 	spdk_delay_us(1000);
5292 	poll_threads();
5293 
5294 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5295 }
5296 
5297 static void
5298 test_retry_io_for_ana_error(void)
5299 {
5300 	struct nvme_path_id path = {};
5301 	struct spdk_nvme_ctrlr *ctrlr;
5302 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5303 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5304 	struct nvme_ctrlr *nvme_ctrlr;
5305 	const int STRING_SIZE = 32;
5306 	const char *attached_names[STRING_SIZE];
5307 	struct nvme_bdev *bdev;
5308 	struct nvme_ns *nvme_ns;
5309 	struct spdk_bdev_io *bdev_io;
5310 	struct nvme_bdev_io *bio;
5311 	struct spdk_io_channel *ch;
5312 	struct nvme_bdev_channel *nbdev_ch;
5313 	struct nvme_io_path *io_path;
5314 	struct nvme_qpair *nvme_qpair;
5315 	struct ut_nvme_req *req;
5316 	uint64_t now;
5317 	int rc;
5318 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5319 
5320 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5321 	bdev_opts.multipath = false;
5322 
5323 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5324 	ut_init_trid(&path.trid);
5325 
5326 	g_opts.bdev_retry_count = 1;
5327 
5328 	set_thread(0);
5329 
5330 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5331 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5332 
5333 	g_ut_attach_ctrlr_status = 0;
5334 	g_ut_attach_bdev_count = 1;
5335 
5336 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5337 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5338 	CU_ASSERT(rc == 0);
5339 
5340 	spdk_delay_us(1000);
5341 	poll_threads();
5342 
5343 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5344 	poll_threads();
5345 
5346 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5347 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5348 
5349 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5350 	CU_ASSERT(nvme_ctrlr != NULL);
5351 
5352 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5353 	CU_ASSERT(bdev != NULL);
5354 
5355 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5356 	CU_ASSERT(nvme_ns != NULL);
5357 
5358 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5359 	ut_bdev_io_set_buf(bdev_io);
5360 
5361 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5362 
5363 	ch = spdk_get_io_channel(bdev);
5364 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5365 
5366 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5367 
5368 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5369 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5370 
5371 	nvme_qpair = io_path->qpair;
5372 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5373 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5374 
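	/* Record the current tick to verify the retry scheduling below. */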
5375 	now = spdk_get_ticks();
5376 
5377 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5378 
5379 	/* If an I/O gets an ANA error, it should be queued, the corresponding namespace
5380 	 * should be frozen, and its ANA state should be updated.
5381 	 */
5382 	bdev_io->internal.f.in_submit_request = true;
5383 
5384 	bdev_nvme_submit_request(ch, bdev_io);
5385 
5386 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5387 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5388 
5389 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5390 	SPDK_CU_ASSERT_FATAL(req != NULL);
5391 
5392 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5393 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5394 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5395 
5396 	poll_thread_times(0, 1);
5397 
5398 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5399 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5400 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5401 	/* I/O should be retried immediately. */
5402 	CU_ASSERT(bio->retry_ticks == now);
5403 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5404 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5405 
5406 	poll_threads();
5407 
5408 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5409 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5410 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5411 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5412 	/* The I/O should be retried after a second if no I/O path was found but
5413 	 * one may become available.
5414 	 */
5415 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5416 
5417 	/* The namespace should be unfrozen after its ANA state update completes. */
5418 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5419 	poll_threads();
5420 
5421 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5422 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5423 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5424 
5425 	/* Retrying the queued I/O should succeed. */
5426 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5427 	poll_threads();
5428 
5429 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5430 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5431 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5432 
5433 	free(bdev_io);
5434 
5435 	spdk_put_io_channel(ch);
5436 
5437 	poll_threads();
5438 
5439 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5440 	CU_ASSERT(rc == 0);
5441 
5442 	poll_threads();
5443 	spdk_delay_us(1000);
5444 	poll_threads();
5445 
5446 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5447 
5448 	g_opts.bdev_retry_count = 0;
5449 }
5450 
5451 static void
5452 test_check_io_error_resiliency_params(void)
5453 {
5454 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5455 	 * 3rd parameter is fast_io_fail_timeout_sec.
5456 	 */
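	/* The combinations below exercise the following rules:
	 * - ctrlr_loss_timeout_sec must be -1, 0, or positive.
	 * - If ctrlr_loss_timeout_sec is not 0, reconnect_delay_sec must be non-zero
	 *   and, when ctrlr_loss_timeout_sec is positive, not larger than it.
	 * - If ctrlr_loss_timeout_sec is 0, reconnect_delay_sec and
	 *   fast_io_fail_timeout_sec must also be 0.
	 * - If fast_io_fail_timeout_sec is not 0, it must be at least
	 *   reconnect_delay_sec and, when ctrlr_loss_timeout_sec is positive,
	 *   at most ctrlr_loss_timeout_sec.
	 */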
5457 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5458 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5459 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5460 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5461 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5462 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5463 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5464 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5465 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5466 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5467 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5468 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5469 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5470 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5471 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5472 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5473 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5474 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5475 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5476 }
5477 
5478 static void
5479 test_retry_io_if_ctrlr_is_resetting(void)
5480 {
5481 	struct nvme_path_id path = {};
5482 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5483 	struct spdk_nvme_ctrlr *ctrlr;
5484 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5485 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5486 	struct nvme_ctrlr *nvme_ctrlr;
5487 	const int STRING_SIZE = 32;
5488 	const char *attached_names[STRING_SIZE];
5489 	struct nvme_bdev *bdev;
5490 	struct nvme_ns *nvme_ns;
5491 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5492 	struct spdk_io_channel *ch;
5493 	struct nvme_bdev_channel *nbdev_ch;
5494 	struct nvme_io_path *io_path;
5495 	struct nvme_qpair *nvme_qpair;
5496 	int rc;
5497 
5498 	g_opts.bdev_retry_count = 1;
5499 
5500 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5501 	ut_init_trid(&path.trid);
5502 
5503 	set_thread(0);
5504 
5505 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5506 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5507 
5508 	g_ut_attach_ctrlr_status = 0;
5509 	g_ut_attach_bdev_count = 1;
5510 
5511 	opts.ctrlr_loss_timeout_sec = -1;
5512 	opts.reconnect_delay_sec = 1;
5513 	opts.multipath = false;
5514 
5515 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5516 				   attach_ctrlr_done, NULL, &dopts, &opts);
5517 	CU_ASSERT(rc == 0);
5518 
5519 	spdk_delay_us(1000);
5520 	poll_threads();
5521 
5522 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5523 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5524 
5525 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5526 	CU_ASSERT(nvme_ctrlr != NULL);
5527 
5528 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5529 	CU_ASSERT(bdev != NULL);
5530 
5531 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5532 	CU_ASSERT(nvme_ns != NULL);
5533 
5534 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5535 	ut_bdev_io_set_buf(bdev_io1);
5536 
5537 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5538 	ut_bdev_io_set_buf(bdev_io2);
5539 
5540 	ch = spdk_get_io_channel(bdev);
5541 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5542 
5543 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5544 
5545 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5546 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5547 
5548 	nvme_qpair = io_path->qpair;
5549 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5550 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5551 
5552 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5553 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5554 
5555 	/* If qpair is connected, I/O should succeed. */
5556 	bdev_io1->internal.f.in_submit_request = true;
5557 
5558 	bdev_nvme_submit_request(ch, bdev_io1);
5559 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5560 
5561 	poll_threads();
5562 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5563 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5564 
5565 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5566 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5567 	 * while resetting the nvme_ctrlr.
5568 	 */
5569 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5570 	ctrlr->is_failed = true;
5571 
5572 	poll_thread_times(0, 5);
5573 
5574 	CU_ASSERT(nvme_qpair->qpair == NULL);
5575 	CU_ASSERT(nvme_ctrlr->resetting == true);
5576 	CU_ASSERT(ctrlr->is_failed == false);
5577 
5578 	bdev_io1->internal.f.in_submit_request = true;
5579 
5580 	bdev_nvme_submit_request(ch, bdev_io1);
5581 
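	/* Advance the tick so that bdev_io2 is queued after bdev_io1 in the retry list. */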
5582 	spdk_delay_us(1);
5583 
5584 	bdev_io2->internal.f.in_submit_request = true;
5585 
5586 	bdev_nvme_submit_request(ch, bdev_io2);
5587 
5588 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5589 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5590 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5591 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
5592 			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
5593 				     retry_link)));
5594 
5595 	poll_threads();
5596 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5597 	poll_threads();
5598 
5599 	CU_ASSERT(nvme_qpair->qpair != NULL);
5600 	CU_ASSERT(nvme_ctrlr->resetting == false);
5601 
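	/* Advance the tick to just before bdev_io2's retry deadline. Only bdev_io1
	 * should be retried at this point.
	 */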
5602 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5603 
5604 	poll_thread_times(0, 1);
5605 
5606 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5607 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5608 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5609 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5610 
5611 	poll_threads();
5612 
5613 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5614 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5615 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5616 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5617 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5618 
5619 	spdk_delay_us(1);
5620 
5621 	poll_thread_times(0, 1);
5622 
5623 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5624 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5625 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5626 
5627 	poll_threads();
5628 
5629 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5630 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == false);
5631 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5632 
5633 	free(bdev_io1);
5634 	free(bdev_io2);
5635 
5636 	spdk_put_io_channel(ch);
5637 
5638 	poll_threads();
5639 
5640 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5641 	CU_ASSERT(rc == 0);
5642 
5643 	poll_threads();
5644 	spdk_delay_us(1000);
5645 	poll_threads();
5646 
5647 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5648 
5649 	g_opts.bdev_retry_count = 0;
5650 }
5651 
5652 static void
5653 test_reconnect_ctrlr(void)
5654 {
5655 	struct spdk_nvme_transport_id trid = {};
5656 	struct spdk_nvme_ctrlr ctrlr = {};
5657 	struct nvme_ctrlr *nvme_ctrlr;
5658 	struct spdk_io_channel *ch1, *ch2;
5659 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5660 	int rc;
5661 
5662 	ut_init_trid(&trid);
5663 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5664 
5665 	set_thread(0);
5666 
5667 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5668 	CU_ASSERT(rc == 0);
5669 
5670 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5671 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5672 
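	/* With these options, reconnect is retried every second and the ctrlr is
	 * deleted when reconnection keeps failing for 2 seconds.
	 */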
5673 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5674 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5675 
5676 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5677 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5678 
5679 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5680 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5681 
5682 	set_thread(1);
5683 
5684 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5685 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5686 
5687 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5688 
5689 	/* Reset starts from thread 1. */
5690 	set_thread(1);
5691 
5692 	/* The reset should fail and a reconnect timer should be registered. */
5693 	ctrlr.fail_reset = true;
5694 	ctrlr.is_failed = true;
5695 
5696 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5697 	CU_ASSERT(rc == 0);
5698 	CU_ASSERT(nvme_ctrlr->resetting == true);
5699 	CU_ASSERT(ctrlr.is_failed == true);
5700 
5701 	poll_threads();
5702 
5703 	CU_ASSERT(nvme_ctrlr->resetting == false);
5704 	CU_ASSERT(ctrlr.is_failed == false);
5705 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5706 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5707 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5708 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5709 
5710 	/* A new reset starts from thread 1. */
5711 	set_thread(1);
5712 
5713 	/* The reset should cancel the reconnect timer and start reconnecting immediately.
5714 	 * Then, the reset should fail and a reconnect timer should be registered again.
5715 	 */
5716 	ctrlr.fail_reset = true;
5717 	ctrlr.is_failed = true;
5718 
5719 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5720 	CU_ASSERT(rc == 0);
5721 	CU_ASSERT(nvme_ctrlr->resetting == true);
5722 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5723 	CU_ASSERT(ctrlr.is_failed == true);
5724 
5725 	poll_threads();
5726 
5727 	CU_ASSERT(nvme_ctrlr->resetting == false);
5728 	CU_ASSERT(ctrlr.is_failed == false);
5729 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5730 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5731 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5732 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5733 
5734 	/* Then a reconnect retry should succeed. */
5735 	ctrlr.fail_reset = false;
5736 
5737 	spdk_delay_us(SPDK_SEC_TO_USEC);
5738 	poll_thread_times(0, 1);
5739 
5740 	CU_ASSERT(nvme_ctrlr->resetting == true);
5741 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5742 
5743 	poll_threads();
5744 
5745 	CU_ASSERT(nvme_ctrlr->resetting == false);
5746 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5747 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5748 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5749 
5750 	/* The reset should fail and a reconnect timer should be registered. */
5751 	ctrlr.fail_reset = true;
5752 	ctrlr.is_failed = true;
5753 
5754 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5755 	CU_ASSERT(rc == 0);
5756 	CU_ASSERT(nvme_ctrlr->resetting == true);
5757 	CU_ASSERT(ctrlr.is_failed == true);
5758 
5759 	poll_threads();
5760 
5761 	CU_ASSERT(nvme_ctrlr->resetting == false);
5762 	CU_ASSERT(ctrlr.is_failed == false);
5763 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5764 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5765 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5766 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5767 
5768 	/* Then a reconnect retry should still fail. */
5769 	spdk_delay_us(SPDK_SEC_TO_USEC);
5770 	poll_thread_times(0, 1);
5771 
5772 	CU_ASSERT(nvme_ctrlr->resetting == true);
5773 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5774 
5775 	poll_threads();
5776 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5777 	poll_threads();
5778 
5779 	CU_ASSERT(nvme_ctrlr->resetting == false);
5780 	CU_ASSERT(ctrlr.is_failed == false);
5781 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5782 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5783 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5784 
5785 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5786 	spdk_delay_us(SPDK_SEC_TO_USEC);
5787 	poll_threads();
5788 
5789 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5790 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5791 	CU_ASSERT(nvme_ctrlr->destruct == true);
5792 
5793 	spdk_put_io_channel(ch2);
5794 
5795 	set_thread(0);
5796 
5797 	spdk_put_io_channel(ch1);
5798 
5799 	poll_threads();
5800 	spdk_delay_us(1000);
5801 	poll_threads();
5802 
5803 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5804 }
5805 
5806 static struct nvme_path_id *
5807 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5808 		       const struct spdk_nvme_transport_id *trid)
5809 {
5810 	struct nvme_path_id *p;
5811 
5812 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5813 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5814 			break;
5815 		}
5816 	}
5817 
5818 	return p;
5819 }
5820 
5821 static void
5822 test_retry_failover_ctrlr(void)
5823 {
5824 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5825 	struct spdk_nvme_ctrlr ctrlr = {};
5826 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5827 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5828 	struct spdk_io_channel *ch;
5829 	struct nvme_ctrlr_channel *ctrlr_ch;
5830 	int rc;
5831 
5832 	ut_init_trid(&trid1);
5833 	ut_init_trid2(&trid2);
5834 	ut_init_trid3(&trid3);
5835 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5836 
5837 	set_thread(0);
5838 
5839 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5840 	CU_ASSERT(rc == 0);
5841 
5842 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5843 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5844 
5845 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5846 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5847 
5848 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5849 	CU_ASSERT(rc == 0);
5850 
5851 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5852 	CU_ASSERT(rc == 0);
5853 
5854 	ch = spdk_get_io_channel(nvme_ctrlr);
5855 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5856 
5857 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5858 
5859 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5860 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5861 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5862 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5863 
5864 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5865 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5866 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5867 
5868 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5869 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5870 
5871 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5872 	 * and a reconnect timer is started. */
5873 	ctrlr.fail_reset = true;
5874 	ctrlr.is_failed = true;
5875 
5876 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5877 	CU_ASSERT(rc == 0);
5878 
5879 	poll_threads();
5880 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5881 	poll_threads();
5882 
5883 	CU_ASSERT(nvme_ctrlr->resetting == false);
5884 	CU_ASSERT(ctrlr.is_failed == false);
5885 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5886 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5887 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5888 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5889 
5890 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5891 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5892 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5893 
5894 	/* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is
5895 	 * switched to trid2 but reset is not started.
5896 	 */
5897 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5898 	CU_ASSERT(rc == -EALREADY);
5899 
5900 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5901 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5902 
5903 	CU_ASSERT(nvme_ctrlr->resetting == false);
5904 
5905 	/* If reconnect succeeds, trid2 should be the active path_id */
5906 	ctrlr.fail_reset = false;
5907 
5908 	spdk_delay_us(SPDK_SEC_TO_USEC);
5909 	poll_thread_times(0, 1);
5910 
5911 	CU_ASSERT(nvme_ctrlr->resetting == true);
5912 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5913 
5914 	poll_threads();
5915 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5916 	poll_threads();
5917 
5918 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5919 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5920 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5921 	CU_ASSERT(nvme_ctrlr->resetting == false);
5922 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5923 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5924 
5925 	spdk_put_io_channel(ch);
5926 
5927 	poll_threads();
5928 
5929 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5930 	CU_ASSERT(rc == 0);
5931 
5932 	poll_threads();
5933 	spdk_delay_us(1000);
5934 	poll_threads();
5935 
5936 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5937 }
5938 
5939 static void
5940 test_fail_path(void)
5941 {
5942 	struct nvme_path_id path = {};
5943 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5944 	struct spdk_nvme_ctrlr *ctrlr;
5945 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5946 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5947 	struct nvme_ctrlr *nvme_ctrlr;
5948 	const int STRING_SIZE = 32;
5949 	const char *attached_names[STRING_SIZE];
5950 	struct nvme_bdev *bdev;
5951 	struct nvme_ns *nvme_ns;
5952 	struct spdk_bdev_io *bdev_io;
5953 	struct spdk_io_channel *ch;
5954 	struct nvme_bdev_channel *nbdev_ch;
5955 	struct nvme_io_path *io_path;
5956 	struct nvme_ctrlr_channel *ctrlr_ch;
5957 	int rc;
5958 
5959 	/* The test scenario is the following.
5960 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5961 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5962 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5963 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5964 	 *   comes first. The queued I/O is failed.
5965 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5966 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5967 	 */
5968 
5969 	g_opts.bdev_retry_count = 1;
5970 
5971 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5972 	ut_init_trid(&path.trid);
5973 
5974 	set_thread(0);
5975 
5976 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5977 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5978 
5979 	g_ut_attach_ctrlr_status = 0;
5980 	g_ut_attach_bdev_count = 1;
5981 
5982 	opts.ctrlr_loss_timeout_sec = 4;
5983 	opts.reconnect_delay_sec = 1;
5984 	opts.fast_io_fail_timeout_sec = 2;
5985 	opts.multipath = false;
5986 
5987 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5988 				   attach_ctrlr_done, NULL, &dopts, &opts);
5989 	CU_ASSERT(rc == 0);
5990 
5991 	spdk_delay_us(1000);
5992 	poll_threads();
5993 
5994 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5995 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5996 
5997 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5998 	CU_ASSERT(nvme_ctrlr != NULL);
5999 
6000 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6001 	CU_ASSERT(bdev != NULL);
6002 
6003 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
6004 	CU_ASSERT(nvme_ns != NULL);
6005 
6006 	ch = spdk_get_io_channel(bdev);
6007 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6008 
6009 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6010 
6011 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
6012 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6013 
6014 	ctrlr_ch = io_path->qpair->ctrlr_ch;
6015 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
6016 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
6017 
6018 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6019 	ut_bdev_io_set_buf(bdev_io);
6020 
6022 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
6023 	ctrlr->fail_reset = true;
6024 	ctrlr->is_failed = true;
6025 
6026 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6027 	CU_ASSERT(rc == 0);
6028 	CU_ASSERT(nvme_ctrlr->resetting == true);
6029 	CU_ASSERT(ctrlr->is_failed == true);
6030 
6031 	poll_threads();
6032 
6033 	CU_ASSERT(nvme_ctrlr->resetting == false);
6034 	CU_ASSERT(ctrlr->is_failed == false);
6035 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6036 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6037 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
6038 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6039 
6040 	/* I/O should be queued. */
6041 	bdev_io->internal.f.in_submit_request = true;
6042 
6043 	bdev_nvme_submit_request(ch, bdev_io);
6044 
6045 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6046 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6047 
6048 	/* After a second, the I/O should still be queued and the ctrlr should still
6049 	 * be recovering.
6050 	 */
6051 	spdk_delay_us(SPDK_SEC_TO_USEC);
6052 	poll_threads();
6053 
6054 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6055 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6056 
6057 	CU_ASSERT(nvme_ctrlr->resetting == false);
6058 	CU_ASSERT(ctrlr->is_failed == false);
6059 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6060 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6061 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6062 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6063 
6064 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6065 
6066 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
6067 	spdk_delay_us(SPDK_SEC_TO_USEC);
6068 	poll_threads();
6069 
6070 	CU_ASSERT(nvme_ctrlr->resetting == false);
6071 	CU_ASSERT(ctrlr->is_failed == false);
6072 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6073 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6074 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6075 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
6076 
6077 	/* Then within a second, pending I/O should be failed. */
6078 	spdk_delay_us(SPDK_SEC_TO_USEC);
6079 	poll_threads();
6080 
6081 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6082 	poll_threads();
6083 
6084 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6085 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6086 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
6087 
6088 	/* Another I/O submission should be failed immediately. */
6089 	bdev_io->internal.f.in_submit_request = true;
6090 
6091 	bdev_nvme_submit_request(ch, bdev_io);
6092 
6093 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6094 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6095 
6096 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr should
6097 	 * be deleted.
6098 	 */
6099 	spdk_delay_us(SPDK_SEC_TO_USEC);
6100 	poll_threads();
6101 
6102 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6103 	poll_threads();
6104 
6105 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
6106 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
6107 	CU_ASSERT(nvme_ctrlr->destruct == true);
6108 
6109 	spdk_put_io_channel(ch);
6110 
6111 	poll_threads();
6112 	spdk_delay_us(1000);
6113 	poll_threads();
6114 
6115 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6116 
6117 	free(bdev_io);
6118 
6119 	g_opts.bdev_retry_count = 0;
6120 }
6121 
6122 static void
6123 test_nvme_ns_cmp(void)
6124 {
6125 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
6126 
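	/* nvme_ns_cmp() should order namespaces by namespace ID. */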
6127 	nvme_ns1.id = 0;
6128 	nvme_ns2.id = UINT32_MAX;
6129 
6130 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
6131 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
6132 }
6133 
6134 static void
6135 test_ana_transition(void)
6136 {
6137 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6138 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6139 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6140 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
6141 
6142 	/* case 1: The ANA transition timedout flag is cleared. */
6143 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6144 	nvme_ns.ana_transition_timedout = true;
6145 
6146 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6147 
6148 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6149 
6150 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6151 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6152 
6153 	/* case 2: ANATT timer is kept. */
6154 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6155 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6156 			      &nvme_ns,
6157 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6158 
6159 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6160 
6161 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6162 
6163 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6164 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6165 
6166 	/* case 3: ANATT timer is stopped. */
6167 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6168 
6169 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6170 
6171 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6172 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6173 
6174 	/* case 4: ANATT timer is started. */
6175 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6176 
6177 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6178 
6179 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6180 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6181 
6182 	/* case 5: ANATT timer expires. */
6183 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6184 
6185 	poll_threads();
6186 
6187 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6188 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6189 }
6190 
6191 static void
6192 _set_preferred_path_cb(void *cb_arg, int rc)
6193 {
6194 	bool *done = cb_arg;
6195 
6196 	*done = true;
6197 }
6198 
6199 static void
6200 test_set_preferred_path(void)
6201 {
6202 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6203 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6204 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6205 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6206 	const int STRING_SIZE = 32;
6207 	const char *attached_names[STRING_SIZE];
6208 	struct nvme_bdev *bdev;
6209 	struct spdk_io_channel *ch;
6210 	struct nvme_bdev_channel *nbdev_ch;
6211 	struct nvme_io_path *io_path;
6212 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6213 	const struct spdk_nvme_ctrlr_data *cdata;
6214 	bool done;
6215 	int rc;
6216 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6217 
6218 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6219 	bdev_opts.multipath = true;
6220 
6221 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6222 	ut_init_trid(&path1.trid);
6223 	ut_init_trid2(&path2.trid);
6224 	ut_init_trid3(&path3.trid);
6225 	g_ut_attach_ctrlr_status = 0;
6226 	g_ut_attach_bdev_count = 1;
6227 
6228 	set_thread(0);
6229 
6230 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6231 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6232 
6233 	ctrlr1->ns[0].uuid = &uuid1;
6234 
6235 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6236 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6237 	CU_ASSERT(rc == 0);
6238 
6239 	spdk_delay_us(1000);
6240 	poll_threads();
6241 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6242 	poll_threads();
6243 
6244 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6245 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6246 
6247 	ctrlr2->ns[0].uuid = &uuid1;
6248 
6249 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6250 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6251 	CU_ASSERT(rc == 0);
6252 
6253 	spdk_delay_us(1000);
6254 	poll_threads();
6255 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6256 	poll_threads();
6257 
6258 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6259 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6260 
6261 	ctrlr3->ns[0].uuid = &uuid1;
6262 
6263 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6264 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6265 	CU_ASSERT(rc == 0);
6266 
6267 	spdk_delay_us(1000);
6268 	poll_threads();
6269 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6270 	poll_threads();
6271 
6272 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6273 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6274 
6275 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6276 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6277 
6278 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6279 
6280 	ch = spdk_get_io_channel(bdev);
6281 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6282 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6283 
6284 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6285 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6286 
6287 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6288 
6289 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6290 	 * should return io_path to ctrlr2.
6291 	 */
6292 
6293 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6294 	done = false;
6295 
6296 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6297 
6298 	poll_threads();
6299 	CU_ASSERT(done == true);
6300 
6301 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6302 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6303 
6304 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6305 
6306 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6307 	 * acquired, find_io_path() should return io_path to ctrlr3.
6308 	 */
6309 
6310 	spdk_put_io_channel(ch);
6311 
6312 	poll_threads();
6313 
6314 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6315 	done = false;
6316 
6317 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6318 
6319 	poll_threads();
6320 	CU_ASSERT(done == true);
6321 
6322 	ch = spdk_get_io_channel(bdev);
6323 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6324 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6325 
6326 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6327 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6328 
6329 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6330 
6331 	spdk_put_io_channel(ch);
6332 
6333 	poll_threads();
6334 
6335 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6336 	CU_ASSERT(rc == 0);
6337 
6338 	poll_threads();
6339 	spdk_delay_us(1000);
6340 	poll_threads();
6341 
6342 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6343 }
6344 
6345 static void
6346 test_find_next_io_path(void)
6347 {
6348 	struct nvme_bdev_channel nbdev_ch = {
6349 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6350 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6351 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6352 	};
6353 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6354 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6355 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6356 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6357 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6358 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6359 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6360 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6361 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6362 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6363 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6364 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6365 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6366 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6367 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6368 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6369 
6370 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6371 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6372 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6373 
6374 	/* Test the case where nbdev_ch->current_io_path is set. The case where
6375 	 * current_io_path is NULL is covered in test_find_io_path.
6376 	 */
6377 
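	/* With the round robin selector, the expected behavior is that the search
	 * starts from the io_path next to the current one and the first optimized
	 * io_path found is returned, falling back to a non-optimized io_path if
	 * no optimized one is found.
	 */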
6378 	nbdev_ch.current_io_path = &io_path2;
6379 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6380 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6381 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6382 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6383 
6384 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6385 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6386 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6387 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6388 
6389 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6390 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6391 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6392 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6393 
6394 	nbdev_ch.current_io_path = &io_path3;
6395 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6396 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6397 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6398 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6399 
6400 	/* Test if the next io_path is selected according to rr_min_io. */
6401 
6402 	nbdev_ch.current_io_path = NULL;
6403 	nbdev_ch.rr_min_io = 2;
6404 	nbdev_ch.rr_counter = 0;
6405 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6406 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6407 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6408 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6409 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6410 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6411 
6412 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6413 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6414 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6415 }
6416 
6417 static void
6418 test_find_io_path_min_qd(void)
6419 {
6420 	struct nvme_bdev_channel nbdev_ch = {
6421 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6422 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6423 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6424 	};
6425 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6426 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6427 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6428 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6429 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6430 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6431 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6432 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6433 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6434 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6435 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6436 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6437 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6438 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6439 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6440 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6441 
6442 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6443 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6444 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6445 
6446 	/* Test if the path with the minimum number of outstanding requests or the
6447 	 * ANA optimized state is prioritized when using the least queue depth selector.
6448 	 */
6449 	qpair1.num_outstanding_reqs = 2;
6450 	qpair2.num_outstanding_reqs = 1;
6451 	qpair3.num_outstanding_reqs = 0;
6452 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6453 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6454 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6455 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6456 
6457 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6458 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6459 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6460 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6461 
6462 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6463 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6464 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6465 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6466 
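	/* If the queue depth of io_path2 grows, io_path1 should be selected instead. */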
6467 	qpair2.num_outstanding_reqs = 4;
6468 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6469 }
6470 
6471 static void
6472 test_disable_auto_failback(void)
6473 {
6474 	struct nvme_path_id path1 = {}, path2 = {};
6475 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6476 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6477 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6478 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6479 	struct nvme_ctrlr *nvme_ctrlr1;
6480 	const int STRING_SIZE = 32;
6481 	const char *attached_names[STRING_SIZE];
6482 	struct nvme_bdev *bdev;
6483 	struct spdk_io_channel *ch;
6484 	struct nvme_bdev_channel *nbdev_ch;
6485 	struct nvme_io_path *io_path;
6486 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6487 	const struct spdk_nvme_ctrlr_data *cdata;
6488 	bool done;
6489 	int rc;
6490 
6491 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6492 	ut_init_trid(&path1.trid);
6493 	ut_init_trid2(&path2.trid);
6494 	g_ut_attach_ctrlr_status = 0;
6495 	g_ut_attach_bdev_count = 1;
6496 
6497 	g_opts.disable_auto_failback = true;
6498 
6499 	opts.ctrlr_loss_timeout_sec = -1;
6500 	opts.reconnect_delay_sec = 1;
6501 	opts.multipath = true;
6502 
6503 	set_thread(0);
6504 
6505 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6506 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6507 
6508 	ctrlr1->ns[0].uuid = &uuid1;
6509 
6510 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6511 				   attach_ctrlr_done, NULL, &dopts, &opts);
6512 	CU_ASSERT(rc == 0);
6513 
6514 	spdk_delay_us(1000);
6515 	poll_threads();
6516 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6517 	poll_threads();
6518 
6519 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6520 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6521 
6522 	ctrlr2->ns[0].uuid = &uuid1;
6523 
6524 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6525 				   attach_ctrlr_done, NULL, &dopts, &opts);
6526 	CU_ASSERT(rc == 0);
6527 
6528 	spdk_delay_us(1000);
6529 	poll_threads();
6530 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6531 	poll_threads();
6532 
6533 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6534 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6535 
6536 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6537 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6538 
6539 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6540 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6541 
6542 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6543 
6544 	ch = spdk_get_io_channel(bdev);
6545 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6546 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6547 
6548 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6549 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6550 
6551 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6552 
6553 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6554 	ctrlr1->fail_reset = true;
6555 	ctrlr1->is_failed = true;
6556 
6557 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6558 
6559 	poll_threads();
6560 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6561 	poll_threads();
6562 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6563 	poll_threads();
6564 
6565 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6566 
6567 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6568 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6569 
6570 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6571 
6572 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6573 	 * Hence, io_path to ctrlr2 should still be used.
6574 	 */
6575 	ctrlr1->fail_reset = false;
6576 
6577 	spdk_delay_us(SPDK_SEC_TO_USEC);
6578 	poll_threads();
6579 
6580 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6581 
6582 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6583 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6584 
6585 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6586 
6587 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to ctrlr1
6588 	 * should be used again.
6589 	 */
6590 
6591 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6592 	done = false;
6593 
6594 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6595 
6596 	poll_threads();
6597 	CU_ASSERT(done == true);
6598 
6599 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6600 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6601 
6602 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6603 
6604 	spdk_put_io_channel(ch);
6605 
6606 	poll_threads();
6607 
6608 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6609 	CU_ASSERT(rc == 0);
6610 
6611 	poll_threads();
6612 	spdk_delay_us(1000);
6613 	poll_threads();
6614 
6615 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6616 
6617 	g_opts.disable_auto_failback = false;
6618 }
6619 
6620 static void
6621 ut_set_multipath_policy_done(void *cb_arg, int rc)
6622 {
6623 	int *done = cb_arg;
6624 
6625 	SPDK_CU_ASSERT_FATAL(done != NULL);
6626 	*done = rc;
6627 }
6628 
6629 static void
6630 test_set_multipath_policy(void)
6631 {
6632 	struct nvme_path_id path1 = {}, path2 = {};
6633 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6634 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6635 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6636 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6637 	const int STRING_SIZE = 32;
6638 	const char *attached_names[STRING_SIZE];
6639 	struct nvme_bdev *bdev;
6640 	struct spdk_io_channel *ch;
6641 	struct nvme_bdev_channel *nbdev_ch;
6642 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6643 	int done;
6644 	int rc;
6645 
6646 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6647 	ut_init_trid(&path1.trid);
6648 	ut_init_trid2(&path2.trid);
6649 	g_ut_attach_ctrlr_status = 0;
6650 	g_ut_attach_bdev_count = 1;
6651 
6652 	g_opts.disable_auto_failback = true;
6653 
6654 	opts.ctrlr_loss_timeout_sec = -1;
6655 	opts.reconnect_delay_sec = 1;
6656 	opts.multipath = true;
6657 
6658 	set_thread(0);
6659 
6660 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6661 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6662 
6663 	ctrlr1->ns[0].uuid = &uuid1;
6664 
6665 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6666 				   attach_ctrlr_done, NULL, &dopts, &opts);
6667 	CU_ASSERT(rc == 0);
6668 
6669 	spdk_delay_us(1000);
6670 	poll_threads();
6671 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6672 	poll_threads();
6673 
6674 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6675 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6676 
6677 	ctrlr2->ns[0].uuid = &uuid1;
6678 
6679 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6680 				   attach_ctrlr_done, NULL, &dopts, &opts);
6681 	CU_ASSERT(rc == 0);
6682 
6683 	spdk_delay_us(1000);
6684 	poll_threads();
6685 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6686 	poll_threads();
6687 
6688 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6689 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6690 
6691 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6692 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6693 
6694 	/* If the multipath policy is updated before getting any I/O channel,
6695 	 * a new I/O channel should have the update.
6696 	 */
6697 	done = -1;
6698 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6699 					    BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6700 					    ut_set_multipath_policy_done, &done);
6701 	poll_threads();
6702 	CU_ASSERT(done == 0);
6703 
6704 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6705 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6706 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6707 
6708 	ch = spdk_get_io_channel(bdev);
6709 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6710 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6711 
6712 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6713 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6714 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6715 
6716 	/* If the multipath policy is updated while an I/O channel is active,
6717 	 * the update should be applied to the I/O channel immediately.
6718 	 */
6719 	done = -1;
6720 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6721 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6722 					    ut_set_multipath_policy_done, &done);
6723 	poll_threads();
6724 	CU_ASSERT(done == 0);
6725 
6726 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6727 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6728 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6729 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6730 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6731 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6732 
6733 	spdk_put_io_channel(ch);
6734 
6735 	poll_threads();
6736 
6737 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6738 	CU_ASSERT(rc == 0);
6739 
6740 	poll_threads();
6741 	spdk_delay_us(1000);
6742 	poll_threads();
6743 
6744 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6745 }
6746 
6747 static void
6748 test_uuid_generation(void)
6749 {
6750 	uint32_t nsid1 = 1, nsid2 = 2;
6751 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6752 	char sn3[21] = "                    ";
6753 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6754 	struct spdk_uuid uuid1, uuid2;
6755 	int rc;
6756 
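	/* nvme_generate_uuid() derives a UUID deterministically from the controller
	 * serial number and the namespace ID, so any difference in either input
	 * should produce a different UUID.
	 */
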
6757 	/* Test case 1:
6758 	 * Serial numbers are the same, nsids are different.
6759 	 * Compare the two generated UUIDs - they should be different. */
6760 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6761 	CU_ASSERT(rc == 0);
6762 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6763 	CU_ASSERT(rc == 0);
6764 
6765 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6766 
6767 	/* Test case 2:
6768 	 * Serial numbers differ only by one character, nsids are the same.
6769 	 * Compare the two generated UUIDs - they should be different. */
6770 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6771 	CU_ASSERT(rc == 0);
6772 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6773 	CU_ASSERT(rc == 0);
6774 
6775 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6776 
6777 	/* Test case 3:
6778 	 * Serial number consists only of space characters.
6779 	 * Validate the generated UUID. */
6780 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6781 	CU_ASSERT(rc == 0);
6782 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6783 
6784 }
6785 
6786 static void
6787 test_retry_io_to_same_path(void)
6788 {
6789 	struct nvme_path_id path1 = {}, path2 = {};
6790 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6791 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6792 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6793 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6794 	const int STRING_SIZE = 32;
6795 	const char *attached_names[STRING_SIZE];
6796 	struct nvme_bdev *bdev;
6797 	struct spdk_bdev_io *bdev_io;
6798 	struct nvme_bdev_io *bio;
6799 	struct spdk_io_channel *ch;
6800 	struct nvme_bdev_channel *nbdev_ch;
6801 	struct nvme_io_path *io_path1, *io_path2;
6802 	struct ut_nvme_req *req;
6803 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6804 	int done;
6805 	int rc;
6806 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6807 
6808 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6809 	bdev_opts.multipath = true;
6810 
6811 	g_opts.nvme_ioq_poll_period_us = 1;
6812 
6813 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6814 	ut_init_trid(&path1.trid);
6815 	ut_init_trid2(&path2.trid);
6816 	g_ut_attach_ctrlr_status = 0;
6817 	g_ut_attach_bdev_count = 1;
6818 
6819 	set_thread(0);
6820 
6821 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6822 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6823 
6824 	ctrlr1->ns[0].uuid = &uuid1;
6825 
6826 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6827 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6828 	CU_ASSERT(rc == 0);
6829 
6830 	spdk_delay_us(1000);
6831 	poll_threads();
6832 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6833 	poll_threads();
6834 
6835 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6836 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6837 
6838 	ctrlr2->ns[0].uuid = &uuid1;
6839 
6840 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6841 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6842 	CU_ASSERT(rc == 0);
6843 
6844 	spdk_delay_us(1000);
6845 	poll_threads();
6846 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6847 	poll_threads();
6848 
6849 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6850 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6851 
6852 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6853 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6854 
6855 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6856 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6857 
6858 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6859 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6860 
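	/* rr_min_io = 1 makes the round-robin selector switch to the next path
	 * after every single I/O. The ordering of the submissions below relies
	 * on this.
	 */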
6861 	done = -1;
6862 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6863 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6864 	poll_threads();
6865 	CU_ASSERT(done == 0);
6866 
6867 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6868 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6869 	CU_ASSERT(bdev->rr_min_io == 1);
6870 
6871 	ch = spdk_get_io_channel(bdev);
6872 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6873 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6874 
6875 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6876 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6877 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6878 
6879 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6880 	ut_bdev_io_set_buf(bdev_io);
6881 
6882 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6883 
6884 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6885 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6886 
6887 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6888 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6889 
6890 	/* The 1st I/O should be submitted to io_path1. */
6891 	bdev_io->internal.f.in_submit_request = true;
6892 
6893 	bdev_nvme_submit_request(ch, bdev_io);
6894 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6895 	CU_ASSERT(bio->io_path == io_path1);
6896 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6897 
6898 	spdk_delay_us(1);
6899 
6900 	poll_threads();
6901 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6902 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6903 
6904 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6905 	 * policy is round-robin.
6906 	 */
6907 	bdev_io->internal.f.in_submit_request = true;
6908 
6909 	bdev_nvme_submit_request(ch, bdev_io);
6910 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6911 	CU_ASSERT(bio->io_path == io_path2);
6912 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6913 
6914 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6915 	SPDK_CU_ASSERT_FATAL(req != NULL);
6916 
6917 	/* Set retry count to non-zero. */
6918 	g_opts.bdev_retry_count = 2;
6919 
6920 	/* Inject an I/O error. */
6921 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6922 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6923 
6924 	/* The 2nd I/O should be queued to nbdev_ch. */
6925 	spdk_delay_us(1);
6926 	poll_thread_times(0, 1);
6927 
6928 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6929 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6930 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6931 
6932 	/* The 2nd I/O should keep io_path2 cached. */
6933 	CU_ASSERT(bio->io_path == io_path2);
6934 
6935 	/* The 2nd I/O should be submitted to io_path2 again. */
6936 	poll_thread_times(0, 1);
6937 
6938 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6939 	CU_ASSERT(bio->io_path == io_path2);
6940 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6941 
6942 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6943 	SPDK_CU_ASSERT_FATAL(req != NULL);
6944 
6945 	/* Inject an I/O error again. */
6946 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6947 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6948 	req->cpl.status.crd = 1;
6949 
6950 	ctrlr2->cdata.crdt[1] = 1;
6951 
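	/* CRD = 1 selects Command Retry Delay Time 1 (crdt[1]), which is in units
	 * of 100 milliseconds. Hence the retry of the 2nd I/O is delayed by 100
	 * msec, which the spdk_delay_us(100000) further below lets expire.
	 */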
6952 	/* The 2nd I/O should be queued to nbdev_ch. */
6953 	spdk_delay_us(1);
6954 	poll_thread_times(0, 1);
6955 
6956 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6957 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6958 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6959 
6960 	/* The 2nd I/O should keep io_path2 cached. */
6961 	CU_ASSERT(bio->io_path == io_path2);
6962 
6963 	/* Detach ctrlr2 dynamically. */
6964 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6965 	CU_ASSERT(rc == 0);
6966 
6967 	spdk_delay_us(1000);
6968 	poll_threads();
6969 	spdk_delay_us(1000);
6970 	poll_threads();
6971 	spdk_delay_us(1000);
6972 	poll_threads();
6973 	spdk_delay_us(1000);
6974 	poll_threads();
6975 
6976 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
6977 
6978 	poll_threads();
6979 	spdk_delay_us(100000);
6980 	poll_threads();
6981 	spdk_delay_us(1);
6982 	poll_threads();
6983 
6984 	/* The 2nd I/O should succeed by io_path1. */
6985 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6986 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6987 	CU_ASSERT(bio->io_path == io_path1);
6988 
6989 	free(bdev_io);
6990 
6991 	spdk_put_io_channel(ch);
6992 
6993 	poll_threads();
6994 	spdk_delay_us(1);
6995 	poll_threads();
6996 
6997 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6998 	CU_ASSERT(rc == 0);
6999 
7000 	poll_threads();
7001 	spdk_delay_us(1000);
7002 	poll_threads();
7003 
7004 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7005 
7006 	g_opts.nvme_ioq_poll_period_us = 0;
7007 	g_opts.bdev_retry_count = 0;
7008 }
7009 
7010 /* This case verifies a fix for a complex race condition in which
7011  * failover was lost if a fabric connect command timed out while the
7012  * controller was being reset.
7013  */
7014 static void
7015 test_race_between_reset_and_disconnected(void)
7016 {
7017 	struct spdk_nvme_transport_id trid = {};
7018 	struct spdk_nvme_ctrlr ctrlr = {};
7019 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7020 	struct nvme_path_id *curr_trid;
7021 	struct spdk_io_channel *ch1, *ch2;
7022 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7023 	int rc;
7024 
7025 	ut_init_trid(&trid);
7026 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7027 
7028 	set_thread(0);
7029 
7030 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7031 	CU_ASSERT(rc == 0);
7032 
7033 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7034 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7035 
7036 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7037 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7038 
7039 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7040 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7041 
7042 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7043 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7044 
7045 	set_thread(1);
7046 
7047 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7048 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7049 
7050 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7051 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7052 
7053 	/* Reset starts from thread 1. */
7054 	set_thread(1);
7055 
7056 	nvme_ctrlr->resetting = false;
7057 	curr_trid->last_failed_tsc = spdk_get_ticks();
7058 	ctrlr.is_failed = true;
7059 
7060 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7061 	CU_ASSERT(rc == 0);
7062 	CU_ASSERT(nvme_ctrlr->resetting == true);
7063 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7064 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7065 
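	/* Step the reset poll by poll so that intermediate states are observable:
	 * the I/O qpair on thread 0 is deleted first, then the one on thread 1,
	 * and then the admin qpair is disconnected and reconnected.
	 */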
7066 	poll_thread_times(0, 3);
7067 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7068 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7069 
7070 	poll_thread_times(0, 1);
7071 	poll_thread_times(1, 1);
7072 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7073 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7074 	CU_ASSERT(ctrlr.is_failed == true);
7075 
7076 	poll_thread_times(1, 1);
7077 	poll_thread_times(0, 1);
7078 	CU_ASSERT(ctrlr.is_failed == false);
7079 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7080 
7081 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7082 	poll_thread_times(0, 2);
7083 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7084 
7085 	poll_thread_times(0, 1);
7086 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7087 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7088 
7089 	poll_thread_times(1, 1);
7090 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7091 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7092 	CU_ASSERT(nvme_ctrlr->resetting == true);
7093 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7094 
7095 	/* Here is just one poll before _bdev_nvme_reset_complete() is executed.
7096 	 *
7097 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
7098 	 * connect command is executed. If the fabric connect command times out,
7099 	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
7100 	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
7101 	 *
7102 	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
7103 	 */
7104 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
7105 	CU_ASSERT(rc == -EINPROGRESS);
7106 	CU_ASSERT(nvme_ctrlr->resetting == true);
7107 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
7108 
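	/* The next poll completes the reset. Completion should consume the pending
	 * failover and immediately start the deferred failover as a new reset.
	 */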
7109 	poll_thread_times(0, 1);
7110 	CU_ASSERT(nvme_ctrlr->resetting == true);
7111 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7112 
7113 	poll_threads();
7114 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7115 	poll_threads();
7116 
7117 	CU_ASSERT(nvme_ctrlr->resetting == false);
7118 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7119 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7120 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7121 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7122 
7123 	spdk_put_io_channel(ch2);
7124 
7125 	set_thread(0);
7126 
7127 	spdk_put_io_channel(ch1);
7128 
7129 	poll_threads();
7130 
7131 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7132 	CU_ASSERT(rc == 0);
7133 
7134 	poll_threads();
7135 	spdk_delay_us(1000);
7136 	poll_threads();
7137 
7138 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7139 }

7140 static void
7141 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
7142 {
7143 	int *_rc = (int *)cb_arg;
7144 
7145 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
7146 	*_rc = rc;
7147 }
7148 
7149 static void
7150 test_ctrlr_op_rpc(void)
7151 {
7152 	struct spdk_nvme_transport_id trid = {};
7153 	struct spdk_nvme_ctrlr ctrlr = {};
7154 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7155 	struct nvme_path_id *curr_trid;
7156 	struct spdk_io_channel *ch1, *ch2;
7157 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7158 	int ctrlr_op_rc;
7159 	int rc;
7160 
7161 	ut_init_trid(&trid);
7162 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7163 
7164 	set_thread(0);
7165 
7166 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7167 	CU_ASSERT(rc == 0);
7168 
7169 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7170 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7171 
7172 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7173 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7174 
7175 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7176 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7177 
7178 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7179 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7180 
7181 	set_thread(1);
7182 
7183 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7184 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7185 
7186 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7187 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7188 
7189 	/* Reset starts from thread 1. */
7190 	set_thread(1);
7191 
7192 	/* Case 1: ctrlr is already being destructed. */
7193 	nvme_ctrlr->destruct = true;
7194 	ctrlr_op_rc = 0;
7195 
7196 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7197 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7198 
7199 	poll_threads();
7200 
7201 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
7202 
7203 	/* Case 2: reset is in progress. */
7204 	nvme_ctrlr->destruct = false;
7205 	nvme_ctrlr->resetting = true;
7206 	ctrlr_op_rc = 0;
7207 
7208 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7209 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7210 
7211 	poll_threads();
7212 
7213 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
7214 
7215 	/* Case 3: reset completes successfully. */
7216 	nvme_ctrlr->resetting = false;
7217 	curr_trid->last_failed_tsc = spdk_get_ticks();
7218 	ctrlr.is_failed = true;
7219 	ctrlr_op_rc = -1;
7220 
7221 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7222 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7223 
7224 	CU_ASSERT(nvme_ctrlr->resetting == true);
7225 	CU_ASSERT(ctrlr_op_rc == -1);
7226 
7227 	poll_threads();
7228 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7229 	poll_threads();
7230 
7231 	CU_ASSERT(nvme_ctrlr->resetting == false);
7232 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7233 	CU_ASSERT(ctrlr.is_failed == false);
7234 	CU_ASSERT(ctrlr_op_rc == 0);
7235 
7236 	/* Case 4: invalid operation. */
7237 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
7238 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7239 
7240 	poll_threads();
7241 
7242 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
7243 
7244 	spdk_put_io_channel(ch2);
7245 
7246 	set_thread(0);
7247 
7248 	spdk_put_io_channel(ch1);
7249 
7250 	poll_threads();
7251 
7252 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7253 	CU_ASSERT(rc == 0);
7254 
7255 	poll_threads();
7256 	spdk_delay_us(1000);
7257 	poll_threads();
7258 
7259 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7260 }
7261 
7262 static void
7263 test_bdev_ctrlr_op_rpc(void)
7264 {
7265 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
7266 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
7267 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7268 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
7269 	struct nvme_path_id *curr_trid1, *curr_trid2;
7270 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
7271 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
7272 	int ctrlr_op_rc;
7273 	int rc;
7274 
7275 	ut_init_trid(&trid1);
7276 	ut_init_trid2(&trid2);
7277 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
7278 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
7279 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
7280 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
7281 	ctrlr1.cdata.cntlid = 1;
7282 	ctrlr2.cdata.cntlid = 2;
7283 	ctrlr1.adminq.is_connected = true;
7284 	ctrlr2.adminq.is_connected = true;
7285 
7286 	set_thread(0);
7287 
7288 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
7289 	CU_ASSERT(rc == 0);
7290 
7291 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7292 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7293 
7294 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
7295 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
7296 
7297 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
7298 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
7299 
7300 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
7301 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
7302 
7303 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
7304 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7305 
7306 	set_thread(1);
7307 
7308 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
7309 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
7310 
7311 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
7312 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7313 
7314 	set_thread(0);
7315 
7316 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
7317 	CU_ASSERT(rc == 0);
7318 
7319 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
7320 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
7321 
7322 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
7323 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
7324 
7325 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7326 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7327 
7328 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7329 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7330 
7331 	set_thread(1);
7332 
7333 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7334 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7335 
7336 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7337 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7338 
7339 	/* Reset starts from thread 1. */
7340 	set_thread(1);
7341 
7342 	nvme_ctrlr1->resetting = false;
7343 	nvme_ctrlr2->resetting = false;
7344 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7345 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7346 	ctrlr_op_rc = -1;
7347 
7348 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7349 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7350 
7351 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7352 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7353 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7354 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7355 
7356 	poll_thread_times(0, 3);
7357 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7358 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7359 
7360 	poll_thread_times(0, 1);
7361 	poll_thread_times(1, 1);
7362 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7363 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7364 
7365 	poll_thread_times(1, 1);
7366 	poll_thread_times(0, 1);
7367 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7368 
7369 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7370 	poll_thread_times(0, 2);
7371 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7372 
7373 	poll_thread_times(0, 1);
7374 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7375 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7376 
7377 	poll_thread_times(1, 1);
7378 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7379 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7380 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7381 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7382 
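	/* Walk both threads until the reset of nvme_ctrlr1 completes. Then
	 * nvme_bdev_ctrlr_op_rpc() should start the same operation on the next
	 * ctrlr, nvme_ctrlr2.
	 */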
7383 	poll_thread_times(0, 2);
7384 	poll_thread_times(1, 1);
7385 	poll_thread_times(0, 1);
7386 	poll_thread_times(1, 1);
7387 	poll_thread_times(0, 1);
7388 	poll_thread_times(1, 1);
7389 	poll_thread_times(0, 1);
7390 
7391 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7392 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7393 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7394 
7395 	poll_threads();
7396 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7397 	poll_threads();
7398 
7399 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7400 	CU_ASSERT(ctrlr_op_rc == 0);
7401 
7402 	set_thread(1);
7403 
7404 	spdk_put_io_channel(ch12);
7405 	spdk_put_io_channel(ch22);
7406 
7407 	set_thread(0);
7408 
7409 	spdk_put_io_channel(ch11);
7410 	spdk_put_io_channel(ch21);
7411 
7412 	poll_threads();
7413 
7414 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7415 	CU_ASSERT(rc == 0);
7416 
7417 	poll_threads();
7418 	spdk_delay_us(1000);
7419 	poll_threads();
7420 
7421 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7422 }
7423 
7424 static void
7425 test_disable_enable_ctrlr(void)
7426 {
7427 	struct spdk_nvme_transport_id trid = {};
7428 	struct spdk_nvme_ctrlr ctrlr = {};
7429 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7430 	struct nvme_path_id *curr_trid;
7431 	struct spdk_io_channel *ch1, *ch2;
7432 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7433 	int rc;
7434 
7435 	ut_init_trid(&trid);
7436 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7437 	ctrlr.adminq.is_connected = true;
7438 
7439 	set_thread(0);
7440 
7441 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7442 	CU_ASSERT(rc == 0);
7443 
7444 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7445 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7446 
7447 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7448 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7449 
7450 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7451 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7452 
7453 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7454 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7455 
7456 	set_thread(1);
7457 
7458 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7459 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7460 
7461 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7462 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7463 
7464 	/* Disable starts from thread 1. */
7465 	set_thread(1);
7466 
7467 	/* Case 1: ctrlr is already disabled. */
7468 	nvme_ctrlr->disabled = true;
7469 
7470 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7471 	CU_ASSERT(rc == -EALREADY);
7472 
7473 	/* Case 2: ctrlr is already being destructed. */
7474 	nvme_ctrlr->disabled = false;
7475 	nvme_ctrlr->destruct = true;
7476 
7477 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7478 	CU_ASSERT(rc == -ENXIO);
7479 
7480 	/* Case 3: reset is in progress. */
7481 	nvme_ctrlr->destruct = false;
7482 	nvme_ctrlr->resetting = true;
7483 
7484 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7485 	CU_ASSERT(rc == -EBUSY);
7486 
7487 	/* Case 4: disable completes successfully. */
7488 	nvme_ctrlr->resetting = false;
7489 
7490 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7491 	CU_ASSERT(rc == 0);
7492 	CU_ASSERT(nvme_ctrlr->resetting == true);
7493 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7494 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7495 
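	/* Disable follows the same teardown sequence as reset: the I/O qpairs are
	 * deleted thread by thread and the admin qpair is disconnected, but then
	 * the ctrlr is marked disabled instead of being reconnected.
	 */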
7496 	poll_thread_times(0, 3);
7497 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7498 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7499 
7500 	poll_thread_times(0, 1);
7501 	poll_thread_times(1, 1);
7502 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7503 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7504 
7505 	poll_thread_times(1, 1);
7506 	poll_thread_times(0, 1);
7507 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7508 	poll_thread_times(1, 1);
7509 	poll_thread_times(0, 1);
7510 	poll_thread_times(1, 1);
7511 	poll_thread_times(0, 1);
7512 	CU_ASSERT(nvme_ctrlr->resetting == false);
7513 	CU_ASSERT(nvme_ctrlr->disabled == true);
7514 
7515 	/* Case 5: enable completes successfully. */
7516 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7517 	CU_ASSERT(rc == 0);
7518 
7519 	CU_ASSERT(nvme_ctrlr->resetting == true);
7520 	CU_ASSERT(nvme_ctrlr->disabled == false);
7521 
7522 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7523 	poll_thread_times(0, 2);
7524 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7525 
7526 	poll_thread_times(0, 1);
7527 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7528 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7529 
7530 	poll_thread_times(1, 1);
7531 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7532 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7533 	CU_ASSERT(nvme_ctrlr->resetting == true);
7534 
7535 	poll_thread_times(0, 1);
7536 	CU_ASSERT(nvme_ctrlr->resetting == false);
7537 
7538 	/* Case 6: ctrlr is already enabled. */
7539 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7540 	CU_ASSERT(rc == -EALREADY);
7541 
7542 	set_thread(0);
7543 
7544 	/* Case 7: disable cancels delayed reconnect. */
7545 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7546 	ctrlr.fail_reset = true;
7547 
7548 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7549 	CU_ASSERT(rc == 0);
7550 
7551 	poll_threads();
7552 
7553 	CU_ASSERT(nvme_ctrlr->resetting == false);
7554 	CU_ASSERT(ctrlr.is_failed == false);
7555 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7556 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7557 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7558 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7559 
7560 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7561 	CU_ASSERT(rc == 0);
7562 
7563 	CU_ASSERT(nvme_ctrlr->resetting == true);
7564 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7565 
7566 	poll_threads();
7567 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7568 	poll_threads();
7569 
7570 	CU_ASSERT(nvme_ctrlr->resetting == false);
7571 	CU_ASSERT(nvme_ctrlr->disabled == true);
7572 
7573 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7574 	CU_ASSERT(rc == 0);
7575 
7576 	CU_ASSERT(nvme_ctrlr->resetting == true);
7577 	CU_ASSERT(nvme_ctrlr->disabled == false);
7578 
7579 	poll_threads();
7580 
7581 	CU_ASSERT(nvme_ctrlr->resetting == false);
7582 
7583 	set_thread(1);
7584 
7585 	spdk_put_io_channel(ch2);
7586 
7587 	set_thread(0);
7588 
7589 	spdk_put_io_channel(ch1);
7590 
7591 	poll_threads();
7592 
7593 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7594 	CU_ASSERT(rc == 0);
7595 
7596 	poll_threads();
7597 	spdk_delay_us(1000);
7598 	poll_threads();
7599 
7600 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7601 }
7602 
7603 static void
7604 ut_delete_done(void *ctx, int rc)
7605 {
7606 	int *delete_done_rc = ctx;
7607 	*delete_done_rc = rc;
7608 }
7609 
7610 static void
7611 test_delete_ctrlr_done(void)
7612 {
7613 	struct spdk_nvme_transport_id trid = {};
7614 	struct spdk_nvme_ctrlr ctrlr = {};
7615 	int delete_done_rc = 0xDEADBEEF;
7616 	int rc;
7617 
7618 	ut_init_trid(&trid);
7619 
7620 	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7621 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7622 
7623 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7624 	CU_ASSERT(rc == 0);
7625 
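	/* Deletion is asynchronous. Poll until ut_delete_done() overwrites the
	 * sentinel, advancing simulated time by 1 msec per iteration.
	 */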
7626 	for (int i = 0; i < 20; i++) {
7627 		poll_threads();
7628 		if (delete_done_rc == 0) {
7629 			break;
7630 		}
7631 		spdk_delay_us(1000);
7632 	}
7633 
7634 	CU_ASSERT(delete_done_rc == 0);
7635 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7636 }
7637 
7638 static void
7639 test_ns_remove_during_reset(void)
7640 {
7641 	struct nvme_path_id path = {};
7642 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7643 	struct spdk_nvme_ctrlr *ctrlr;
7644 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7645 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7646 	struct nvme_ctrlr *nvme_ctrlr;
7647 	const int STRING_SIZE = 32;
7648 	const char *attached_names[STRING_SIZE];
7649 	struct nvme_bdev *bdev;
7650 	struct nvme_ns *nvme_ns;
7651 	union spdk_nvme_async_event_completion event = {};
7652 	struct spdk_nvme_cpl cpl = {};
7653 	int rc;
7654 
7655 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7656 	ut_init_trid(&path.trid);
7657 
7658 	set_thread(0);
7659 
7660 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7661 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7662 
7663 	g_ut_attach_ctrlr_status = 0;
7664 	g_ut_attach_bdev_count = 1;
7665 
7666 	opts.multipath = false;
7667 
7668 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7669 				   attach_ctrlr_done, NULL, &dopts, &opts);
7670 	CU_ASSERT(rc == 0);
7671 
7672 	spdk_delay_us(1000);
7673 	poll_threads();
7674 
7675 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7676 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7677 
7678 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7679 	CU_ASSERT(nvme_ctrlr != NULL);
7680 
7681 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7682 	CU_ASSERT(bdev != NULL);
7683 
7684 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7685 	CU_ASSERT(nvme_ns != NULL);
7686 
7687 	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
7688 	 * but nvme_ns->ns should be NULL.
7689 	 */
7690 
7691 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7692 	ctrlr->ns[0].is_active = false;
7693 
7694 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7695 	CU_ASSERT(rc == 0);
7696 
7697 	poll_threads();
7698 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7699 	poll_threads();
7700 
7701 	CU_ASSERT(nvme_ctrlr->resetting == false);
7702 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7703 
7704 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7705 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7706 	CU_ASSERT(nvme_ns->bdev == bdev);
7707 	CU_ASSERT(nvme_ns->ns == NULL);
7708 
7709 	/* Then, async event should fill nvme_ns->ns again. */
7710 
7711 	ctrlr->ns[0].is_active = true;
7712 
7713 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7714 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7715 	cpl.cdw0 = event.raw;
7716 
7717 	aer_cb(nvme_ctrlr, &cpl);
7718 
7719 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7720 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7721 	CU_ASSERT(nvme_ns->bdev == bdev);
7722 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7723 
7724 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7725 	CU_ASSERT(rc == 0);
7726 
7727 	poll_threads();
7728 	spdk_delay_us(1000);
7729 	poll_threads();
7730 
7731 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7732 }
7733 
7734 static void
7735 test_io_path_is_current(void)
7736 {
7737 	struct nvme_bdev_channel nbdev_ch = {
7738 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7739 	};
7740 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7741 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7742 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7743 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7744 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7745 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7746 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7747 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7748 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7749 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7750 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7751 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7752 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7753 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7754 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
7755 
7756 	/* io_path1 is being deleted */
7757 	io_path1.nbdev_ch = NULL;
7758 
7759 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7760 
7761 	io_path1.nbdev_ch = &nbdev_ch;
7762 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7763 	io_path2.nbdev_ch = &nbdev_ch;
7764 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7765 	io_path3.nbdev_ch = &nbdev_ch;
7766 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7767 
7768 	/* active/active: io_path is current if it is available and ANA optimized. */
7769 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7770 
7771 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7772 
7773 	/* active/active: io_path is not current if it is disconnected even if it is
7774 	 * ANA optimized.
7775 	 */
7776 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7777 
7778 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7779 
7780 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7781 
7782 	/* active/passive: io_path is current if it is available and cached.
7783 	 * (only ANA optimized path is cached for active/passive.)
7784 	 */
7785 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7786 	nbdev_ch.current_io_path = &io_path2;
7787 
7788 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7789 
7790 	/* active/passive: io_path is not current if it is disconnected even if it is cached */
7791 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7792 
7793 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7794 
7795 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7796 
7797 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7798 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7799 
7800 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7801 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7802 
7803 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7804 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7805 
7806 	/* active/active: non-optimized path is current only if there is no optimized path. */
7807 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7808 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7809 
7810 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7811 
7812 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7813 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7814 
7815 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7816 
7817 	/* active/passive: when there is no optimized path, the first io_path is current. */
7818 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7819 	nbdev_ch.current_io_path = NULL;
7820 
7821 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7822 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7823 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7824 }
7825 
7826 static void
7827 test_bdev_reset_abort_io(void)
7828 {
7829 	struct spdk_nvme_transport_id trid = {};
7830 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7831 	struct spdk_nvme_ctrlr *ctrlr;
7832 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7833 	struct nvme_ctrlr *nvme_ctrlr;
7834 	const int STRING_SIZE = 32;
7835 	const char *attached_names[STRING_SIZE];
7836 	struct nvme_bdev *bdev;
7837 	struct spdk_bdev_io *write_io, *read_io, *reset_io;
7838 	struct spdk_io_channel *ch1, *ch2;
7839 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
7840 	struct nvme_io_path *io_path1, *io_path2;
7841 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
7842 	int rc;
7843 
7844 	g_opts.bdev_retry_count = -1;
7845 
7846 	ut_init_trid(&trid);
7847 
7848 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
7849 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7850 
7851 	g_ut_attach_ctrlr_status = 0;
7852 	g_ut_attach_bdev_count = 1;
7853 
7854 	set_thread(1);
7855 
7856 	opts.ctrlr_loss_timeout_sec = -1;
7857 	opts.reconnect_delay_sec = 1;
7858 	opts.multipath = false;
7859 
7860 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
7861 				   attach_ctrlr_done, NULL, &dopts, &opts);
7862 	CU_ASSERT(rc == 0);
7863 
7864 	spdk_delay_us(1000);
7865 	poll_threads();
7866 
7867 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7868 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7869 
7870 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
7871 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
7872 
7873 	set_thread(0);
7874 
7875 	ch1 = spdk_get_io_channel(bdev);
7876 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7877 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
7878 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
7879 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
7880 	nvme_qpair1 = io_path1->qpair;
7881 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
7882 
7883 	set_thread(1);
7884 
7885 	ch2 = spdk_get_io_channel(bdev);
7886 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7887 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
7888 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
7889 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
7890 	nvme_qpair2 = io_path2->qpair;
7891 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
7892 
7893 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1);
7894 	ut_bdev_io_set_buf(write_io);
7895 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7896 
7897 	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1);
7898 	ut_bdev_io_set_buf(read_io);
7899 	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7900 
7901 	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
7902 
7903 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
7904 	 * the corresponding nvme_ctrlr. An I/O submitted while the nvme_ctrlr is
7905 	 * resetting should be queued.
7906 	 */
7907 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7908 
7909 	poll_thread_times(0, 3);
7910 
7911 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7912 	CU_ASSERT(nvme_ctrlr->resetting == true);
7913 
7914 	set_thread(0);
7915 
7916 	write_io->internal.f.in_submit_request = true;
7917 
7918 	bdev_nvme_submit_request(ch1, write_io);
7919 
7920 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
7921 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
7922 
7923 	set_thread(1);
7924 
7925 	/* Submit a reset request to a bdev while resetting an nvme_ctrlr.
7926 	 * Further I/O queueing should be disabled and queued I/Os should be aborted.
7927 	 * Verify these behaviors.
7928 	 */
7929 	reset_io->internal.f.in_submit_request = true;
7930 
7931 	bdev_nvme_submit_request(ch2, reset_io);
7932 
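	/* Polling both threads propagates the bdev reset: the bdev channels are
	 * frozen (nbdev_ch->resetting is set) so that new I/Os fail immediately
	 * instead of being queued.
	 */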
7933 	poll_thread_times(0, 1);
7934 	poll_thread_times(1, 2);
7935 
7936 	CU_ASSERT(nbdev_ch1->resetting == true);
7937 
7938 	/* qpair1 should still be disconnected. */
7939 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7940 
7941 	set_thread(0);
7942 
7943 	read_io->internal.f.in_submit_request = true;
7944 
7945 	bdev_nvme_submit_request(ch1, read_io);
7946 
7947 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7948 
7949 	poll_thread_times(0, 1);
7950 
7951 	/* The I/O which was submitted during bdev_reset should fail immediately. */
7952 	CU_ASSERT(read_io->internal.f.in_submit_request == false);
7953 	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
7954 
7955 	poll_threads();
7956 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7957 	poll_threads();
7958 
7959 	/* The completion of bdev_reset should ensure queued I/O is aborted. */
7960 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
7961 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
7962 
7963 	/* The reset request itself should complete with success. */
7964 	CU_ASSERT(reset_io->internal.f.in_submit_request == false);
7965 	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7966 
7967 	set_thread(0);
7968 
7969 	spdk_put_io_channel(ch1);
7970 
7971 	set_thread(1);
7972 
7973 	spdk_put_io_channel(ch2);
7974 
7975 	poll_threads();
7976 
7977 	set_thread(0);
7978 
7979 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7980 	CU_ASSERT(rc == 0);
7981 
7982 	poll_threads();
7983 	spdk_delay_us(1000);
7984 	poll_threads();
7985 
7986 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7987 
7988 	free(write_io);
7989 	free(read_io);
7990 	free(reset_io);
7991 
7992 	g_opts.bdev_retry_count = 0;
7993 }
7994 
7995 static void
7996 test_race_between_clear_pending_resets_and_reset_ctrlr_complete(void)
7997 {
7998 	struct nvme_path_id path = {};
7999 	struct spdk_nvme_ctrlr *ctrlr;
8000 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
8001 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
8002 	struct nvme_ctrlr *nvme_ctrlr;
8003 	const int STRING_SIZE = 32;
8004 	const char *attached_names[STRING_SIZE];
8005 	struct nvme_bdev *bdev;
8006 	struct spdk_bdev_io *bdev_io;
8007 	struct nvme_bdev_io *bio;
8008 	struct spdk_io_channel *ch1, *ch2;
8009 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
8010 	struct nvme_io_path *io_path1, *io_path2;
8011 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
8012 	int rc;
8013 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
8014 
8015 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
8016 	bdev_opts.multipath = true;
8017 
8018 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
8019 	ut_init_trid(&path.trid);
8020 	g_ut_attach_ctrlr_status = 0;
8021 	g_ut_attach_bdev_count = 1;
8022 
8023 	set_thread(0);
8024 
8025 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, true);
8026 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
8027 
8028 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
8029 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
8030 	CU_ASSERT(rc == 0);
8031 
8032 	spdk_delay_us(1000);
8033 	poll_threads();
8034 
8035 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8036 	poll_threads();
8037 
8038 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8039 	poll_threads();
8040 
8041 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
8042 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
8043 
8044 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
8045 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
8046 
8047 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
8048 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
8049 
8050 	set_thread(0);
8051 
8052 	ch1 = spdk_get_io_channel(bdev);
8053 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
8054 
8055 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
8056 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr);
8057 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
8058 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
8059 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
8060 
8061 	set_thread(1);
8062 
8063 	ch2 = spdk_get_io_channel(bdev);
8064 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
8065 
8066 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
8067 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr);
8068 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
8069 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
8070 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
8071 
8072 	/* Start an internal reset request. */
8073 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
8074 	CU_ASSERT(rc == 0);
8075 	CU_ASSERT(nvme_ctrlr->resetting == true);
8076 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
8077 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
8078 
8079 	poll_thread_times(0, 3);
8080 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
8081 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
8082 
8083 	poll_thread_times(0, 1);
8084 	poll_thread_times(1, 1);
8085 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
8086 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
8087 
8088 	poll_thread_times(1, 1);
8089 	poll_thread_times(0, 1);
8090 	CU_ASSERT(ctrlr->adminq.is_connected == false);
8091 
8092 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8093 	poll_thread_times(0, 2);
8094 	CU_ASSERT(ctrlr->adminq.is_connected == true);
8095 
8096 	poll_thread_times(0, 1);
8097 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
8098 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
8099 
8100 	poll_thread_times(1, 1);
8101 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
8102 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
8103 	CU_ASSERT(nvme_ctrlr->resetting == true);
8104 
8105 	set_thread(0);
8106 
8107 	/* Submit an external reset request from bdev_io just one poll before the
8108 	 * internal reset request completes.
8109 	 *
8110 	 * Previously, there was a race window between clearing a pending reset and completing
8111 	 * the reset request. If an external reset request was submitted in that window, it was never woken up.
8112 	 *
8113 	 * The lost-wakeup bug was fixed and there is no such race window anymore.
8114 	 *
8115 	 * Hence, submit the external reset request as late as possible to guard against future regressions.
8116 	 */
8117 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
8118 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
8119 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
8120 
8121 	/* For simplicity, skip freezing bdev channels and call the freeze-done callback directly. */
8122 	bdev_nvme_freeze_bdev_channel_done(bdev, bio, 0);
8123 
8124 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == bdev_io);
8125 
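	/* A single poll completes the internal reset. Completion must clear
	 * resetting and dequeue the pending external reset together so that the
	 * external request is not lost.
	 */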
8126 	poll_thread_times(0, 1);
8127 
8128 	/* External reset request should be cleared. */
8129 	CU_ASSERT(nvme_ctrlr->resetting == false);
8130 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
8131 
8132 	poll_threads();
8133 
8134 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
8135 
8136 	set_thread(0);
8137 
8138 	spdk_put_io_channel(ch1);
8139 
8140 	set_thread(1);
8141 
8142 	spdk_put_io_channel(ch2);
8143 
8144 	poll_threads();
8145 
8146 	set_thread(0);
8147 
8148 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
8149 	CU_ASSERT(rc == 0);
8150 
8151 	poll_threads();
8152 	spdk_delay_us(1000);
8153 	poll_threads();
8154 
8155 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
8156 
8157 	free(bdev_io);
8158 }
8159 
8160 int
8161 main(int argc, char **argv)
8162 {
8163 	CU_pSuite	suite = NULL;
8164 	unsigned int	num_failures;
8165 
8166 	CU_initialize_registry();
8167 
8168 	suite = CU_add_suite("nvme", NULL, NULL);
8169 
8170 	CU_ADD_TEST(suite, test_create_ctrlr);
8171 	CU_ADD_TEST(suite, test_reset_ctrlr);
8172 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
8173 	CU_ADD_TEST(suite, test_failover_ctrlr);
8174 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
8175 	CU_ADD_TEST(suite, test_pending_reset);
8176 	CU_ADD_TEST(suite, test_attach_ctrlr);
8177 	CU_ADD_TEST(suite, test_aer_cb);
8178 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
8179 	CU_ADD_TEST(suite, test_add_remove_trid);
8180 	CU_ADD_TEST(suite, test_abort);
8181 	CU_ADD_TEST(suite, test_get_io_qpair);
8182 	CU_ADD_TEST(suite, test_bdev_unregister);
8183 	CU_ADD_TEST(suite, test_compare_ns);
8184 	CU_ADD_TEST(suite, test_init_ana_log_page);
8185 	CU_ADD_TEST(suite, test_get_memory_domains);
8186 	CU_ADD_TEST(suite, test_reconnect_qpair);
8187 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
8188 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
8189 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
8190 	CU_ADD_TEST(suite, test_admin_path);
8191 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
8192 	CU_ADD_TEST(suite, test_find_io_path);
8193 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
8194 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
8195 	CU_ADD_TEST(suite, test_retry_io_count);
8196 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
8197 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
8198 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
8199 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
8200 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
8201 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
8202 	CU_ADD_TEST(suite, test_fail_path);
8203 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
8204 	CU_ADD_TEST(suite, test_ana_transition);
8205 	CU_ADD_TEST(suite, test_set_preferred_path);
8206 	CU_ADD_TEST(suite, test_find_next_io_path);
8207 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
8208 	CU_ADD_TEST(suite, test_disable_auto_failback);
8209 	CU_ADD_TEST(suite, test_set_multipath_policy);
8210 	CU_ADD_TEST(suite, test_uuid_generation);
8211 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
8212 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
8213 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
8214 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
8215 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
8216 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
8217 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
8218 	CU_ADD_TEST(suite, test_io_path_is_current);
8219 	CU_ADD_TEST(suite, test_bdev_reset_abort_io);
8220 	CU_ADD_TEST(suite, test_race_between_clear_pending_resets_and_reset_ctrlr_complete);
8221 
8222 	allocate_threads(3);
8223 	set_thread(0);
8224 	bdev_nvme_library_init();
8225 	init_accel();
8226 
8227 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
8228 
8229 	set_thread(0);
8230 	bdev_nvme_library_fini();
8231 	fini_accel();
8232 	free_threads();
8233 
8234 	CU_cleanup_registry();
8235 
8236 	return num_failures;
8237 }
8238